// engine.go

package engine

import (
	"fmt"
	"github.com/huichen/murmur"
	"github.com/huichen/sego"
	"github.com/huichen/wukong/core"
	"github.com/huichen/wukong/storage"
	"github.com/huichen/wukong/types"
	"github.com/huichen/wukong/utils"
	"log"
	"os"
	"runtime"
	"sort"
	"strconv"
	"sync/atomic"
	"time"
)

const (
	NumNanosecondsInAMillisecond = 1000000
	PersistentStorageFilePrefix  = "wukong"
)

type Engine struct {
	// Counters tracking how many documents have been indexed, removed, etc.
	numDocumentsIndexed      uint64
	numDocumentsRemoved      uint64
	numDocumentsForceUpdated uint64
	numIndexingRequests      uint64
	numRemovingRequests      uint64
	numForceUpdatingRequests uint64
	numTokenIndexAdded       uint64
	numDocumentsStored       uint64

	// Initialization options recorded at Init time
	initOptions types.EngineInitOptions
	initialized bool

	indexers   []core.Indexer
	rankers    []core.Ranker
	segmenter  sego.Segmenter
	stopTokens StopTokens
	dbs        []storage.Storage

	// Channels used to build the index
	segmenterChannel         chan segmenterRequest
	indexerAddDocChannels    []chan indexerAddDocumentRequest
	indexerRemoveDocChannels []chan indexerRemoveDocRequest
	rankerAddDocChannels     []chan rankerAddDocRequest

	// Channels used for lookup and ranking
	indexerLookupChannels   []chan indexerLookupRequest
	rankerRankChannels      []chan rankerRankRequest
	rankerRemoveDocChannels []chan rankerRemoveDocRequest

	// Channels used by the persistent storage workers
	persistentStorageIndexDocumentChannels []chan persistentStorageIndexDocumentRequest
	persistentStorageInitChannel           chan bool
}

// Init initializes the engine. It must be called exactly once, before any other method.
func (engine *Engine) Init(options types.EngineInitOptions) {
	// Use as many OS threads as there are CPUs
	runtime.GOMAXPROCS(runtime.NumCPU())

	// Record the initialization options
	if engine.initialized {
		log.Fatal("the engine must not be initialized twice")
	}
	options.Init()
	engine.initOptions = options
	engine.initialized = true

	if !options.NotUsingSegmenter {
		// Load the segmenter dictionary
		engine.segmenter.LoadDictionary(options.SegmenterDictionaries)

		// Initialize the stop tokens
		engine.stopTokens.Init(options.StopTokenFile)
	}

	// Initialize the indexers and rankers, one per shard
	for shard := 0; shard < options.NumShards; shard++ {
		engine.indexers = append(engine.indexers, core.Indexer{})
		engine.indexers[shard].Init(*options.IndexerInitOptions)

		engine.rankers = append(engine.rankers, core.Ranker{})
		engine.rankers[shard].Init()
	}

	// Initialize the segmenter channel
	engine.segmenterChannel = make(
		chan segmenterRequest, options.NumSegmenterThreads)

	// Initialize the indexer channels
	engine.indexerAddDocChannels = make(
		[]chan indexerAddDocumentRequest, options.NumShards)
	engine.indexerRemoveDocChannels = make(
		[]chan indexerRemoveDocRequest, options.NumShards)
	engine.indexerLookupChannels = make(
		[]chan indexerLookupRequest, options.NumShards)
	for shard := 0; shard < options.NumShards; shard++ {
		engine.indexerAddDocChannels[shard] = make(
			chan indexerAddDocumentRequest,
			options.IndexerBufferLength)
		engine.indexerRemoveDocChannels[shard] = make(
			chan indexerRemoveDocRequest,
			options.IndexerBufferLength)
		engine.indexerLookupChannels[shard] = make(
			chan indexerLookupRequest,
			options.IndexerBufferLength)
	}

	// Initialize the ranker channels
	engine.rankerAddDocChannels = make(
		[]chan rankerAddDocRequest, options.NumShards)
	engine.rankerRankChannels = make(
		[]chan rankerRankRequest, options.NumShards)
	engine.rankerRemoveDocChannels = make(
		[]chan rankerRemoveDocRequest, options.NumShards)
	for shard := 0; shard < options.NumShards; shard++ {
		engine.rankerAddDocChannels[shard] = make(
			chan rankerAddDocRequest,
			options.RankerBufferLength)
		engine.rankerRankChannels[shard] = make(
			chan rankerRankRequest,
			options.RankerBufferLength)
		engine.rankerRemoveDocChannels[shard] = make(
			chan rankerRemoveDocRequest,
			options.RankerBufferLength)
	}

	// Initialize the persistent storage channels
	if engine.initOptions.UsePersistentStorage {
		engine.persistentStorageIndexDocumentChannels =
			make([]chan persistentStorageIndexDocumentRequest,
				engine.initOptions.PersistentStorageShards)
		for shard := 0; shard < engine.initOptions.PersistentStorageShards; shard++ {
			engine.persistentStorageIndexDocumentChannels[shard] = make(
				chan persistentStorageIndexDocumentRequest)
		}
		engine.persistentStorageInitChannel = make(
			chan bool, engine.initOptions.PersistentStorageShards)
	}

	// Start the segmenter workers
	for iThread := 0; iThread < options.NumSegmenterThreads; iThread++ {
		go engine.segmenterWorker()
	}

	// Start the indexer and ranker workers
	for shard := 0; shard < options.NumShards; shard++ {
		go engine.indexerAddDocumentWorker(shard)
		go engine.indexerRemoveDocWorker(shard)
		go engine.rankerAddDocWorker(shard)
		go engine.rankerRemoveDocWorker(shard)

		for i := 0; i < options.NumIndexerThreadsPerShard; i++ {
			go engine.indexerLookupWorker(shard)
		}
		for i := 0; i < options.NumRankerThreadsPerShard; i++ {
			go engine.rankerRankWorker(shard)
		}
	}

	// Start the persistent storage worker goroutines
	if engine.initOptions.UsePersistentStorage {
		err := os.MkdirAll(engine.initOptions.PersistentStorageFolder, 0700)
		if err != nil {
			log.Fatal("failed to create directory ", engine.initOptions.PersistentStorageFolder)
		}

		// Open or create the databases, one per persistent storage shard
		engine.dbs = make([]storage.Storage, engine.initOptions.PersistentStorageShards)
		for shard := 0; shard < engine.initOptions.PersistentStorageShards; shard++ {
			dbPath := engine.initOptions.PersistentStorageFolder + "/" + PersistentStorageFilePrefix + "." + strconv.Itoa(shard)
			db, err := storage.OpenStorage(dbPath)
			if db == nil || err != nil {
				log.Fatal("failed to open database ", dbPath, ": ", err)
			}
			engine.dbs[shard] = db
		}

		// Restore previously stored documents from the databases
		for shard := 0; shard < engine.initOptions.PersistentStorageShards; shard++ {
			go engine.persistentStorageInitWorker(shard)
		}

		// Wait for the restore workers to finish
		for shard := 0; shard < engine.initOptions.PersistentStorageShards; shard++ {
			<-engine.persistentStorageInitChannel
		}

		// Wait until every restored document has been indexed
		for {
			runtime.Gosched()
			if atomic.LoadUint64(&engine.numIndexingRequests) == atomic.LoadUint64(&engine.numDocumentsIndexed) {
				break
			}
		}

		// Close and reopen the databases
		for shard := 0; shard < engine.initOptions.PersistentStorageShards; shard++ {
			engine.dbs[shard].Close()
			dbPath := engine.initOptions.PersistentStorageFolder + "/" + PersistentStorageFilePrefix + "." + strconv.Itoa(shard)
			db, err := storage.OpenStorage(dbPath)
			if db == nil || err != nil {
				log.Fatal("failed to open database ", dbPath, ": ", err)
			}
			engine.dbs[shard] = db
		}

		for shard := 0; shard < engine.initOptions.PersistentStorageShards; shard++ {
			go engine.persistentStorageIndexDocumentWorker(shard)
		}
	}

	atomic.AddUint64(&engine.numDocumentsStored, engine.numIndexingRequests)
}
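
// Usage sketch (not part of the original source): a minimal way a caller might
// initialize the engine, assuming a sego dictionary file "dictionary.txt" and a
// stop-token file "stop_tokens.txt" exist; every option not set here falls back
// to the defaults applied by options.Init().
//
//	var searcher Engine
//	searcher.Init(types.EngineInitOptions{
//		SegmenterDictionaries: "dictionary.txt",
//		StopTokenFile:         "stop_tokens.txt",
//	})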

// IndexDocument adds a document to the index.
//
// Parameters:
//	docId	document identifier; must be unique. docId == 0 marks an invalid document
//		(used to force an index flush), while [1, +inf) marks valid documents.
//	data	see the comments on DocumentIndexData.
//
// Notes:
//	1. This function is thread-safe; call it concurrently where possible to speed up indexing.
//	2. The call is asynchronous: when it returns, the document may not yet be in the index,
//	   so an immediate Search may not find it. Call FlushIndex to force a flush.
func (engine *Engine) IndexDocument(docId uint64, data types.DocumentIndexData, forceUpdate bool) {
	engine.internalIndexDocument(docId, data, forceUpdate)

	if engine.initOptions.UsePersistentStorage && docId != 0 {
		hash := murmur.Murmur3([]byte(fmt.Sprintf("%d", docId))) % uint32(engine.initOptions.PersistentStorageShards)
		engine.persistentStorageIndexDocumentChannels[hash] <- persistentStorageIndexDocumentRequest{docId: docId, data: data}
	}
}
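
// Usage sketch (assumption, not from the original source): indexing two documents and
// then flushing so a subsequent Search can see them. searcher is an initialized Engine
// (see the sketch after Init); the doc IDs and content are placeholders.
//
//	searcher.IndexDocument(1, types.DocumentIndexData{Content: "此次会议在北京举行"}, false)
//	searcher.IndexDocument(2, types.DocumentIndexData{Content: "会议发表重要讲话"}, false)
//	searcher.FlushIndex() // block until both documents are searchable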

func (engine *Engine) internalIndexDocument(
	docId uint64, data types.DocumentIndexData, forceUpdate bool) {
	if !engine.initialized {
		log.Fatal("the engine must be initialized first")
	}

	if docId != 0 {
		atomic.AddUint64(&engine.numIndexingRequests, 1)
	}
	if forceUpdate {
		atomic.AddUint64(&engine.numForceUpdatingRequests, 1)
	}
	hash := murmur.Murmur3([]byte(fmt.Sprintf("%d%s", docId, data.Content)))
	engine.segmenterChannel <- segmenterRequest{
		docId: docId, hash: hash, data: data, forceUpdate: forceUpdate}
}

// RemoveDocument removes a document from the index.
//
// Parameters:
//	docId	document identifier; must be unique. docId == 0 marks an invalid document
//		(used to force an index flush), while [1, +inf) marks valid documents.
//
// Notes:
//	1. This function is thread-safe; call it concurrently where possible to speed up indexing.
//	2. The call is asynchronous: when it returns, the document may not yet have been removed
//	   from the index, so an immediate Search may still find it. Call FlushIndex to force a flush.
func (engine *Engine) RemoveDocument(docId uint64, forceUpdate bool) {
	if !engine.initialized {
		log.Fatal("the engine must be initialized first")
	}

	if docId != 0 {
		atomic.AddUint64(&engine.numRemovingRequests, 1)
	}
	if forceUpdate {
		atomic.AddUint64(&engine.numForceUpdatingRequests, 1)
	}

	for shard := 0; shard < engine.initOptions.NumShards; shard++ {
		engine.indexerRemoveDocChannels[shard] <- indexerRemoveDocRequest{docId: docId, forceUpdate: forceUpdate}
		if docId == 0 {
			continue
		}
		engine.rankerRemoveDocChannels[shard] <- rankerRemoveDocRequest{docId: docId}
	}

	if engine.initOptions.UsePersistentStorage && docId != 0 {
		// Delete the document from the persistent storage shard it was hashed to
		hash := murmur.Murmur3([]byte(fmt.Sprintf("%d", docId))) % uint32(engine.initOptions.PersistentStorageShards)
		go engine.persistentStorageRemoveDocumentWorker(docId, hash)
	}
}
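
// Usage sketch (assumption): removing a previously indexed document and flushing so the
// removal becomes visible to later searches.
//
//	searcher.RemoveDocument(1, false)
//	searcher.FlushIndex()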

// Search finds the documents matching the search request. This function is thread-safe.
func (engine *Engine) Search(request types.SearchRequest) (output types.SearchResponse) {
	if !engine.initialized {
		log.Fatal("the engine must be initialized first")
	}

	var rankOptions types.RankOptions
	if request.RankOptions == nil {
		rankOptions = *engine.initOptions.DefaultRankOptions
	} else {
		rankOptions = *request.RankOptions
	}
	if rankOptions.ScoringCriteria == nil {
		rankOptions.ScoringCriteria = engine.initOptions.DefaultRankOptions.ScoringCriteria
	}

	// Collect the query tokens: segment request.Text if present, otherwise use request.Tokens
	tokens := []string{}
	if request.Text != "" {
		querySegments := engine.segmenter.Segment([]byte(request.Text))
		for _, s := range querySegments {
			token := s.Token().Text()
			if !engine.stopTokens.IsStopToken(token) {
				tokens = append(tokens, token)
			}
		}
	} else {
		tokens = append(tokens, request.Tokens...)
	}

	// Create the channel on which the rankers return their results
	rankerReturnChannel := make(
		chan rankerReturnRequest, engine.initOptions.NumShards)

	// Build the lookup request
	lookupRequest := indexerLookupRequest{
		countDocsOnly:       request.CountDocsOnly,
		tokens:              tokens,
		labels:              request.Labels,
		docIds:              request.DocIds,
		options:             rankOptions,
		rankerReturnChannel: rankerReturnChannel,
		orderless:           request.Orderless,
	}

	// Send the lookup request to every indexer shard
	for shard := 0; shard < engine.initOptions.NumShards; shard++ {
		engine.indexerLookupChannels[shard] <- lookupRequest
	}

	// Read the ranker output from the return channel
	numDocs := 0
	rankOutput := types.ScoredDocuments{}
	timeout := request.Timeout
	isTimeout := false
	if timeout <= 0 {
		// No timeout set: wait for every shard
		for shard := 0; shard < engine.initOptions.NumShards; shard++ {
			rankerOutput := <-rankerReturnChannel
			if !request.CountDocsOnly {
				rankOutput = append(rankOutput, rankerOutput.docs...)
			}
			numDocs += rankerOutput.numDocs
		}
	} else {
		// Timeout set: stop collecting once the deadline passes
		deadline := time.Now().Add(time.Nanosecond * time.Duration(NumNanosecondsInAMillisecond*request.Timeout))
		for shard := 0; shard < engine.initOptions.NumShards; shard++ {
			select {
			case rankerOutput := <-rankerReturnChannel:
				if !request.CountDocsOnly {
					rankOutput = append(rankOutput, rankerOutput.docs...)
				}
				numDocs += rankerOutput.numDocs
			case <-time.After(deadline.Sub(time.Now())):
				// The remaining shards will also hit the (already expired) deadline immediately
				isTimeout = true
			}
		}
	}

	// Sort the merged results
	if !request.CountDocsOnly && !request.Orderless {
		if rankOptions.ReverseOrder {
			sort.Sort(sort.Reverse(rankOutput))
		} else {
			sort.Sort(rankOutput)
		}
	}

	// Prepare the output
	output.Tokens = tokens
	// output.Docs is only populated when CountDocsOnly is false
	if !request.CountDocsOnly {
		if request.Orderless {
			// Orderless results need no offset truncation
			output.Docs = rankOutput
		} else {
			var start, end int
			if rankOptions.MaxOutputs == 0 {
				start = utils.MinInt(rankOptions.OutputOffset, len(rankOutput))
				end = len(rankOutput)
			} else {
				start = utils.MinInt(rankOptions.OutputOffset, len(rankOutput))
				end = utils.MinInt(start+rankOptions.MaxOutputs, len(rankOutput))
			}
			output.Docs = rankOutput[start:end]
		}
	}
	output.NumDocs = numDocs
	output.Timeout = isTimeout
	return
}
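
// Usage sketch (assumption): a full-text query with a 100 ms timeout. Field names follow
// the SearchRequest fields referenced above; the loop assumes types.ScoredDocument
// exposes DocId and Scores.
//
//	response := searcher.Search(types.SearchRequest{
//		Text:    "会议",
//		Timeout: 100, // milliseconds
//	})
//	for _, doc := range response.Docs {
//		fmt.Println(doc.DocId, doc.Scores)
//	}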

// FlushIndex blocks until all pending additions and removals have been applied to the index.
func (engine *Engine) FlushIndex() {
	// Force an update. Because requests in the channels are unordered, stale entries may
	// otherwise remain in the cache.
	engine.RemoveDocument(0, true)
	engine.IndexDocument(0, types.DocumentIndexData{}, true)
	for {
		runtime.Gosched()
		if atomic.LoadUint64(&engine.numIndexingRequests) == atomic.LoadUint64(&engine.numDocumentsIndexed) &&
			atomic.LoadUint64(&engine.numRemovingRequests)*uint64(engine.initOptions.NumShards) == atomic.LoadUint64(&engine.numDocumentsRemoved) &&
			atomic.LoadUint64(&engine.numForceUpdatingRequests)*uint64(engine.initOptions.NumShards) == atomic.LoadUint64(&engine.numDocumentsForceUpdated) &&
			(!engine.initOptions.UsePersistentStorage ||
				atomic.LoadUint64(&engine.numIndexingRequests) == atomic.LoadUint64(&engine.numDocumentsStored)) {
			return
		}
	}
}

// Close shuts down the engine, flushing the index and closing the persistent storage databases.
func (engine *Engine) Close() {
	engine.FlushIndex()
	if engine.initOptions.UsePersistentStorage {
		for _, db := range engine.dbs {
			db.Close()
		}
	}
}
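
// Usage sketch (assumption): a typical shutdown, deferring Close right after Init so that
// pending writes are flushed and the persistent storage shards are released.
//
//	var searcher Engine
//	searcher.Init(types.EngineInitOptions{
//		UsePersistentStorage:    true,
//		PersistentStorageFolder: "wukong.db",
//	})
//	defer searcher.Close()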

// getShard maps a text hash to the shard it is assigned to.
func (engine *Engine) getShard(hash uint32) int {
	// Equivalent to the original hash - hash/n*n, i.e. hash modulo the number of shards.
	return int(hash % uint32(engine.initOptions.NumShards))
}