engine.go

package engine

import (
	"fmt"
	"log"
	"os"
	"runtime"
	"sort"
	"strconv"
	"sync/atomic"
	"time"

	"github.com/huichen/murmur"
	"github.com/huichen/sego"
	"github.com/huichen/wukong/core"
	"github.com/huichen/wukong/storage"
	"github.com/huichen/wukong/types"
	"github.com/huichen/wukong/utils"
)

const (
	NumNanosecondsInAMillisecond = 1000000
	PersistentStorageFilePrefix  = "wukong"
)
type Engine struct {
	// Counters tracking how many documents have been indexed, removed, etc.
	numDocumentsIndexed      uint64
	numDocumentsRemoved      uint64
	numDocumentsForceUpdated uint64
	numIndexingRequests      uint64
	numRemovingRequests      uint64
	numForceUpdatingRequests uint64
	numTokenIndexAdded       uint64
	numDocumentsStored       uint64

	// Initialization options and state
	initOptions types.EngineInitOptions
	initialized bool

	indexers   []core.Indexer
	rankers    []core.Ranker
	segmenter  sego.Segmenter
	stopTokens StopTokens
	dbs        []storage.Storage

	// Channels used by the indexers
	segmenterChannel         chan segmenterRequest
	indexerAddDocChannels    []chan indexerAddDocumentRequest
	indexerRemoveDocChannels []chan indexerRemoveDocRequest
	rankerAddDocChannels     []chan rankerAddDocRequest

	// Channels used by the rankers
	indexerLookupChannels   []chan indexerLookupRequest
	rankerRankChannels      []chan rankerRankRequest
	rankerRemoveDocChannels []chan rankerRemoveDocRequest

	// Channels used by persistent storage
	persistentStorageIndexDocumentChannels []chan persistentStorageIndexDocumentRequest
	persistentStorageInitChannel           chan bool
}
func (engine *Engine) Init(options types.EngineInitOptions) {
	// Use as many OS threads as there are CPUs
	runtime.GOMAXPROCS(runtime.NumCPU())

	// Validate and record the initialization options
	if engine.initialized {
		log.Fatal("The engine must not be initialized more than once")
	}
	options.Init()
	engine.initOptions = options
	engine.initialized = true

	if !options.NotUsingSegmenter {
		// Load the segmenter dictionary
		engine.segmenter.LoadDictionary(options.SegmenterDictionaries)

		// Initialize the stop-token list
		engine.stopTokens.Init(options.StopTokenFile)
	}

	// Initialize the indexers and rankers, one per shard
	for shard := 0; shard < options.NumShards; shard++ {
		engine.indexers = append(engine.indexers, core.Indexer{})
		engine.indexers[shard].Init(*options.IndexerInitOptions)

		engine.rankers = append(engine.rankers, core.Ranker{})
		engine.rankers[shard].Init()
	}
	// Initialize the segmenter channel
	engine.segmenterChannel = make(
		chan segmenterRequest, options.NumSegmenterThreads)

	// Initialize the indexer channels
	engine.indexerAddDocChannels = make(
		[]chan indexerAddDocumentRequest, options.NumShards)
	engine.indexerRemoveDocChannels = make(
		[]chan indexerRemoveDocRequest, options.NumShards)
	engine.indexerLookupChannels = make(
		[]chan indexerLookupRequest, options.NumShards)
	for shard := 0; shard < options.NumShards; shard++ {
		engine.indexerAddDocChannels[shard] = make(
			chan indexerAddDocumentRequest,
			options.IndexerBufferLength)
		engine.indexerRemoveDocChannels[shard] = make(
			chan indexerRemoveDocRequest,
			options.IndexerBufferLength)
		engine.indexerLookupChannels[shard] = make(
			chan indexerLookupRequest,
			options.IndexerBufferLength)
	}

	// Initialize the ranker channels
	engine.rankerAddDocChannels = make(
		[]chan rankerAddDocRequest, options.NumShards)
	engine.rankerRankChannels = make(
		[]chan rankerRankRequest, options.NumShards)
	engine.rankerRemoveDocChannels = make(
		[]chan rankerRemoveDocRequest, options.NumShards)
	for shard := 0; shard < options.NumShards; shard++ {
		engine.rankerAddDocChannels[shard] = make(
			chan rankerAddDocRequest,
			options.RankerBufferLength)
		engine.rankerRankChannels[shard] = make(
			chan rankerRankRequest,
			options.RankerBufferLength)
		engine.rankerRemoveDocChannels[shard] = make(
			chan rankerRemoveDocRequest,
			options.RankerBufferLength)
	}

	// Initialize the persistent-storage channels
	if engine.initOptions.UsePersistentStorage {
		engine.persistentStorageIndexDocumentChannels =
			make([]chan persistentStorageIndexDocumentRequest,
				engine.initOptions.PersistentStorageShards)
		for shard := 0; shard < engine.initOptions.PersistentStorageShards; shard++ {
			engine.persistentStorageIndexDocumentChannels[shard] = make(
				chan persistentStorageIndexDocumentRequest)
		}
		engine.persistentStorageInitChannel = make(
			chan bool, engine.initOptions.PersistentStorageShards)
	}
	// Start the segmenter workers
	for iThread := 0; iThread < options.NumSegmenterThreads; iThread++ {
		go engine.segmenterWorker()
	}

	// Start the indexer and ranker workers
	for shard := 0; shard < options.NumShards; shard++ {
		go engine.indexerAddDocumentWorker(shard)
		go engine.indexerRemoveDocWorker(shard)
		go engine.rankerAddDocWorker(shard)
		go engine.rankerRemoveDocWorker(shard)

		for i := 0; i < options.NumIndexerThreadsPerShard; i++ {
			go engine.indexerLookupWorker(shard)
		}
		for i := 0; i < options.NumRankerThreadsPerShard; i++ {
			go engine.rankerRankWorker(shard)
		}
	}
	// Start the persistent-storage workers
	if engine.initOptions.UsePersistentStorage {
		err := os.MkdirAll(engine.initOptions.PersistentStorageFolder, 0700)
		if err != nil {
			log.Fatal("Failed to create directory ", engine.initOptions.PersistentStorageFolder)
		}

		// Open or create the databases, one per storage shard
		engine.dbs = make([]storage.Storage, engine.initOptions.PersistentStorageShards)
		for shard := 0; shard < engine.initOptions.PersistentStorageShards; shard++ {
			dbPath := engine.initOptions.PersistentStorageFolder + "/" + PersistentStorageFilePrefix + "." + strconv.Itoa(shard)
			db, err := storage.OpenStorage(dbPath)
			if db == nil || err != nil {
				log.Fatal("Failed to open database ", dbPath, ": ", err)
			}
			engine.dbs[shard] = db
		}

		// Recover previously indexed documents from the databases
		for shard := 0; shard < engine.initOptions.PersistentStorageShards; shard++ {
			go engine.persistentStorageInitWorker(shard)
		}

		// Wait until recovery has finished
		for shard := 0; shard < engine.initOptions.PersistentStorageShards; shard++ {
			<-engine.persistentStorageInitChannel
		}
		for {
			runtime.Gosched()
			if engine.numIndexingRequests == engine.numDocumentsIndexed {
				break
			}
		}

		// Close and reopen the databases
		for shard := 0; shard < engine.initOptions.PersistentStorageShards; shard++ {
			engine.dbs[shard].Close()
			dbPath := engine.initOptions.PersistentStorageFolder + "/" + PersistentStorageFilePrefix + "." + strconv.Itoa(shard)
			db, err := storage.OpenStorage(dbPath)
			if db == nil || err != nil {
				log.Fatal("Failed to open database ", dbPath, ": ", err)
			}
			engine.dbs[shard] = db
		}

		for shard := 0; shard < engine.initOptions.PersistentStorageShards; shard++ {
			go engine.persistentStorageIndexDocumentWorker(shard)
		}
	}

	atomic.AddUint64(&engine.numDocumentsStored, engine.numIndexingRequests)
}
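
// A minimal initialization sketch; the option fields used here are the ones referenced
// in Init above, and the dictionary path is a placeholder:
//
//	var searcher Engine
//	searcher.Init(types.EngineInitOptions{
//		SegmenterDictionaries: "path/to/dictionary.txt",
//	})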
// IndexDocument adds a document to the index.
//
// Input arguments:
//	docId       unique document identifier; docId == 0 marks an invalid document
//	            (used to force-flush the index), while [1, +oo) marks valid documents
//	data        see the comments on DocumentIndexData
//	forceUpdate whether to force-flush the cache; if true the document is added to the
//	            index as soon as possible, otherwise it waits until the cache is full
//	            and is then added in one batch
//
// Notes:
//	1. This function is thread-safe; call it concurrently to speed up indexing.
//	2. The call is asynchronous, i.e. the document may not yet be in the index when the
//	   function returns, so an immediate Search may not find it. To force-flush the
//	   index, call FlushIndex.
func (engine *Engine) IndexDocument(docId uint64, data types.DocumentIndexData, forceUpdate bool) {
	engine.internalIndexDocument(docId, data, forceUpdate)

	if engine.initOptions.UsePersistentStorage && docId != 0 {
		hash := murmur.Murmur3([]byte(fmt.Sprintf("%d", docId))) % uint32(engine.initOptions.PersistentStorageShards)
		engine.persistentStorageIndexDocumentChannels[hash] <- persistentStorageIndexDocumentRequest{docId: docId, data: data}
	}
}
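
// A usage sketch, assuming the Content field of DocumentIndexData as referenced in
// internalIndexDocument below:
//
//	searcher.IndexDocument(1, types.DocumentIndexData{Content: "to be or not to be"}, false)
//	searcher.FlushIndex() // block until the document is searchable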
func (engine *Engine) internalIndexDocument(
	docId uint64, data types.DocumentIndexData, forceUpdate bool) {
	if !engine.initialized {
		log.Fatal("The engine must be initialized first")
	}

	if docId != 0 {
		atomic.AddUint64(&engine.numIndexingRequests, 1)
	}
	if forceUpdate {
		atomic.AddUint64(&engine.numForceUpdatingRequests, 1)
	}
	hash := murmur.Murmur3([]byte(fmt.Sprintf("%d%s", docId, data.Content)))
	engine.segmenterChannel <- segmenterRequest{
		docId: docId, hash: hash, data: data, forceUpdate: forceUpdate}
}
// RemoveDocument removes a document from the index.
//
// Input arguments:
//	docId       unique document identifier; docId == 0 marks an invalid document
//	            (used to force-flush the index), while [1, +oo) marks valid documents
//	forceUpdate whether to force-flush the cache; if true the document is removed from
//	            the index as soon as possible, otherwise it waits until the cache is
//	            full and is then removed in one batch
//
// Notes:
//	1. This function is thread-safe; call it concurrently to speed up indexing.
//	2. The call is asynchronous, i.e. the document may not yet be removed from the index
//	   when the function returns, so an immediate Search may still find it. To
//	   force-flush the index, call FlushIndex.
func (engine *Engine) RemoveDocument(docId uint64, forceUpdate bool) {
	if !engine.initialized {
		log.Fatal("The engine must be initialized first")
	}

	if docId != 0 {
		atomic.AddUint64(&engine.numRemovingRequests, 1)
	}
	if forceUpdate {
		atomic.AddUint64(&engine.numForceUpdatingRequests, 1)
	}
	for shard := 0; shard < engine.initOptions.NumShards; shard++ {
		engine.indexerRemoveDocChannels[shard] <- indexerRemoveDocRequest{docId: docId, forceUpdate: forceUpdate}
		if docId == 0 {
			continue
		}
		engine.rankerRemoveDocChannels[shard] <- rankerRemoveDocRequest{docId: docId}
	}

	if engine.initOptions.UsePersistentStorage && docId != 0 {
		// Remove the document from the database
		hash := murmur.Murmur3([]byte(fmt.Sprintf("%d", docId))) % uint32(engine.initOptions.PersistentStorageShards)
		go engine.persistentStorageRemoveDocumentWorker(docId, hash)
	}
}
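
// A removal sketch mirroring the indexing example above:
//
//	searcher.RemoveDocument(1, true) // forceUpdate flushes the removal promptly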
// Search finds the documents matching the search request. This function is thread-safe.
func (engine *Engine) Search(request types.SearchRequest) (output types.SearchResponse) {
	if !engine.initialized {
		log.Fatal("The engine must be initialized first")
	}

	var rankOptions types.RankOptions
	if request.RankOptions == nil {
		rankOptions = *engine.initOptions.DefaultRankOptions
	} else {
		rankOptions = *request.RankOptions
	}
	if rankOptions.ScoringCriteria == nil {
		rankOptions.ScoringCriteria = engine.initOptions.DefaultRankOptions.ScoringCriteria
	}

	// Collect the query tokens: segment the query text if given, otherwise use the
	// tokens supplied in the request
	tokens := []string{}
	if request.Text != "" {
		querySegments := engine.segmenter.Segment([]byte(request.Text))
		for _, s := range querySegments {
			token := s.Token().Text()
			if !engine.stopTokens.IsStopToken(token) {
				tokens = append(tokens, token)
			}
		}
	} else {
		for _, t := range request.Tokens {
			tokens = append(tokens, t)
		}
	}
	// Channel on which the rankers return their results
	rankerReturnChannel := make(
		chan rankerReturnRequest, engine.initOptions.NumShards)

	// Build the lookup request
	lookupRequest := indexerLookupRequest{
		countDocsOnly:       request.CountDocsOnly,
		tokens:              tokens,
		labels:              request.Labels,
		docIds:              request.DocIds,
		options:             rankOptions,
		rankerReturnChannel: rankerReturnChannel,
		orderless:           request.Orderless,
	}

	// Send the lookup request to every indexer shard
	for shard := 0; shard < engine.initOptions.NumShards; shard++ {
		engine.indexerLookupChannels[shard] <- lookupRequest
	}

	// Read the rankers' output from the return channel
	numDocs := 0
	rankOutput := types.ScoredDocuments{}
	timeout := request.Timeout
	isTimeout := false
	if timeout <= 0 {
		// No timeout set
		for shard := 0; shard < engine.initOptions.NumShards; shard++ {
			rankerOutput := <-rankerReturnChannel
			if !request.CountDocsOnly {
				for _, doc := range rankerOutput.docs {
					rankOutput = append(rankOutput, doc)
				}
			}
			numDocs += rankerOutput.numDocs
		}
	} else {
		// With a timeout; note that the break below only exits the select, so once the
		// deadline has passed the remaining iterations time out immediately
		deadline := time.Now().Add(time.Nanosecond * time.Duration(NumNanosecondsInAMillisecond*request.Timeout))
		for shard := 0; shard < engine.initOptions.NumShards; shard++ {
			select {
			case rankerOutput := <-rankerReturnChannel:
				if !request.CountDocsOnly {
					for _, doc := range rankerOutput.docs {
						rankOutput = append(rankOutput, doc)
					}
				}
				numDocs += rankerOutput.numDocs
			case <-time.After(deadline.Sub(time.Now())):
				isTimeout = true
				break
			}
		}
	}
	// Sort the collected documents
	if !request.CountDocsOnly && !request.Orderless {
		if rankOptions.ReverseOrder {
			sort.Sort(sort.Reverse(rankOutput))
		} else {
			sort.Sort(rankOutput)
		}
	}

	// Prepare the output
	output.Tokens = tokens
	// output.Docs is only filled when CountDocsOnly is false
	if !request.CountDocsOnly {
		if request.Orderless {
			// In orderless mode there is no need to truncate at OutputOffset
			output.Docs = rankOutput
		} else {
			var start, end int
			if rankOptions.MaxOutputs == 0 {
				start = utils.MinInt(rankOptions.OutputOffset, len(rankOutput))
				end = len(rankOutput)
			} else {
				start = utils.MinInt(rankOptions.OutputOffset, len(rankOutput))
				end = utils.MinInt(start+rankOptions.MaxOutputs, len(rankOutput))
			}
			output.Docs = rankOutput[start:end]
		}
	}
	output.NumDocs = numDocs
	output.Timeout = isTimeout
	return
}
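
// A query sketch; the SearchRequest and SearchResponse fields are those referenced in
// Search above, and Timeout is in milliseconds (see NumNanosecondsInAMillisecond):
//
//	response := searcher.Search(types.SearchRequest{Text: "search query", Timeout: 100})
//	log.Print(response.NumDocs, response.Timeout, len(response.Docs))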
// FlushIndex blocks until all pending additions have been indexed.
func (engine *Engine) FlushIndex() {
	// Force an update; because requests in the channels are unordered, the cache may
	// still hold leftovers
	engine.RemoveDocument(0, true)
	engine.IndexDocument(0, types.DocumentIndexData{}, true)
	for {
		runtime.Gosched()
		if engine.numIndexingRequests == engine.numDocumentsIndexed &&
			engine.numRemovingRequests*uint64(engine.initOptions.NumShards) == engine.numDocumentsRemoved &&
			engine.numForceUpdatingRequests*uint64(engine.initOptions.NumShards) ==
				engine.numDocumentsForceUpdated && (!engine.initOptions.UsePersistentStorage ||
			engine.numIndexingRequests == engine.numDocumentsStored) {
			return
		}
	}
}
// Close shuts down the engine.
func (engine *Engine) Close() {
	engine.FlushIndex()
	if engine.initOptions.UsePersistentStorage {
		for _, db := range engine.dbs {
			db.Close()
		}
	}
}
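
// A shutdown sketch; Close flushes pending requests before closing the databases, so a
// deferred call after Init is sufficient:
//
//	defer searcher.Close()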
// getShard maps a text hash to the shard it is assigned to, i.e. hash modulo NumShards.
func (engine *Engine) getShard(hash uint32) int {
	return int(hash % uint32(engine.initOptions.NumShards))
}