engine.go 14 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445
  1. package engine
  2. import (
  3. "fmt"
  4. "github.com/huichen/murmur"
  5. "github.com/huichen/sego"
  6. "github.com/huichen/wukong/core"
  7. "github.com/huichen/wukong/storage"
  8. "github.com/huichen/wukong/types"
  9. "github.com/huichen/wukong/utils"
  10. "log"
  11. "os"
  12. "runtime"
  13. "sort"
  14. "strconv"
  15. "sync/atomic"
  16. "time"
  17. )
  18. const (
  19. NumNanosecondsInAMillisecond = 1000000
  20. PersistentStorageFilePrefix = "wukong"
  21. )
// Engine is the search engine. It owns the word segmenter, the sharded
// indexers and rankers, the optional persistent-storage shards, and the
// channels used to communicate with their worker goroutines.
type Engine struct {
	// Counters tracking how many documents have been indexed, removed,
	// force-updated, etc. They are incremented with sync/atomic by the
	// worker goroutines.
	numDocumentsIndexed      uint64
	numDocumentsRemoved      uint64
	numDocumentsForceUpdated uint64
	numIndexingRequests      uint64
	numRemovingRequests      uint64
	numForceUpdatingRequests uint64
	numTokenIndexAdded       uint64
	numDocumentsStored       uint64

	// Options recorded by Init; initialized guards against calling Init
	// twice or using the engine before Init.
	initOptions types.EngineInitOptions
	initialized bool

	// One indexer and one ranker per shard (NumShards entries each).
	indexers   []core.Indexer
	rankers    []core.Ranker
	segmenter  sego.Segmenter
	stopTokens StopTokens
	// One storage database per persistent-storage shard.
	dbs []storage.Storage

	// Channels feeding the segmenter and the per-shard index-building workers.
	segmenterChannel         chan segmenterRequest
	indexerAddDocChannels    []chan indexerAddDocumentRequest
	indexerRemoveDocChannels []chan indexerRemoveDocRequest
	rankerAddDocChannels     []chan rankerAddDocRequest

	// Channels used for lookups and ranking.
	indexerLookupChannels   []chan indexerLookupRequest
	rankerRankChannels      []chan rankerRankRequest
	rankerRemoveDocChannels []chan rankerRemoveDocRequest

	// Channels used by the persistent-storage workers; persistentStorageInitChannel
	// signals completion of the per-shard restore at startup.
	persistentStorageIndexDocumentChannels []chan persistentStorageIndexDocumentRequest
	persistentStorageInitChannel           chan bool
}
  53. func (engine *Engine) Init(options types.EngineInitOptions) {
  54. // 将线程数设置为CPU数
  55. runtime.GOMAXPROCS(runtime.NumCPU())
  56. // 初始化初始参数
  57. if engine.initialized {
  58. log.Fatal("请勿重复初始化引擎")
  59. }
  60. options.Init()
  61. engine.initOptions = options
  62. engine.initialized = true
  63. if !options.NotUsingSegmenter {
  64. // 载入分词器词典
  65. engine.segmenter.LoadDictionary(options.SegmenterDictionaries)
  66. // 初始化停用词
  67. engine.stopTokens.Init(options.StopTokenFile)
  68. }
  69. // 初始化索引器和排序器
  70. for shard := 0; shard < options.NumShards; shard++ {
  71. engine.indexers = append(engine.indexers, core.Indexer{})
  72. engine.indexers[shard].Init(*options.IndexerInitOptions)
  73. engine.rankers = append(engine.rankers, core.Ranker{})
  74. engine.rankers[shard].Init()
  75. }
  76. // 初始化分词器通道
  77. engine.segmenterChannel = make(
  78. //chan segmenterRequest)
  79. chan segmenterRequest, options.NumSegmenterThreads)
  80. // 初始化索引器通道
  81. engine.indexerAddDocChannels = make(
  82. []chan indexerAddDocumentRequest, options.NumShards)
  83. engine.indexerRemoveDocChannels = make(
  84. []chan indexerRemoveDocRequest, options.NumShards)
  85. engine.indexerLookupChannels = make(
  86. []chan indexerLookupRequest, options.NumShards)
  87. for shard := 0; shard < options.NumShards; shard++ {
  88. engine.indexerAddDocChannels[shard] = make(
  89. //chan indexerAddDocumentRequest)
  90. chan indexerAddDocumentRequest,
  91. options.IndexerBufferLength)
  92. engine.indexerRemoveDocChannels[shard] = make(
  93. //chan indexerRemoveDocRequest)
  94. chan indexerRemoveDocRequest,
  95. options.IndexerBufferLength)
  96. engine.indexerLookupChannels[shard] = make(
  97. chan indexerLookupRequest,
  98. options.IndexerBufferLength)
  99. }
  100. // 初始化排序器通道
  101. engine.rankerAddDocChannels = make(
  102. []chan rankerAddDocRequest, options.NumShards)
  103. engine.rankerRankChannels = make(
  104. []chan rankerRankRequest, options.NumShards)
  105. engine.rankerRemoveDocChannels = make(
  106. []chan rankerRemoveDocRequest, options.NumShards)
  107. for shard := 0; shard < options.NumShards; shard++ {
  108. engine.rankerAddDocChannels[shard] = make(
  109. chan rankerAddDocRequest,
  110. options.RankerBufferLength)
  111. engine.rankerRankChannels[shard] = make(
  112. chan rankerRankRequest,
  113. options.RankerBufferLength)
  114. engine.rankerRemoveDocChannels[shard] = make(
  115. chan rankerRemoveDocRequest,
  116. options.RankerBufferLength)
  117. }
  118. // 初始化持久化存储通道
  119. if engine.initOptions.UsePersistentStorage {
  120. engine.persistentStorageIndexDocumentChannels =
  121. make([]chan persistentStorageIndexDocumentRequest,
  122. engine.initOptions.PersistentStorageShards)
  123. for shard := 0; shard < engine.initOptions.PersistentStorageShards; shard++ {
  124. engine.persistentStorageIndexDocumentChannels[shard] = make(
  125. chan persistentStorageIndexDocumentRequest)
  126. }
  127. engine.persistentStorageInitChannel = make(
  128. chan bool, engine.initOptions.PersistentStorageShards)
  129. }
  130. // 启动分词器
  131. for iThread := 0; iThread < options.NumSegmenterThreads; iThread++ {
  132. go engine.segmenterWorker()
  133. }
  134. // 启动索引器和排序器
  135. for shard := 0; shard < options.NumShards; shard++ {
  136. go engine.indexerAddDocumentWorker(shard)
  137. go engine.indexerRemoveDocWorker(shard)
  138. go engine.rankerAddDocWorker(shard)
  139. go engine.rankerRemoveDocWorker(shard)
  140. for i := 0; i < options.NumIndexerThreadsPerShard; i++ {
  141. go engine.indexerLookupWorker(shard)
  142. }
  143. for i := 0; i < options.NumRankerThreadsPerShard; i++ {
  144. go engine.rankerRankWorker(shard)
  145. }
  146. }
  147. // 启动持久化存储工作协程
  148. if engine.initOptions.UsePersistentStorage {
  149. err := os.MkdirAll(engine.initOptions.PersistentStorageFolder, 0700)
  150. if err != nil {
  151. log.Fatal("无法创建目录", engine.initOptions.PersistentStorageFolder)
  152. }
  153. // 打开或者创建数据库
  154. engine.dbs = make([]storage.Storage, engine.initOptions.PersistentStorageShards)
  155. for shard := 0; shard < engine.initOptions.PersistentStorageShards; shard++ {
  156. dbPath := engine.initOptions.PersistentStorageFolder + "/" + PersistentStorageFilePrefix + "." + strconv.Itoa(shard)
  157. db, err := storage.OpenStorage(dbPath)
  158. if db == nil || err != nil {
  159. log.Fatal("无法打开数据库", dbPath, ": ", err)
  160. }
  161. engine.dbs[shard] = db
  162. }
  163. // 从数据库中恢复
  164. for shard := 0; shard < engine.initOptions.PersistentStorageShards; shard++ {
  165. go engine.persistentStorageInitWorker(shard)
  166. }
  167. // 等待恢复完成
  168. for shard := 0; shard < engine.initOptions.PersistentStorageShards; shard++ {
  169. <-engine.persistentStorageInitChannel
  170. }
  171. for {
  172. runtime.Gosched()
  173. if engine.numIndexingRequests == engine.numDocumentsIndexed {
  174. break
  175. }
  176. }
  177. // 关闭并重新打开数据库
  178. for shard := 0; shard < engine.initOptions.PersistentStorageShards; shard++ {
  179. engine.dbs[shard].Close()
  180. dbPath := engine.initOptions.PersistentStorageFolder + "/" + PersistentStorageFilePrefix + "." + strconv.Itoa(shard)
  181. db, err := storage.OpenStorage(dbPath)
  182. if db == nil || err != nil {
  183. log.Fatal("无法打开数据库", dbPath, ": ", err)
  184. }
  185. engine.dbs[shard] = db
  186. }
  187. for shard := 0; shard < engine.initOptions.PersistentStorageShards; shard++ {
  188. go engine.persistentStorageIndexDocumentWorker(shard)
  189. }
  190. }
  191. atomic.AddUint64(&engine.numDocumentsStored, engine.numIndexingRequests)
  192. }
  193. // 将文档加入索引
  194. //
  195. // 输入参数:
  196. // docId 标识文档编号,必须唯一,docId == 0 表示非法文档(用于强制刷新索引),[1, +oo) 表示合法文档
  197. // data 见DocumentIndexData注释
  198. // forceUpdate 是否强制刷新 cache,如果设为 true,则尽快添加到索引,否则等待 cache 满之后一次全量添加
  199. //
  200. // 注意:
  201. // 1. 这个函数是线程安全的,请尽可能并发调用以提高索引速度
  202. // 2. 这个函数调用是非同步的,也就是说在函数返回时有可能文档还没有加入索引中,因此
  203. // 如果立刻调用Search可能无法查询到这个文档。强制刷新索引请调用FlushIndex函数。
  204. func (engine *Engine) IndexDocument(docId uint64, data types.DocumentIndexData, forceUpdate bool) {
  205. engine.internalIndexDocument(docId, data, forceUpdate)
  206. hash := murmur.Murmur3([]byte(fmt.Sprint("%d", docId))) % uint32(engine.initOptions.PersistentStorageShards)
  207. if engine.initOptions.UsePersistentStorage && docId != 0 {
  208. engine.persistentStorageIndexDocumentChannels[hash] <- persistentStorageIndexDocumentRequest{docId: docId, data: data}
  209. }
  210. }
  211. func (engine *Engine) internalIndexDocument(
  212. docId uint64, data types.DocumentIndexData, forceUpdate bool) {
  213. if !engine.initialized {
  214. log.Fatal("必须先初始化引擎")
  215. }
  216. if docId != 0 {
  217. atomic.AddUint64(&engine.numIndexingRequests, 1)
  218. }
  219. if forceUpdate {
  220. atomic.AddUint64(&engine.numForceUpdatingRequests, 1)
  221. }
  222. hash := murmur.Murmur3([]byte(fmt.Sprint("%d%s", docId, data.Content)))
  223. engine.segmenterChannel <- segmenterRequest{
  224. docId: docId, hash: hash, data: data, forceUpdate: forceUpdate}
  225. }
  226. // 将文档从索引中删除
  227. //
  228. // 输入参数:
  229. // docId 标识文档编号,必须唯一,docId == 0 表示非法文档(用于强制刷新索引),[1, +oo) 表示合法文档
  230. // forceUpdate 是否强制刷新 cache,如果设为 true,则尽快删除索引,否则等待 cache 满之后一次全量删除
  231. //
  232. // 注意:
  233. // 1. 这个函数是线程安全的,请尽可能并发调用以提高索引速度
  234. // 2. 这个函数调用是非同步的,也就是说在函数返回时有可能文档还没有加入索引中,因此
  235. // 如果立刻调用Search可能无法查询到这个文档。强制刷新索引请调用FlushIndex函数。
  236. func (engine *Engine) RemoveDocument(docId uint64, forceUpdate bool) {
  237. if !engine.initialized {
  238. log.Fatal("必须先初始化引擎")
  239. }
  240. if docId != 0 {
  241. atomic.AddUint64(&engine.numRemovingRequests, 1)
  242. }
  243. if forceUpdate {
  244. atomic.AddUint64(&engine.numForceUpdatingRequests, 1)
  245. }
  246. for shard := 0; shard < engine.initOptions.NumShards; shard++ {
  247. engine.indexerRemoveDocChannels[shard] <- indexerRemoveDocRequest{docId: docId, forceUpdate: forceUpdate}
  248. if docId == 0 {
  249. continue
  250. }
  251. engine.rankerRemoveDocChannels[shard] <- rankerRemoveDocRequest{docId: docId}
  252. }
  253. if engine.initOptions.UsePersistentStorage && docId != 0 {
  254. // 从数据库中删除
  255. hash := murmur.Murmur3([]byte(fmt.Sprint("%d", docId))) % uint32(engine.initOptions.PersistentStorageShards)
  256. go engine.persistentStorageRemoveDocumentWorker(docId, hash)
  257. }
  258. }
  259. // 查找满足搜索条件的文档,此函数线程安全
  260. func (engine *Engine) Search(request types.SearchRequest) (output types.SearchResponse) {
  261. if !engine.initialized {
  262. log.Fatal("必须先初始化引擎")
  263. }
  264. var rankOptions types.RankOptions
  265. if request.RankOptions == nil {
  266. rankOptions = *engine.initOptions.DefaultRankOptions
  267. } else {
  268. rankOptions = *request.RankOptions
  269. }
  270. if rankOptions.ScoringCriteria == nil {
  271. rankOptions.ScoringCriteria = engine.initOptions.DefaultRankOptions.ScoringCriteria
  272. }
  273. // 收集关键词
  274. tokens := []string{}
  275. if request.Text != "" {
  276. querySegments := engine.segmenter.Segment([]byte(request.Text))
  277. for _, s := range querySegments {
  278. token := s.Token().Text()
  279. if !engine.stopTokens.IsStopToken(token) {
  280. tokens = append(tokens, s.Token().Text())
  281. }
  282. }
  283. } else {
  284. for _, t := range request.Tokens {
  285. tokens = append(tokens, t)
  286. }
  287. }
  288. // 建立排序器返回的通信通道
  289. rankerReturnChannel := make(
  290. chan rankerReturnRequest, engine.initOptions.NumShards)
  291. // 生成查找请求
  292. lookupRequest := indexerLookupRequest{
  293. countDocsOnly: request.CountDocsOnly,
  294. tokens: tokens,
  295. labels: request.Labels,
  296. docIds: request.DocIds,
  297. options: rankOptions,
  298. rankerReturnChannel: rankerReturnChannel,
  299. orderless: request.Orderless,
  300. }
  301. // 向索引器发送查找请求
  302. for shard := 0; shard < engine.initOptions.NumShards; shard++ {
  303. engine.indexerLookupChannels[shard] <- lookupRequest
  304. }
  305. // 从通信通道读取排序器的输出
  306. numDocs := 0
  307. rankOutput := types.ScoredDocuments{}
  308. timeout := request.Timeout
  309. isTimeout := false
  310. if timeout <= 0 {
  311. // 不设置超时
  312. for shard := 0; shard < engine.initOptions.NumShards; shard++ {
  313. rankerOutput := <-rankerReturnChannel
  314. if !request.CountDocsOnly {
  315. for _, doc := range rankerOutput.docs {
  316. rankOutput = append(rankOutput, doc)
  317. }
  318. }
  319. numDocs += rankerOutput.numDocs
  320. }
  321. } else {
  322. // 设置超时
  323. deadline := time.Now().Add(time.Nanosecond * time.Duration(NumNanosecondsInAMillisecond*request.Timeout))
  324. for shard := 0; shard < engine.initOptions.NumShards; shard++ {
  325. select {
  326. case rankerOutput := <-rankerReturnChannel:
  327. if !request.CountDocsOnly {
  328. for _, doc := range rankerOutput.docs {
  329. rankOutput = append(rankOutput, doc)
  330. }
  331. }
  332. numDocs += rankerOutput.numDocs
  333. case <-time.After(deadline.Sub(time.Now())):
  334. isTimeout = true
  335. break
  336. }
  337. }
  338. }
  339. // 再排序
  340. if !request.CountDocsOnly && !request.Orderless {
  341. if rankOptions.ReverseOrder {
  342. sort.Sort(sort.Reverse(rankOutput))
  343. } else {
  344. sort.Sort(rankOutput)
  345. }
  346. }
  347. // 准备输出
  348. output.Tokens = tokens
  349. // 仅当CountDocsOnly为false时才充填output.Docs
  350. if !request.CountDocsOnly {
  351. if request.Orderless {
  352. // 无序状态无需对Offset截断
  353. output.Docs = rankOutput
  354. } else {
  355. var start, end int
  356. if rankOptions.MaxOutputs == 0 {
  357. start = utils.MinInt(rankOptions.OutputOffset, len(rankOutput))
  358. end = len(rankOutput)
  359. } else {
  360. start = utils.MinInt(rankOptions.OutputOffset, len(rankOutput))
  361. end = utils.MinInt(start+rankOptions.MaxOutputs, len(rankOutput))
  362. }
  363. output.Docs = rankOutput[start:end]
  364. }
  365. }
  366. output.NumDocs = numDocs
  367. output.Timeout = isTimeout
  368. return
  369. }
  370. // 阻塞等待直到所有索引添加完毕
  371. func (engine *Engine) FlushIndex() {
  372. // 强制更新,CHANNEL 中 REQUESTS 的无序性可能会导致 CACHE 中有残留
  373. engine.RemoveDocument(0, true)
  374. engine.IndexDocument(0, types.DocumentIndexData{}, true)
  375. for {
  376. runtime.Gosched()
  377. if engine.numIndexingRequests == engine.numDocumentsIndexed &&
  378. engine.numRemovingRequests*uint64(engine.initOptions.NumShards) == engine.numDocumentsRemoved &&
  379. engine.numForceUpdatingRequests*uint64(engine.initOptions.NumShards) ==
  380. engine.numDocumentsForceUpdated && (!engine.initOptions.UsePersistentStorage ||
  381. engine.numIndexingRequests == engine.numDocumentsStored) {
  382. return
  383. }
  384. }
  385. }
  386. // 关闭引擎
  387. func (engine *Engine) Close() {
  388. engine.FlushIndex()
  389. if engine.initOptions.UsePersistentStorage {
  390. for _, db := range engine.dbs {
  391. db.Close()
  392. }
  393. }
  394. }
  395. // 从文本hash得到要分配到的shard
  396. func (engine *Engine) getShard(hash uint32) int {
  397. return int(hash - hash/uint32(engine.initOptions.NumShards)*uint32(engine.initOptions.NumShards))
  398. }