engine.go

package engine

import (
    "fmt"
    "log"
    "os"
    "runtime"
    "sort"
    "strconv"
    "sync/atomic"
    "time"

    "github.com/huichen/murmur"
    "github.com/huichen/sego"
    "github.com/huichen/wukong/core"
    "github.com/huichen/wukong/storage"
    "github.com/huichen/wukong/types"
    "github.com/huichen/wukong/utils"
)

const (
    NumNanosecondsInAMillisecond = 1000000
    PersistentStorageFilePrefix  = "wukong"
)
type Engine struct {
    // Counters tracking how many documents have been indexed, stored, etc.
    numDocumentsIndexed uint64
    numIndexingRequests uint64
    numTokenIndexAdded  uint64
    numDocumentsStored  uint64

    // Initialization options and state
    initOptions types.EngineInitOptions
    initialized bool

    indexers   []core.Indexer
    rankers    []core.Ranker
    segmenter  sego.Segmenter
    stopTokens StopTokens
    dbs        []storage.Storage

    // Channels used to build the index
    segmenterChannel           chan segmenterRequest
    indexerAddDocumentChannels []chan indexerAddDocumentRequest
    rankerAddDocChannels       []chan rankerAddDocRequest

    // Channels used by the rankers
    indexerLookupChannels   []chan indexerLookupRequest
    rankerRankChannels      []chan rankerRankRequest
    rankerRemoveDocChannels []chan rankerRemoveDocRequest

    // Channels used by the persistent storage workers
    persistentStorageIndexDocumentChannels []chan persistentStorageIndexDocumentRequest
    persistentStorageInitChannel           chan bool
}
func (engine *Engine) Init(options types.EngineInitOptions) {
    // Use as many OS threads as there are CPUs
    runtime.GOMAXPROCS(runtime.NumCPU())

    // Record the initialization options
    if engine.initialized {
        log.Fatal("The engine must not be initialized more than once")
    }
    options.Init()
    engine.initOptions = options
    engine.initialized = true

    if !options.NotUsingSegmenter {
        // Load the segmenter dictionaries
        engine.segmenter.LoadDictionary(options.SegmenterDictionaries)

        // Initialize the stop tokens
        engine.stopTokens.Init(options.StopTokenFile)
    }

    // Initialize the indexers and rankers, one per shard
    for shard := 0; shard < options.NumShards; shard++ {
        engine.indexers = append(engine.indexers, core.Indexer{})
        engine.indexers[shard].Init(*options.IndexerInitOptions)

        engine.rankers = append(engine.rankers, core.Ranker{})
        engine.rankers[shard].Init()
    }

    // Initialize the segmenter channel
    engine.segmenterChannel = make(
        chan segmenterRequest, options.NumSegmenterThreads)

    // Initialize the indexer channels
    engine.indexerAddDocumentChannels = make(
        []chan indexerAddDocumentRequest, options.NumShards)
    engine.indexerLookupChannels = make(
        []chan indexerLookupRequest, options.NumShards)
    for shard := 0; shard < options.NumShards; shard++ {
        engine.indexerAddDocumentChannels[shard] = make(
            chan indexerAddDocumentRequest,
            options.IndexerBufferLength)
        engine.indexerLookupChannels[shard] = make(
            chan indexerLookupRequest,
            options.IndexerBufferLength)
    }

    // Initialize the ranker channels
    engine.rankerAddDocChannels = make(
        []chan rankerAddDocRequest, options.NumShards)
    engine.rankerRankChannels = make(
        []chan rankerRankRequest, options.NumShards)
    engine.rankerRemoveDocChannels = make(
        []chan rankerRemoveDocRequest, options.NumShards)
    for shard := 0; shard < options.NumShards; shard++ {
        engine.rankerAddDocChannels[shard] = make(
            chan rankerAddDocRequest,
            options.RankerBufferLength)
        engine.rankerRankChannels[shard] = make(
            chan rankerRankRequest,
            options.RankerBufferLength)
        engine.rankerRemoveDocChannels[shard] = make(
            chan rankerRemoveDocRequest,
            options.RankerBufferLength)
    }

    // Initialize the persistent storage channels
    if engine.initOptions.UsePersistentStorage {
        engine.persistentStorageIndexDocumentChannels =
            make([]chan persistentStorageIndexDocumentRequest,
                engine.initOptions.PersistentStorageShards)
        for shard := 0; shard < engine.initOptions.PersistentStorageShards; shard++ {
            engine.persistentStorageIndexDocumentChannels[shard] = make(
                chan persistentStorageIndexDocumentRequest)
        }
        engine.persistentStorageInitChannel = make(
            chan bool, engine.initOptions.PersistentStorageShards)
    }

    // Start the segmenter workers
    for iThread := 0; iThread < options.NumSegmenterThreads; iThread++ {
        go engine.segmenterWorker()
    }

    // Start the indexer and ranker workers
    for shard := 0; shard < options.NumShards; shard++ {
        go engine.indexerAddDocumentWorker(shard)
        go engine.rankerAddDocWorker(shard)
        go engine.rankerRemoveDocWorker(shard)

        for i := 0; i < options.NumIndexerThreadsPerShard; i++ {
            go engine.indexerLookupWorker(shard)
        }
        for i := 0; i < options.NumRankerThreadsPerShard; i++ {
            go engine.rankerRankWorker(shard)
        }
    }

    // Start the persistent storage worker goroutines
    if engine.initOptions.UsePersistentStorage {
        err := os.MkdirAll(engine.initOptions.PersistentStorageFolder, 0700)
        if err != nil {
            log.Fatal("Failed to create directory ", engine.initOptions.PersistentStorageFolder)
        }

        // Open or create the databases, one per storage shard
        engine.dbs = make([]storage.Storage, engine.initOptions.PersistentStorageShards)
        for shard := 0; shard < engine.initOptions.PersistentStorageShards; shard++ {
            dbPath := engine.initOptions.PersistentStorageFolder + "/" + PersistentStorageFilePrefix + "." + strconv.Itoa(shard)
            db, err := storage.OpenStorage(dbPath)
            if db == nil || err != nil {
                log.Fatal("Failed to open database ", dbPath, ": ", err)
            }
            engine.dbs[shard] = db
        }

        // Restore previously stored documents from the databases
        for shard := 0; shard < engine.initOptions.PersistentStorageShards; shard++ {
            go engine.persistentStorageInitWorker(shard)
        }

        // Wait for the restore to finish
        for shard := 0; shard < engine.initOptions.PersistentStorageShards; shard++ {
            <-engine.persistentStorageInitChannel
        }
        // Then spin until every restored document has made it into the index
        for {
            runtime.Gosched()
            if engine.numIndexingRequests == engine.numDocumentsIndexed {
                break
            }
        }

        // Close and reopen the databases
        for shard := 0; shard < engine.initOptions.PersistentStorageShards; shard++ {
            engine.dbs[shard].Close()
            dbPath := engine.initOptions.PersistentStorageFolder + "/" + PersistentStorageFilePrefix + "." + strconv.Itoa(shard)
            db, err := storage.OpenStorage(dbPath)
            if db == nil || err != nil {
                log.Fatal("Failed to open database ", dbPath, ": ", err)
            }
            engine.dbs[shard] = db
        }

        for shard := 0; shard < engine.initOptions.PersistentStorageShards; shard++ {
            go engine.persistentStorageIndexDocumentWorker(shard)
        }
    }

    atomic.AddUint64(&engine.numDocumentsStored, engine.numIndexingRequests)
}
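
// A minimal initialization sketch from an importing package (the variable name,
// file paths and option values below are illustrative, not defaults of this package):
//
//     var searcher engine.Engine
//     searcher.Init(types.EngineInitOptions{
//         SegmenterDictionaries: "data/dictionary.txt", // illustrative path
//         StopTokenFile:         "data/stop_tokens.txt", // illustrative path
//         UsePersistentStorage:  false,
//     })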

// Add a document to the index.
//
// Arguments:
//     docId: the document identifier, must be unique
//     data:  see the comments on DocumentIndexData
//
// Notes:
//     1. This function is thread-safe; call it concurrently to speed up indexing.
//     2. The call is asynchronous: when it returns, the document may not have been
//        added to the index yet, so an immediate Search may not find it. To force
//        all pending documents into the index, call FlushIndex.
func (engine *Engine) IndexDocument(docId uint64, data types.DocumentIndexData) {
    engine.internalIndexDocument(docId, data)

    if engine.initOptions.UsePersistentStorage {
        // Route the document to a storage shard by hashing its id
        hash := murmur.Murmur3([]byte(fmt.Sprintf("%d", docId))) % uint32(engine.initOptions.PersistentStorageShards)
        engine.persistentStorageIndexDocumentChannels[hash] <- persistentStorageIndexDocumentRequest{docId: docId, data: data}
    }
}
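
// A usage sketch (assuming the engine was initialized as in the sketch above;
// the document ids and content are illustrative):
//
//     searcher.IndexDocument(1, types.DocumentIndexData{Content: "first document"})
//     searcher.IndexDocument(2, types.DocumentIndexData{Content: "second document"})
//     searcher.FlushIndex() // block until both documents are searchable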

func (engine *Engine) internalIndexDocument(docId uint64, data types.DocumentIndexData) {
    if !engine.initialized {
        log.Fatal("The engine must be initialized first")
    }

    atomic.AddUint64(&engine.numIndexingRequests, 1)
    hash := murmur.Murmur3([]byte(fmt.Sprintf("%d%s", docId, data.Content)))
    engine.segmenterChannel <- segmenterRequest{
        docId: docId, hash: hash, data: data}
}

// Remove a document from the index.
//
// Arguments:
//     docId: the document identifier, must be unique
//
// Note: this only removes the document from the rankers; the indexers are left unchanged.
func (engine *Engine) RemoveDocument(docId uint64) {
    if !engine.initialized {
        log.Fatal("The engine must be initialized first")
    }

    for shard := 0; shard < engine.initOptions.NumShards; shard++ {
        engine.rankerRemoveDocChannels[shard] <- rankerRemoveDocRequest{docId: docId}
    }

    if engine.initOptions.UsePersistentStorage {
        // Also remove the document from the database
        hash := murmur.Murmur3([]byte(fmt.Sprintf("%d", docId))) % uint32(engine.initOptions.PersistentStorageShards)
        go engine.persistentStorageRemoveDocumentWorker(docId, hash)
    }
}
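
// A usage sketch (docId 1 is assumed to have been indexed earlier):
//
//     searcher.RemoveDocument(1)
//     // The document no longer appears in search results, but its tokens
//     // remain in the indexers (see the note above).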

// Block until all pending documents have been added to the index.
func (engine *Engine) FlushIndex() {
    for {
        runtime.Gosched()
        if engine.numIndexingRequests == engine.numDocumentsIndexed &&
            (!engine.initOptions.UsePersistentStorage ||
                engine.numIndexingRequests == engine.numDocumentsStored) {
            return
        }
    }
}

// Find the documents that satisfy the search request. This function is thread-safe.
func (engine *Engine) Search(request types.SearchRequest) (output types.SearchResponse) {
    if !engine.initialized {
        log.Fatal("The engine must be initialized first")
    }

    var rankOptions types.RankOptions
    if request.RankOptions == nil {
        rankOptions = *engine.initOptions.DefaultRankOptions
    } else {
        rankOptions = *request.RankOptions
    }
    if rankOptions.ScoringCriteria == nil {
        rankOptions.ScoringCriteria = engine.initOptions.DefaultRankOptions.ScoringCriteria
    }

    // Collect the search tokens: segment the query text if given, otherwise
    // use the tokens supplied in the request
    tokens := []string{}
    if request.Text != "" {
        querySegments := engine.segmenter.Segment([]byte(request.Text))
        for _, s := range querySegments {
            token := s.Token().Text()
            if !engine.stopTokens.IsStopToken(token) {
                tokens = append(tokens, token)
            }
        }
    } else {
        tokens = append(tokens, request.Tokens...)
    }

    // Channel on which the rankers return their results
    rankerReturnChannel := make(
        chan rankerReturnRequest, engine.initOptions.NumShards)

    // Build the lookup request
    lookupRequest := indexerLookupRequest{
        countDocsOnly:       request.CountDocsOnly,
        tokens:              tokens,
        labels:              request.Labels,
        docIds:              request.DocIds,
        options:             rankOptions,
        rankerReturnChannel: rankerReturnChannel,
        orderless:           request.Orderless,
    }

    // Send the lookup request to every indexer shard
    for shard := 0; shard < engine.initOptions.NumShards; shard++ {
        engine.indexerLookupChannels[shard] <- lookupRequest
    }

    // Collect the ranker outputs
    numDocs := 0
    rankOutput := types.ScoredDocuments{}
    timeout := request.Timeout
    isTimeout := false
    if timeout <= 0 {
        // No timeout: wait for every shard
        for shard := 0; shard < engine.initOptions.NumShards; shard++ {
            rankerOutput := <-rankerReturnChannel
            if !request.CountDocsOnly {
                rankOutput = append(rankOutput, rankerOutput.docs...)
            }
            numDocs += rankerOutput.numDocs
        }
    } else {
        // With timeout: wait for each shard until the shared deadline expires
        deadline := time.Now().Add(time.Nanosecond * time.Duration(NumNanosecondsInAMillisecond*request.Timeout))
        for shard := 0; shard < engine.initOptions.NumShards; shard++ {
            select {
            case rankerOutput := <-rankerReturnChannel:
                if !request.CountDocsOnly {
                    rankOutput = append(rankOutput, rankerOutput.docs...)
                }
                numDocs += rankerOutput.numDocs
            case <-time.After(deadline.Sub(time.Now())):
                isTimeout = true
            }
        }
    }

    // Sort the merged results
    if !request.CountDocsOnly && !request.Orderless {
        if rankOptions.ReverseOrder {
            sort.Sort(sort.Reverse(rankOutput))
        } else {
            sort.Sort(rankOutput)
        }
    }

    // Prepare the output. Docs is only filled when CountDocsOnly is false;
    // OutputOffset and MaxOutputs are applied to the sorted results, while
    // orderless results are returned as-is.
    output.Tokens = tokens
    if !request.CountDocsOnly {
        if request.Orderless {
            output.Docs = rankOutput
        } else {
            start := utils.MinInt(rankOptions.OutputOffset, len(rankOutput))
            end := len(rankOutput)
            if rankOptions.MaxOutputs != 0 {
                end = utils.MinInt(start+rankOptions.MaxOutputs, len(rankOutput))
            }
            output.Docs = rankOutput[start:end]
        }
    }
    output.NumDocs = numDocs
    output.Timeout = isTimeout
    return
}
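
// A search sketch (the query text is illustrative; RankOptions and Timeout can
// be left unset to use the engine defaults):
//
//     response := searcher.Search(types.SearchRequest{Text: "search engine"})
//     fmt.Println(response.NumDocs, response.Tokens, response.Docs)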

// Shut the engine down.
func (engine *Engine) Close() {
    engine.FlushIndex()
    if engine.initOptions.UsePersistentStorage {
        for _, db := range engine.dbs {
            db.Close()
        }
    }
}

// Map a hash of the text to the shard it is assigned to.
func (engine *Engine) getShard(hash uint32) int {
    return int(hash % uint32(engine.initOptions.NumShards))
}