{-|
Module : Gargantext.Database.Flow
Description : Database Flow
Copyright : (c) CNRS, 2017-Present
License : AGPL + CECILL v3
Maintainer : team@gargantext.org
Stability : experimental
Portability : POSIX
-- TODO-ACCESS:
-- check userId CanFillUserCorpus userCorpusId
-- check masterUserId CanFillMasterCorpus masterCorpusId
-- TODO-ACCESS: check uId CanInsertDoc pId && checkDocType nodeType
-- TODO-EVENTS: InsertedNodes
-}
{-# OPTIONS_GHC -fno-warn-orphans #-}
{-# LANGUAGE ConstrainedClassMethods #-}
{-# LANGUAGE ConstraintKinds #-}
{-# LANGUAGE InstanceSigs #-}
{-# LANGUAGE ScopedTypeVariables #-}
{-# LANGUAGE TemplateHaskell #-}
{-# LANGUAGE TypeApplications #-}
{-# LANGUAGE TypeOperators #-}
module Gargantext.Database.Action.Flow -- (flowDatabase, ngrams2list)
( DataText(..)
, getDataText
, getDataText_Debug
, flowDataText
, flow
, flowCorpusFile
, flowCorpus
, flowCorpusUser
, flowAnnuaire
, insertMasterDocs
, saveDocNgramsWith
, addDocumentsToHyperCorpus
, reIndexWith
, docNgrams
, getOrMkRoot
, getOrMk_RootWithCorpus
, TermType(..)
, DataOrigin(..)
, allDataOrigins
, do_api
, indexAllDocumentsWithPosTag
)
where
import Conduit
import Control.Lens hiding (elements, Indexed)
import Control.Monad (void)
import Data.Aeson.TH (deriveJSON)
import Data.Conduit.Internal (zipSources)
import Data.Either
import Data.Foldable (for_)
import Data.HashMap.Strict (HashMap)
import Data.Hashable (Hashable)
import Data.List (concat)
import Data.Map.Strict (Map, lookup)
import Data.Maybe (catMaybes)
import Data.Monoid
import Data.Proxy
import Data.Set (Set)
import Data.Swagger
import Data.Tuple.Extra (first, second)
import GHC.Generics (Generic)
import GHC.Num (fromInteger)
import Gargantext.API.Ngrams.Tools (getTermsWith)
import Gargantext.Core (Lang(..), PosTagAlgo(..), NLPServerConfig)
import Gargantext.Core (withDefaultLanguage)
import Gargantext.Core.Ext.IMTUser (readFile_Annuaire)
import Gargantext.Core.Flow.Types
import Gargantext.Core.NLP (nlpServerGet)
import Gargantext.Core.NodeStory (HasNodeStory)
import Gargantext.Core.Text
import Gargantext.Core.Text.Corpus.Parsers (parseFile, FileFormat, FileType, splitOn)
import Gargantext.Core.Text.List (buildNgramsLists)
import Gargantext.Core.Text.List.Group.WithStem ({-StopSize(..),-} GroupParams(..))
import Gargantext.Core.Text.List.Social (FlowSocialListWith(..))
import Gargantext.Core.Text.Terms
import Gargantext.Core.Text.Terms.Mono.Stem.En (stemIt)
import Gargantext.Core.Text.Terms.WithList (MatchedText, buildPatternsWith, termsInText)
import Gargantext.Core.Types (POS(NP), TermsCount)
import Gargantext.Core.Types.Individu (User(..))
import Gargantext.Core.Types.Main
import Gargantext.Core.Types.Query (Limit)
import Gargantext.Core.Utils (addTuples)
import Gargantext.Core.Utils.Prefix (unPrefix, unPrefixSwagger)
import Gargantext.Database.Action.Flow.List
import Gargantext.Database.Action.Flow.Types
import Gargantext.Database.Action.Flow.Utils (insertDocNgrams, DocumentIdWithNgrams(..))
import Gargantext.Database.Action.Metrics (updateNgramsOccurrences, updateContextScore)
import Gargantext.Database.Action.Search (searchDocInDatabase)
import Gargantext.Database.Admin.Config (userMaster, corpusMasterName)
import Gargantext.Database.Admin.Types.Hyperdata
import Gargantext.Database.Admin.Types.Node hiding (DEBUG) -- (HyperdataDocument(..), NodeType(..), NodeId, UserId, ListId, CorpusId, RootId, MasterCorpusId, MasterUserId)
import Gargantext.Database.Prelude
import Gargantext.Database.Query.Table.ContextNodeNgrams2
import Gargantext.Database.Query.Table.Ngrams
import Gargantext.Database.Query.Table.Node
import Gargantext.Database.Query.Table.Node.Document.Insert -- (insertDocuments, ReturnId(..), addUniqIdsDoc, addUniqIdsContact, ToDbData(..))
import Gargantext.Database.Query.Table.Node.Error (HasNodeError(..))
import Gargantext.Database.Query.Table.NodeContext (selectDocNodes)
import Gargantext.Database.Query.Table.NodeNgrams (listInsertDb , getCgramsId)
import Gargantext.Database.Query.Tree.Root (getOrMkRoot, getOrMk_RootWithCorpus)
import Gargantext.Database.Schema.Context
import Gargantext.Database.Schema.Node (NodePoly(..), node_id)
import Gargantext.Database.Schema.Node (node_hyperdata)
import Gargantext.Database.Types
import Gargantext.Prelude
import Gargantext.Prelude.Crypto.Hash (Hash)
import Gargantext.System.Logging
import Gargantext.Utils.Jobs (JobHandle, MonadJobStatus(..))
import System.FilePath (FilePath)
import qualified Data.Conduit as C
import qualified Data.Conduit.List as CL
import qualified Data.Conduit.List as CList
import qualified Data.HashMap.Strict as HashMap
import qualified Data.List as List
import qualified Data.Map.Strict as Map
import qualified Data.Set as Set
import qualified Data.Text as T
import qualified Gargantext.API.Ngrams.Types as NT
import qualified Gargantext.Core.Text.Corpus.API as API
import qualified Gargantext.Data.HashMap.Strict.Utils as HashMap
import qualified Gargantext.Database.Query.Table.Node.Document.Add as Doc (add)
import qualified PUBMED.Types as PUBMED
------------------------------------------------------------------------
-- Imports for upgrade function
import Gargantext.Database.Query.Tree.Root (getRootId)
import Gargantext.Database.Query.Tree (findNodesId)
------------------------------------------------------------------------
-- TODO use internal with API name (could be old data)
data DataOrigin = InternalOrigin { _do_api :: API.ExternalAPIs }
| ExternalOrigin { _do_api :: API.ExternalAPIs }
-- TODO Web
deriving (Generic, Eq)
makeLenses ''DataOrigin
deriveJSON (unPrefix "_do_") ''DataOrigin
instance ToSchema DataOrigin where
declareNamedSchema = genericDeclareNamedSchema (unPrefixSwagger "_do_")
allDataOrigins :: [DataOrigin]
allDataOrigins = map InternalOrigin API.externalAPIs <> map ExternalOrigin API.externalAPIs
---------------
data DataText = DataOld ![NodeId]
| DataNew !(Maybe Integer, ConduitT () HyperdataDocument IO ())
--- | DataNew ![[HyperdataDocument]]
-- Show instance is not possible because of IO
printDataText :: DataText -> IO ()
printDataText (DataOld xs) = putStrLn $ show xs
printDataText (DataNew (maybeInt, conduitData)) = do
res <- C.runConduit (conduitData .| CL.consume)
putStrLn $ show (maybeInt, res)
-- TODO use the split parameter in config file
getDataText :: FlowCmdM env err m
=> DataOrigin
-> TermType Lang
-> API.RawQuery
-> Maybe PUBMED.APIKey
-> Maybe API.Limit
-> m (Either API.GetCorpusError DataText)
getDataText (ExternalOrigin api) la q mPubmedAPIKey li = do
eRes <- liftBase $ API.get api (_tt_lang la) q mPubmedAPIKey li
pure $ DataNew <$> eRes
getDataText (InternalOrigin _) _la q _ _li = do
(_masterUserId, _masterRootId, cId) <- getOrMk_RootWithCorpus
(UserName userMaster)
(Left "")
(Nothing :: Maybe HyperdataCorpus)
ids <- map fst <$> searchDocInDatabase cId (stemIt $ API.getRawQuery q)
pure $ Right $ DataOld ids
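-- Illustrative sketch (hypothetical bindings): fetch new documents from an
-- external API, then hand them to 'flowDataText'. The origin, query and
-- limit values are placeholders.
--
-- > eDt <- getDataText (ExternalOrigin api) (Multi EN) rawQuery Nothing (Just 100)
-- > case eDt of
-- >   Left corpusError -> panic $ "getDataText failed: " <> show corpusError
-- >   Right dataText   -> flowDataText user dataText (Multi EN) corpusId Nothing jobHandle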
getDataText_Debug :: FlowCmdM env err m
=> DataOrigin
-> TermType Lang
-> API.RawQuery
-> Maybe API.Limit
-> m ()
getDataText_Debug a l q li = do
result <- getDataText a l q Nothing li
case result of
Left err -> liftBase $ putStrLn $ show err
Right res -> liftBase $ printDataText res
-------------------------------------------------------------------------------
flowDataText :: forall env err m.
( FlowCmdM env err m
, MonadJobStatus m
)
=> User
-> DataText
-> TermType Lang
-> CorpusId
-> Maybe FlowSocialListWith
-> JobHandle m
-> m CorpusId
flowDataText u (DataOld ids) tt cid mfslw _ = do
$(logLocM) DEBUG $ T.pack $ "Found " <> show (length ids) <> " old node IDs"
(_userId, userCorpusId, listId) <- createNodes u (Right [cid]) corpusType
_ <- Doc.add userCorpusId ids
flowCorpusUser (_tt_lang tt) u userCorpusId listId corpusType mfslw
where
corpusType = (Nothing :: Maybe HyperdataCorpus)
flowDataText u (DataNew (mLen, txtC)) tt cid mfslw jobHandle = do
$(logLocM) DEBUG $ T.pack $ "Found " <> show mLen <> " new documents to process"
for_ (mLen <&> fromInteger) (`addMoreSteps` jobHandle)
flowCorpus u (Right [cid]) tt mfslw (mLen, (transPipe liftBase txtC)) jobHandle
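-- Illustrative sketch (hypothetical bindings): both branches target the same
-- existing corpus.
--
-- > -- 'DataOld': link documents already present in the database
-- > _ <- flowDataText user (DataOld oldNodeIds) (Multi EN) corpusId Nothing jobHandle
-- > -- 'DataNew': stream freshly fetched documents through the full flow
-- > _ <- flowDataText user (DataNew (Just 100, docsConduit)) (Multi EN) corpusId Nothing jobHandle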
------------------------------------------------------------------------
-- TODO use proxy
flowAnnuaire :: (FlowCmdM env err m, MonadJobStatus m)
=> User
-> Either CorpusName [CorpusId]
-> (TermType Lang)
-> FilePath
-> JobHandle m
-> m AnnuaireId
flowAnnuaire u n l filePath jobHandle = do
-- TODO Conduit for file
docs <- liftBase $ ((readFile_Annuaire filePath) :: IO [HyperdataContact])
flow (Nothing :: Maybe HyperdataAnnuaire) u n l Nothing (Just $ fromIntegral $ length docs, yieldMany docs) jobHandle
------------------------------------------------------------------------
flowCorpusFile :: (FlowCmdM env err m, MonadJobStatus m)
=> User
-> Either CorpusName [CorpusId]
-> Limit -- Limit the number of docs (for dev purpose)
-> TermType Lang
-> FileType
-> FileFormat
-> FilePath
-> Maybe FlowSocialListWith
-> JobHandle m
-> m CorpusId
flowCorpusFile u n _l la ft ff fp mfslw jobHandle = do
eParsed <- liftBase $ parseFile ft ff fp
case eParsed of
Right parsed -> do
flowCorpus u n la mfslw (Just $ fromIntegral $ length parsed, yieldMany parsed .| mapC toHyperdataDocument) jobHandle
--let docs = splitEvery 500 $ take l parsed
--flowCorpus u n la mfslw (yieldMany $ map (map toHyperdataDocument) docs) logStatus
Left e -> panic $ "Error: " <> T.pack e
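-- Illustrative sketch: parse a file on disk and flow it into a corpus; the
-- file type/format constructors and the path below are hypothetical.
--
-- > cid <- flowCorpusFile user (Left "my-corpus") limit (Multi EN) fileType fileFormat "/path/to/docs" Nothing jobHandle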
------------------------------------------------------------------------
-- | TODO improve the type needed to create/update a corpus
-- (For now, Either is enough)
flowCorpus :: (FlowCmdM env err m, FlowCorpus a, MonadJobStatus m)
=> User
-> Either CorpusName [CorpusId]
-> TermType Lang
-> Maybe FlowSocialListWith
-> (Maybe Integer, ConduitT () a m ())
-> JobHandle m
-> m CorpusId
flowCorpus = flow (Nothing :: Maybe HyperdataCorpus)
flow :: forall env err m a c.
( FlowCmdM env err m
, FlowCorpus a
, MkCorpus c
, MonadJobStatus m
)
=> Maybe c
-> User
-> Either CorpusName [CorpusId]
-> TermType Lang
-> Maybe FlowSocialListWith
-> (Maybe Integer, ConduitT () a m ())
-> JobHandle m
-> m CorpusId
flow c u cn la mfslw (mLength, docsC) jobHandle = do
(_userId, userCorpusId, listId) <- createNodes u cn c
-- TODO if public insertMasterDocs else insertUserDocs
nlpServer <- view $ nlpServerGet (_tt_lang la)
runConduit $ zipSources (yieldMany ([1..] :: [Int])) docsC
.| CList.chunksOf 100
.| mapM_C (addDocumentsWithProgress nlpServer userCorpusId)
.| sinkNull
$(logLocM) DEBUG "Calling flowCorpusUser"
flowCorpusUser (la ^. tt_lang) u userCorpusId listId c mfslw
where
addDocumentsWithProgress :: NLPServerConfig -> CorpusId -> [(Int, a)] -> m ()
addDocumentsWithProgress nlpServer userCorpusId docsChunk = do
$(logLocM) DEBUG $ T.pack $ "calling insertDoc, ([idx], mLength) = " <> show (fst <$> docsChunk, mLength)
docs <- addDocumentsToHyperCorpus nlpServer c la userCorpusId (map snd docsChunk)
markProgress (length docs) jobHandle
-- | Given a list of corpus documents and the 'CorpusId' of the target corpus,
-- adds the documents to that corpus. Returns the ids of the inserted documents.
addDocumentsToHyperCorpus :: ( DbCmd' env err m
, HasNodeError err
, FlowCorpus document
, MkCorpus corpus
)
=> NLPServerConfig
-> Maybe corpus
-> TermType Lang
-> CorpusId
-> [document]
-> m [DocId]
addDocumentsToHyperCorpus ncs mb_hyper la corpusId docs = do
ids <- insertMasterDocs ncs mb_hyper la docs
void $ Doc.add corpusId ids
pure ids
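-- Illustrative sketch (hypothetical bindings): insert a batch of documents
-- into an existing corpus, using the NLP server configured for the language.
--
-- > nlpServer <- view $ nlpServerGet EN
-- > docIds    <- addDocumentsToHyperCorpus nlpServer (Nothing :: Maybe HyperdataCorpus) (Multi EN) corpusId docs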
------------------------------------------------------------------------
createNodes :: ( DbCmd' env err m, HasNodeError err
, MkCorpus c
)
=> User
-> Either CorpusName [CorpusId]
-> Maybe c
-> m (UserId, CorpusId, ListId)
createNodes user corpusName ctype = do
-- User Flow
(userId, _rootId, userCorpusId) <- getOrMk_RootWithCorpus user corpusName ctype
-- NodeTexts is first
_tId <- insertDefaultNodeIfNotExists NodeTexts userCorpusId userId
-- printDebug "NodeTexts: " tId
-- NodeList is second
listId <- getOrMkList userCorpusId userId
-- User Graph Flow
_ <- insertDefaultNodeIfNotExists NodeGraph userCorpusId userId
-- _ <- insertDefaultNodeIfNotExists NodeDashboard userCorpusId userId
pure (userId, userCorpusId, listId)
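-- Illustrative sketch: create (or fetch) the corpus skeleton for a user; the
-- user and corpus names are hypothetical.
--
-- > (userId, corpusId, listId) <- createNodes (UserName "alice") (Left "my-corpus") (Nothing :: Maybe HyperdataCorpus)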
flowCorpusUser :: ( FlowCmdM env err m
, MkCorpus c
)
=> Lang
-> User
-> CorpusId
-> ListId
-> Maybe c
-> Maybe FlowSocialListWith
-> m CorpusId
flowCorpusUser l user userCorpusId listId ctype mfslw = do
server <- view (nlpServerGet l)
-- User List Flow
(masterUserId, _masterRootId, masterCorpusId)
<- getOrMk_RootWithCorpus (UserName userMaster) (Left "") ctype
--let gp = (GroupParams l 2 3 (StopSize 3))
-- Here the PosTagAlgo should be chosen according to the Lang
_ <- case mfslw of
(Just (NoList _)) -> do
-- printDebug "Do not build list" mfslw
pure ()
_ -> do
ngs <- buildNgramsLists user userCorpusId masterCorpusId mfslw
$ GroupWithPosTag l server HashMap.empty
-- printDebug "flowCorpusUser:ngs" ngs
_userListId <- flowList_DbRepo listId ngs
_mastListId <- getOrMkList masterCorpusId masterUserId
pure ()
-- _ <- insertOccsUpdates userCorpusId mastListId
--_ <- mkPhylo userCorpusId userId
-- Annuaire Flow
-- _ <- mkAnnuaire rootUserId userId
_ <- reIndexWith userCorpusId listId NgramsTerms (Set.singleton MapTerm)
_ <- updateContextScore userCorpusId (Just listId)
_ <- updateNgramsOccurrences userCorpusId (Just listId)
pure userCorpusId
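-- Illustrative sketch (hypothetical bindings): finalize a corpus after its
-- documents have been inserted, building the ngrams lists and refreshing
-- occurrence and context scores.
--
-- > _ <- flowCorpusUser EN user corpusId listId (Nothing :: Maybe HyperdataCorpus) Nothing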
insertMasterDocs :: ( DbCmd' env err m
, HasNodeError err
, FlowCorpus a
, MkCorpus c
)
=> NLPServerConfig
-> Maybe c
-> TermType Lang
-> [a]
-> m [DocId]
insertMasterDocs ncs c lang hs = do
(masterUserId, _, masterCorpusId) <- getOrMk_RootWithCorpus (UserName userMaster) (Left corpusMasterName) c
(ids', documentsWithId) <- insertDocs masterUserId masterCorpusId (map (toNode masterUserId Nothing) hs )
_ <- Doc.add masterCorpusId ids'
-- TODO
-- create a corpus with database name (CSV or PubMed)
-- add documents to the corpus (create node_node link)
-- this will enable global database monitoring
-- maps :: IO Map Ngrams (Map NgramsType (Map NodeId Int))
mapNgramsDocs' :: HashMap ExtractedNgrams (Map NgramsType (Map NodeId (Int, TermsCount)))
<- mapNodeIdNgrams
<$> documentIdWithNgrams
(extractNgramsT ncs $ withLang lang documentsWithId)
documentsWithId
lId <- getOrMkList masterCorpusId masterUserId
  _ <- saveDocNgramsWith lId mapNgramsDocs'
-- _cooc <- insertDefaultNode NodeListCooc lId masterUserId
pure ids'
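-- Illustrative sketch (hypothetical bindings): insert documents into the
-- master corpus only; linking them to a user corpus is done separately, as in
-- 'addDocumentsToHyperCorpus'.
--
-- > ids <- insertMasterDocs nlpServer (Nothing :: Maybe HyperdataCorpus) (Multi EN) hyperdataDocuments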
saveDocNgramsWith :: (DbCmd' env err m)
=> ListId
-> HashMap ExtractedNgrams (Map NgramsType (Map NodeId (Int, TermsCount)))
-> m ()
saveDocNgramsWith lId mapNgramsDocs' = do
--printDebug "[saveDocNgramsWith] mapNgramsDocs'" mapNgramsDocs'
let mapNgramsDocsNoCount = over (traverse . traverse . traverse) fst mapNgramsDocs'
terms2id <- insertExtractedNgrams $ HashMap.keys mapNgramsDocsNoCount
let mapNgramsDocs = HashMap.mapKeys extracted2ngrams mapNgramsDocs'
-- new
mapCgramsId <- listInsertDb lId toNodeNgramsW'
$ map (first _ngramsTerms . second Map.keys)
$ HashMap.toList mapNgramsDocs
--printDebug "saveDocNgramsWith" mapCgramsId
-- insertDocNgrams
let ngrams2insert = catMaybes [ ContextNodeNgrams2 <$> Just nId
<*> (getCgramsId mapCgramsId ngrams_type (_ngramsTerms terms''))
<*> Just (fromIntegral w :: Double)
| (terms'', mapNgramsTypes) <- HashMap.toList mapNgramsDocs
, (ngrams_type, mapNodeIdWeight) <- Map.toList mapNgramsTypes
, (nId, (w, _cnt)) <- Map.toList mapNodeIdWeight
]
-- printDebug "Ngrams2Insert" ngrams2insert
_return <- insertContextNodeNgrams2 ngrams2insert
-- to be removed
_ <- insertDocNgrams lId $ HashMap.mapKeys (indexNgrams terms2id) mapNgramsDocs
pure ()
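-- Sketch of the expected input shape (hypothetical values): one extracted
-- ngram, tagged as 'NgramsTerms', seen once in document 'docId' with weight 1
-- and terms count 1.
--
-- > let m = HashMap.singleton extractedNgrams (Map.singleton NgramsTerms (Map.singleton docId (1, 1)))
-- > saveDocNgramsWith listId m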
------------------------------------------------------------------------
-- TODO Type NodeDocumentUnicised
insertDocs :: ( DbCmd' env err m
-- , FlowCorpus a
, FlowInsertDB a
, HasNodeError err
)
=> UserId
-> CorpusId
-> [a]
-> m ([ContextId], [Indexed ContextId a])
insertDocs uId cId hs = do
let docs = map addUniqId hs
newIds <- insertDb uId Nothing docs
-- printDebug "newIds" newIds
let
newIds' = map reId newIds
documentsWithId = mergeData (toInserted newIds) (Map.fromList $ map viewUniqId' docs)
_ <- Doc.add cId newIds'
pure (newIds', documentsWithId)
------------------------------------------------------------------------
viewUniqId' :: UniqId a
=> a
-> (Hash, a)
viewUniqId' d = maybe err (\h -> (h,d)) (view uniqId d)
where
    err = panic "[ERROR] Database.Flow.viewUniqId'"
toInserted :: [ReturnId]
-> Map Hash ReturnId
toInserted =
Map.fromList . map (\r -> (reUniqId r, r) )
    . filter reInserted
mergeData :: Map Hash ReturnId
-> Map Hash a
-> [Indexed NodeId a]
mergeData rs = catMaybes . map toDocumentWithId . Map.toList
where
toDocumentWithId (sha,hpd) =
Indexed <$> fmap reId (lookup sha rs)
<*> Just hpd
------------------------------------------------------------------------
------------------------------------------------------------------------
------------------------------------------------------------------------
documentIdWithNgrams :: HasNodeError err
=> (a
-> DBCmd err (HashMap b (Map NgramsType Int, TermsCount)))
-> [Indexed NodeId a]
-> DBCmd err [DocumentIdWithNgrams a b]
documentIdWithNgrams f = traverse toDocumentIdWithNgrams
where
toDocumentIdWithNgrams d = do
e <- f $ _unIndex d
pure $ DocumentIdWithNgrams d e
-- | TODO check optimization
mapNodeIdNgrams :: (Ord b, Hashable b)
=> [DocumentIdWithNgrams a b]
-> HashMap b
(Map NgramsType
(Map NodeId (Int, TermsCount))
)
mapNodeIdNgrams = HashMap.unionsWith (Map.unionWith (Map.unionWith addTuples)) . fmap f
where
    -- | NOTE We are somehow multiplying 'TermsCount' here: if the
    -- same ngrams term has different ngrams types, the 'TermsCount'
    -- for it (the number of times the term appears in a
    -- document) is copied over to all its types.
f :: DocumentIdWithNgrams a b
-> HashMap b (Map NgramsType (Map NodeId (Int, TermsCount)))
f d = fmap (\(ngramsTypeMap, cnt) -> fmap (\i -> Map.singleton nId (i, cnt)) ngramsTypeMap) $ documentNgrams d
where
nId = _index $ documentWithId d
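-- Worked sketch (hypothetical values): a document with node id 42 whose
-- ngrams map is { b -> ({ NgramsTerms -> 1 }, 3) } contributes
-- { b -> { NgramsTerms -> { 42 -> (1, 3) } } }; merging another document's
-- contribution for the same b and ngrams type sums both the weight and the
-- count via 'addTuples'.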
------------------------------------------------------------------------
instance ExtractNgramsT HyperdataContact
where
extractNgramsT _ncs l hc = HashMap.mapKeys (cleanExtractedNgrams 255) <$> extract l hc
where
extract :: TermType Lang -> HyperdataContact
-> DBCmd err (HashMap ExtractedNgrams (Map NgramsType Int, TermsCount))
extract _l hc' = do
let authors = map text2ngrams
$ maybe ["Nothing"] (\a -> [a])
$ view (hc_who . _Just . cw_lastName) hc'
pure $ HashMap.fromList $ [(SimpleNgrams a', (Map.singleton Authors 1, 1)) | a' <- authors ]
instance ExtractNgramsT HyperdataDocument
where
extractNgramsT :: NLPServerConfig
-> TermType Lang
-> HyperdataDocument
-> DBCmd err (HashMap ExtractedNgrams (Map NgramsType Int, TermsCount))
extractNgramsT ncs lang hd = HashMap.mapKeys (cleanExtractedNgrams 255) <$> extractNgramsT' hd
where
extractNgramsT' :: HyperdataDocument
-> DBCmd err (HashMap ExtractedNgrams (Map NgramsType Int, TermsCount))
extractNgramsT' doc = do
let source = text2ngrams
$ maybe "Nothing" identity
$ _hd_source doc
institutes = map text2ngrams
$ maybe ["Nothing"] (splitOn Institutes (doc^. hd_bdd))
$ _hd_institutes doc
authors = map text2ngrams
$ maybe ["Nothing"] (splitOn Authors (doc^. hd_bdd))
$ _hd_authors doc
termsWithCounts' <- map (\(t, cnt) -> (enrichedTerms (lang ^. tt_lang) CoreNLP NP t, cnt))
<$> concat
<$> liftBase (extractTerms ncs lang $ hasText doc)
pure $ HashMap.fromList
$ [(SimpleNgrams source, (Map.singleton Sources 1, 1)) ]
<> [(SimpleNgrams i', (Map.singleton Institutes 1, 1)) | i' <- institutes ]
<> [(SimpleNgrams a', (Map.singleton Authors 1, 1)) | a' <- authors ]
<> [(EnrichedNgrams t', (Map.singleton NgramsTerms 1, cnt')) | (t', cnt') <- termsWithCounts' ]
instance (ExtractNgramsT a, HasText a) => ExtractNgramsT (Node a)
where
extractNgramsT ncs l (Node { _node_hyperdata = h }) = extractNgramsT ncs l h
instance HasText a => HasText (Node a)
where
hasText (Node { _node_hyperdata = h }) = hasText h
-- | TODO put elsewhere
-- | Upgrade function.
-- Assumes all documents are in English (currently the case).
indexAllDocumentsWithPosTag :: FlowCmdM env err m
=> m ()
indexAllDocumentsWithPosTag = do
rootId <- getRootId (UserName userMaster)
corpusIds <- findNodesId rootId [NodeCorpus]
docs <- List.concat <$> mapM getDocumentsWithParentId corpusIds
_ <- mapM extractInsert (splitEvery 1000 docs)
pure ()
extractInsert :: FlowCmdM env err m
=> [Node HyperdataDocument] -> m ()
extractInsert docs = do
let documentsWithId = map (\doc -> Indexed (doc ^. node_id) doc) docs
let lang = EN
ncs <- view $ nlpServerGet lang
mapNgramsDocs' <- mapNodeIdNgrams
<$> documentIdWithNgrams
(extractNgramsT ncs $ withLang (Multi lang) documentsWithId)
documentsWithId
_ <- insertExtractedNgrams $ HashMap.keys mapNgramsDocs'
pure ()
-- | Re-index documents of a corpus with ngrams in the list
reIndexWith :: ( HasNodeStory env err m
, FlowCmdM env err m
)
=> CorpusId
-> ListId
-> NgramsType
-> Set ListType
-> m ()
reIndexWith cId lId nt lts = do
-- printDebug "(cId,lId,nt,lts)" (cId, lId, nt, lts)
  corpus_node <- getNodeWith cId (Proxy @HyperdataCorpus)
let corpusLang = withDefaultLanguage $ view (node_hyperdata . to _hc_lang) corpus_node
-- Getting [NgramsTerm]
ts <- List.concat
<$> map (\(k,vs) -> k:vs)
<$> HashMap.toList
<$> getTermsWith identity [lId] nt lts
-- Get all documents of the corpus
docs <- selectDocNodes cId
let
-- fromListWith (<>)
ngramsByDoc = map (HashMap.fromListWith (Map.unionWith (Map.unionWith (\(_a,b) (_a',b') -> (1,b+b')))))
$ map (map (\((k, cnt), v) -> (SimpleNgrams (text2ngrams k), over (traverse . traverse) (\p -> (p, cnt)) v)))
$ map (docNgrams corpusLang nt ts) docs
-- Saving the indexation in database
_ <- mapM (saveDocNgramsWith lId) ngramsByDoc
pure ()
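-- Illustrative usage, mirroring the call in 'flowCorpusUser': re-index the
-- corpus documents against the map terms of its list.
--
-- > _ <- reIndexWith corpusId listId NgramsTerms (Set.singleton MapTerm)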
docNgrams :: Lang
-> NgramsType
-> [NT.NgramsTerm]
-> Gargantext.Database.Admin.Types.Node.Context HyperdataDocument
-> [((MatchedText, TermsCount),
Map NgramsType (Map NodeId Int))]
docNgrams lang nt ts doc =
List.zip
(termsInText lang (buildPatternsWith lang ts)
$ T.unlines $ catMaybes
[ doc ^. context_hyperdata . hd_title
, doc ^. context_hyperdata . hd_abstract
]
)
(List.cycle [Map.fromList $ [(nt, Map.singleton (doc ^. context_id) 1 )]])
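-- Sketch of the result shape (hypothetical values): each term match in the
-- document's title and abstract is paired with a one-document occurrence map:
--
-- > [ (("some term", 2), Map.fromList [(NgramsTerms, Map.singleton contextId 1)]) ]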