[refactor] too many 'return's were changed to 'pure' too fast

This fixes commit 5d5300cd, which mechanically changed every 'return' to
'pure' across the codebase and, along the way, broke SQL queries,
commented-out Python, and doc comments.
parent febb6db8
Pipeline #4634 failed in 51 minutes and 44 seconds
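Why the blanket rewrite mostly worked and where it had to fail: in monadic Haskell code, 'return' has defaulted to 'pure' since the Applicative-Monad Proposal, so those call sites really are interchangeable; the casualties reverted below are all places where 'return' was data rather than code. A minimal sketch (names invented for illustration, not from this commit):

-- Safe to rewrite: in monadic Haskell code, 'return' is 'pure'
-- (return = pure by default since the Applicative-Monad Proposal).
countWords :: FilePath -> IO Int
countWords path = do
  contents <- readFile path
  pure (length (words contents))   -- was: return (...); same meaning

-- Not safe to rewrite: this "RETURN" is PL/pgSQL living inside a
-- Haskell string literal, shipped to Postgres verbatim.
triggerBody :: String
triggerBody = "BEGIN ... RETURN new; END"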
@@ -56,7 +56,7 @@ instance Arbitrary ShareNodeParams where
 ------------------------------------------------------------------------
 -- TODO permission
 -- TODO refactor userId which is used twice
--- TODO change pure type for better warning/info/success/error handling on the front
+-- TODO change return type for better warning/info/success/error handling on the front
 api :: (HasNodeError err, HasNLPServer env, CmdRandom env err m)
     => User
     -> NodeId
@@ -16,7 +16,7 @@ def fast_maximal_cliques(g):
     def rec_maximal_cliques(g, subv):
         mc = []
         if subv == []: # stop condition
-            pure [[]]
+            return [[]]
         else :
             for i in range(len(subv)):
                 newsubv = [j for j in subv[i+1:len(subv)]
@@ -25,7 +25,7 @@ def fast_maximal_cliques(g):
                 for x in mci:
                     x.append(subv[i])
                     mc.append(x)
-        pure mc
+        return mc
     def purge(clust):
         clustset = [set(x) for x in clust]
@@ -37,13 +37,13 @@ def fast_maximal_cliques(g):
                 ok = False
             if ok and (not (clustset[i] in new_clust)):
                 new_clust.append(clustset[i])
-        pure [list(x) for x in new_clust]
+        return [list(x) for x in new_clust]
     # to optimize : rank the vertices on the degrees
     subv = [(v.index, v.degree()) for v in g.vs()]
     subv.sort(key = lambda z:z[1])
     subv = [x for (x, y) in subv]
-    pure purge(rec_maximal_cliques(g, subv))
+    return purge(rec_maximal_cliques(g, subv))
 -}
@@ -62,7 +62,7 @@ randomString num = do
   pure $ pack str

--- | Given a list of items of type 'a', pure a list with unique items
+-- | Given a list of items of type 'a', return a list with unique items
 -- (like List.nub) but tuple-d with their counts in the original list
 groupWithCounts :: (Ord a, Eq a) => [a] -> [(a, Int)]
 groupWithCounts = map f
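The body of `f` is elided in the diff; as a hedged illustration only (not the repo's actual definition), a nub-with-counts function matching this comment could look like:

import qualified Data.List as List

-- Hypothetical sketch: each unique item paired with how many times it
-- occurred in the input, like List.nub plus a count. 'head' is safe
-- because List.group never produces empty groups.
groupWithCounts :: (Ord a, Eq a) => [a] -> [(a, Int)]
groupWithCounts = map (\xs -> (head xs, length xs)) . List.group . List.sort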
@@ -229,7 +229,7 @@ instance DefaultFromField SqlJsonb HyperdataGraph
   defaultFromField = fromPGSFromField

 -----------------------------------------------------------
--- This type is used to pure graph via API
+-- This type is used to return graph via API
 -- hyperdataGraphAPI field is not a Maybe anymore – graph is always computed
 data HyperdataGraphAPI =
   HyperdataGraphAPI { _hyperdataAPIGraph :: Graph
@@ -616,7 +616,7 @@ separateBranches fdt similarity lambda frequency minBranch thr rise timescale do
           ((map (\e -> (e,True)) (fst branches')) ++ (map (\e -> (e,False)) (snd branches'))))
        else [currentBranch])
   in
-    -- 6) if there is no more branch to separate then pure [done'] else continue with [rest]
+    -- 6) if there is no more branch to separate then return [done'] else continue with [rest]
     if null rest
       then done'
       else separateBranches fdt similarity lambda frequency minBranch thr rise timescale docs coocs roots periods
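The recursion described in comment 6) follows a standard accumulate-and-continue shape. A much-simplified hedged skeleton of that pattern (not the real `separateBranches`, which threads many more parameters):

-- Generic skeleton: split the head item; finished pieces join the
-- accumulator, unfinished ones go back on the work list.
-- Terminates only if 'split' eventually stops producing new work.
separate :: (a -> ([a], [a])) -> [a] -> [a] -> [a]
separate _     done []       = done
separate split done (b:rest) =
  let (finished, again) = split b
  in  separate split (done ++ finished) (again ++ rest)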
@@ -116,7 +116,7 @@ searchInCorpusWithNgrams _cId _lId _t _ngt _q _o _l _order = undefined

 -- | Compute TF-IDF for all 'ngramIds' in given 'CorpusId'. In this
 -- case only the "TF" part makes sense and so we only compute the
 -- ratio of "number of times our terms appear in given document" and
--- "number of all terms in document" and pure a sorted list of
+-- "number of all terms in document" and return a sorted list of
 -- document ids
 _tfidfAll :: (HasDBid NodeType, HasNodeError err) => CorpusId -> [Int] -> DBCmd err [Int]
 _tfidfAll cId ngramIds = do
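As a hedged sketch of the ranking this comment describes (types and names invented for illustration): score each document by the ratio of matching terms to total terms, then return document ids best-first.

import Data.List (sortOn)
import Data.Ord (Down(..))

-- Hypothetical illustration of the "TF only" ordering described above.
tfRank :: [(Int, Int, Int)]  -- (docId, termMatches, totalTerms)
       -> [Int]
tfRank docs = [ docId | (docId, _, _) <- sortOn score docs ]
  where
    -- higher ratio first, hence Down; max 1 guards empty documents
    score (_, m, t) =
      Down (fromIntegral m / fromIntegral (max 1 t) :: Double)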
@@ -51,7 +51,7 @@ triggerSearchUpdate = execPGSQuery query ( toDBid NodeDocument
     ELSE
       new.search := to_tsvector( 'english' , new.hyperdata::jsonb );
     END IF;
-    pure new;
+    RETURN new;
   end
   $$ LANGUAGE plpgsql;
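The `RETURN new;` above is PL/pgSQL inside a `[sql| ... |]` quasiquote, so from Haskell's point of view it is part of a string literal. A hedged sketch of the shape (function name and columns are illustrative, not the repo's exact query):

{-# LANGUAGE QuasiQuotes #-}

import Database.PostgreSQL.Simple (Query)
import Database.PostgreSQL.Simple.SqlQQ (sql)

-- Everything between [sql| and |] is data shipped to Postgres
-- verbatim; the PL/pgSQL keyword RETURN must survive untouched.
searchUpdateTrigger :: Query
searchUpdateTrigger = [sql|
  CREATE OR REPLACE FUNCTION search_update() RETURNS trigger AS $$
  BEGIN
    new.search := to_tsvector( 'english' , new.hyperdata::jsonb );
    RETURN new;
  END
  $$ LANGUAGE plpgsql;
|]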
@@ -141,7 +141,7 @@ runOpaQuery q = mkCmd $ \c -> runSelect c q

 runCountOpaQuery :: Select a -> DBCmd err Int
 runCountOpaQuery q = do
   counts <- mkCmd $ \c -> runSelect c $ countRows q
-  -- countRows is guaranteed to pure a list with exactly one row so DL.head is safe here
+  -- countRows is guaranteed to return a list with exactly one row so DL.head is safe here
   pure $ fromInt64ToInt $ DL.head counts

 formatPGSQuery :: PGS.ToRow a => PGS.Query -> a -> Cmd err DB.ByteString
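The invariant relied on here is that Opaleye's `countRows` compiles to a single `COUNT(*)` row, so the result list always has exactly one element. A hedged sketch of a total alternative to `DL.head`, if one wanted to avoid the partial function (illustrative only):

import Data.Int (Int64)
import Data.Maybe (fromMaybe, listToMaybe)

-- Total version: defaults to 0 if the single-row invariant were
-- ever violated, instead of crashing on an empty list.
countFromRows :: [Int64] -> Int
countFromRows = fromIntegral . fromMaybe 0 . listToMaybe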
@@ -58,7 +58,7 @@ inputSqlTypes :: [Text]
 inputSqlTypes = ["int4","int4","int4","int4"]

 -- | SQL query to add documents
--- TODO pure id of added documents only
+-- TODO return id of added documents only
 queryAdd :: Query
 queryAdd = [sql|
   WITH input_rows(node_id,context_id,score,category) AS (?)
@@ -159,7 +159,7 @@ queryInsert = [sql|
   , ins AS (
     INSERT INTO contexts (hash_id, typename,user_id,parent_id,name,date,hyperdata)
     SELECT * FROM input_rows
-    ON CONFLICT (hash_id) DO NOTHING -- on unique index -- this does not pure the ids
+    ON CONFLICT (hash_id) DO NOTHING -- on unique index -- this does not return the ids
     RETURNING id,hash_id
   )
@@ -182,7 +182,7 @@ queryInsert = [sql|
 -- | When documents are inserted
 -- ReturnType after insertion
 data ReturnId = ReturnId { reInserted :: Bool -- if the document is inserted (True: is new, False: is not new)
-                         , reId :: NodeId -- always pure the id of the document (whether new or not)
+                         , reId :: NodeId -- always return the id of the document (whether new or not)
                          -- this is the uniq id in the database
                          , reUniqId :: Text -- Hash Id with concatenation of sha parameters
                          } deriving (Show, Generic)
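The comment on `ON CONFLICT ... DO NOTHING` points at a well-known PostgreSQL gotcha: `RETURNING` only yields rows that were actually inserted, so the ids of pre-existing documents must be fetched separately. A hedged sketch of the usual pattern (plausibly what the elided part of this CTE does; column names are illustrative), which also yields the `reInserted` flag:

{-# LANGUAGE QuasiQuotes #-}

import Database.PostgreSQL.Simple (Query)
import Database.PostgreSQL.Simple.SqlQQ (sql)

-- Rows that hit the conflict are silently skipped by RETURNING, so a
-- second SELECT joins the input back on hash_id to recover their ids.
upsertIds :: Query
upsertIds = [sql|
  WITH input_rows(hash_id) AS (?)
  , ins AS (
      INSERT INTO contexts (hash_id)
      SELECT hash_id FROM input_rows
      ON CONFLICT (hash_id) DO NOTHING
      RETURNING id, hash_id
  )
  SELECT true AS inserted, id, hash_id FROM ins   -- freshly inserted
  UNION ALL
  SELECT false, c.id, c.hash_id                   -- already existed
    FROM input_rows i
    JOIN contexts c USING (hash_id);
|]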
@@ -192,7 +192,7 @@ getContextsForNgramsTerms cId ngramsTerms = do

--- | Query the `context_node_ngrams` table and pure ngrams for given
+-- | Query the `context_node_ngrams` table and return ngrams for given
 -- `context_id` and `list_id`.
 -- WARNING: `context_node_ngrams` can be outdated. This is because it
 -- is expensive to keep all ngrams matching a given context and if
@@ -215,7 +215,7 @@ getContextNgrams contextId listId = do
       AND node_id = ? |]

--- | Query the `contexts` table and pure ngrams for given context_id
+-- | Query the `contexts` table and return ngrams for given context_id
 -- and list_id that match the search tsvector.
 -- NOTE This is poor man's tokenization that is used as a hint for the
 -- frontend highlighter.
@@ -7,7 +7,7 @@ Maintainer : team@gargantext.org
 Stability   : experimental
 Portability : POSIX

-Given a Root Node, pure the Tree of the Node as a directed acyclic graph
+Given a Root Node, return the Tree of the Node as a directed acyclic graph
 (Tree).

 -- TODO delete node, if not owned, then suppress the link only
@@ -119,7 +119,7 @@ pollJob limit offset jid je = do
           me = either (Just . T.pack . show) (const Nothing) r
       in pure (ls, st, me)

 -- /NOTE/: We need to be careful with the ordering of the logs here:
--- we want to pure the logs ordered from the newest to the oldest,
+-- we want to return the logs ordered from the newest to the oldest,
 -- because the API will use 'limit' to show only the newest ones,
 -- taking 'limit' of them from the front of the list.
 --
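A hedged sketch of the ordering contract described in the NOTE (timestamps and names invented for illustration): newest first, so taking `limit` entries from the front keeps the most recent ones.

import Data.List (sortOn)
import Data.Ord (Down(..))

-- Sort newest first; 'take limit' then keeps the most recent logs.
newestLogs :: Ord t => Int -> [(t, msg)] -> [(t, msg)]
newestLogs limit = take limit . sortOn (Down . fst)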
@@ -187,7 +187,7 @@ runJ (QueuedJob a f) = do
   let readLogs = readTVarIO logs
   pure (RunningJob act readLogs)

--- | Wait for a running job to pure (blocking).
+-- | Wait for a running job to return (blocking).
 waitJ :: RunningJob w a -> IO (Either SomeException a)
 waitJ (RunningJob act _) = waitCatch act