Commit beae3400 authored by Alexandre Delanoë

[MERGE] Upgrading postgresql and ngrams repo changes

parents 57a75ba6 91a2d6e2
@@ -229,3 +229,63 @@ Playground is located at http://localhost:8008/gql
  }
}
```
## PostgreSQL
### Upgrading using Docker
https://www.cloudytuts.com/tutorials/docker/how-to-upgrade-postgresql-in-docker-and-kubernetes/
To upgrade PostgreSQL in Docker, for example from 11.x to 14.x, first dump all databases from the running container:
```sh
docker exec -i <container-id> pg_dumpall -U gargantua > 11-db.dump
```
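Before going further, it is worth checking that the dump actually contains data; `pg_dumpall` output is plain SQL, so the first lines should be readable statements:
```sh
head -n 5 11-db.dump
ls -lh 11-db.dump
```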
Then shut down the container and change the `image` field in
`devops/docker/docker-compose.yaml` to `postgres:14`. It is also good practice to bind the new container to a fresh volume, say `garg-pgdata14`. If you want to keep the same volume instead, remember to remove the old one first:
```sh
docker-compose rm postgres
docker volume rm docker_garg-pgdata
```
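For reference, after these edits the `postgres` service might look roughly like the sketch below (credentials and extra mounts omitted; see the `docker-compose.yaml` diff further down for the exact change made in this commit):
```yaml
services:
  postgres:
    image: 'postgres:14'
    network_mode: host
    environment:
      POSTGRES_USER: gargantua
      POSTGRES_DB: gargandbV5
    volumes:
      # fresh volume so the old postgres:11 data dir is left untouched
      - garg-pgdata14:/var/lib/postgresql/data

volumes:
  garg-pgdata14:
```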
Now, start the container and execute:
```sh
# need to drop the empty DB first, since schema will be created when restoring the dump
docker exec -i <new-container-id> dropdb -U gargantua gargandbV5
# recreate the database, empty, without the schema
docker exec -i <new-container-id> createdb -U gargantua gargandbV5
# now we can restore the dump
docker exec -i <new-container-id> psql -U gargantua -d gargandbV5 < 11-db.dump
```
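To verify the restore, a quick sanity check is to connect and count rows in a known table (the `nodes` table from the Gargantext schema, for instance):
```sh
docker exec -i <new-container-id> psql -U gargantua -d gargandbV5 -c 'SELECT version();'
docker exec -i <new-container-id> psql -U gargantua -d gargandbV5 -c 'SELECT count(*) FROM nodes;'
```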
### Upgrading a native installation
There is a solution using `pg_upgradecluster`, but it requires managing both the version 13 and the version 14 clusters at once. Here is a simpler dump-and-restore upgrade.
First save your data:
```sh
sudo su postgres
pg_dumpall > gargandb.dump
```
Upgrade postgresql:
```sh
sudo apt install postgresql-14 postgresql-client-14
sudo apt remove --purge postgresql-13
```
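You can then check which clusters exist and which ports they listen on with `pg_lsclusters` (part of postgresql-common on Debian/Ubuntu):
```sh
pg_lsclusters
```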
Restore your data:
```sh
sudo su postgres
psql < gargandb.dump
```
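A quick way to confirm the databases came back (still as the postgres user):
```sh
psql -l | grep gargandbV5
```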
You may need to restore the `gargantua` role password:
```sql
ALTER ROLE gargantua PASSWORD 'yourPasswordIn_gargantext.ini';
```
You may also need to change the database port to 5433 in your
gargantext.ini file, since the freshly created version 14 cluster may listen there while the old one kept 5432.
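To check which port the new cluster is actually listening on (`SHOW port` is standard PostgreSQL):
```sh
sudo -u postgres psql -c 'SHOW port;'
```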
version: '3'
services:
+  #postgres11:
+  #  #image: 'postgres:latest'
+  #  image: 'postgres:11'
+  #  network_mode: host
+  #  #command: ["postgres", "-c", "log_statement=all"]
+  #  #ports:
+  #  #- 5432:5432
+  #  environment:
+  #    POSTGRES_USER: gargantua
+  #    POSTGRES_PASSWORD: C8kdcUrAQy66U
+  #    POSTGRES_DB: gargandbV5
+  #  volumes:
+  #    - garg-pgdata:/var/lib/postgresql/data
+  #    - ../:/gargantext
+  #    - ../dbs:/dbs
+  #    - ../postgres/schema.sql:/docker-entrypoint-initdb.d/schema.sql:ro

  postgres:
    #image: 'postgres:latest'
-    image: 'postgres:11'
+    image: 'postgres:14'
    network_mode: host
    #command: ["postgres", "-c", "log_statement=all"]
    #ports:
@@ -13,7 +30,7 @@ services:
      POSTGRES_PASSWORD: C8kdcUrAQy66U
      POSTGRES_DB: gargandbV5
    volumes:
-      - garg-pgdata:/var/lib/postgresql/data
+      - garg-pgdata14:/var/lib/postgresql-11/data
      - ../:/gargantext
      - ../dbs:/dbs
      - ../postgres/schema.sql:/docker-entrypoint-initdb.d/schema.sql:ro
@@ -44,5 +61,6 @@ services:
      - 5000:5000
volumes:
-  garg-pgdata:
+  #garg-pgdata:
+  garg-pgdata14:
  js-cache:
-- Start a new transaction. In case data migration goes wrong, we are
-- back to our original table.
BEGIN;
-- we will migrate data here
-- rename old table and create a new one
ALTER TABLE public.node_stories RENAME TO node_stories_old;
CREATE TABLE public.node_stories (
id SERIAL,
node_id INTEGER NOT NULL,
version INTEGER NOT NULL,
ngrams_type_id INTEGER NOT NULL,
ngrams_id INTEGER NOT NULL,
--children TEXT[],
ngrams_repo_element jsonb DEFAULT '{}'::jsonb NOT NULL,
PRIMARY KEY (id),
FOREIGN KEY (node_id) REFERENCES public.nodes(id) ON DELETE CASCADE,
FOREIGN KEY (ngrams_id) REFERENCES public.ngrams(id) ON DELETE CASCADE
);
ALTER TABLE public.node_stories OWNER TO gargantua;
CREATE UNIQUE INDEX ON public.node_stories USING btree (node_id, ngrams_type_id, ngrams_id);
-- Authors (ngrams_type_id = 1), see G.D.S.Ngrams.hs -> ngramsTypeId
INSERT INTO public.node_stories
(node_id, version, ngrams_type_id, ngrams_id, ngrams_repo_element)
SELECT node_id, (archive->'version')::int, 1, ngrams.id, j.value
FROM node_stories_old
CROSS JOIN jsonb_each(archive->'state'->'Authors') AS j
JOIN ngrams ON terms = j.key;
-- we will leave children for later, small steps
-- INSERT INTO public.node_stories
-- (node_id, version, ngrams_type_id, ngrams_id, children, ngrams_repo_element)
-- SELECT node_id, (archive->'version')::int, 1, ngrams.id, c.children, (j.value - 'children')
-- FROM node_stories_old
-- CROSS JOIN jsonb_each(archive->'state'->'Authors') AS j
-- CROSS JOIN LATERAL (SELECT array_agg(d.elem) AS children FROM jsonb_array_elements_text(j.value->'children') AS d(elem)) AS c
-- JOIN ngrams ON terms = j.key;
-- Institutes (ngrams_type_id = 2)
INSERT INTO public.node_stories
(node_id, version, ngrams_type_id, ngrams_id, ngrams_repo_element)
SELECT node_id, (archive->'version')::int, 2, ngrams.id, j.value
FROM node_stories_old
CROSS JOIN jsonb_each(archive->'state'->'Institutes') AS j
JOIN ngrams ON terms = j.key;
-- Sources (ngrams_type_id = 3)
INSERT INTO public.node_stories
(node_id, version, ngrams_type_id, ngrams_id, ngrams_repo_element)
SELECT node_id, (archive->'version')::int, 3, ngrams.id, j.value
FROM node_stories_old
CROSS JOIN jsonb_each(archive->'state'->'Sources') AS j
JOIN ngrams ON terms = j.key;
-- NgramsTerms (ngrams_type_id = 4)
INSERT INTO public.node_stories
(node_id, version, ngrams_type_id, ngrams_id, ngrams_repo_element)
SELECT node_id, (archive->'version')::int, 4, ngrams.id, j.value
FROM node_stories_old
CROSS JOIN jsonb_each(archive->'state'->'NgramsTerms') AS j
JOIN ngrams ON terms = j.key;
-- finally, write out the stuff
COMMIT;
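After the migration commits, a quick sanity check is to compare per-type row counts in the new table against the sizes of the old JSON state (a sketch; adjust to your data):
```sql
-- rows per ngrams type in the migrated table
SELECT ngrams_type_id, count(*)
FROM public.node_stories
GROUP BY ngrams_type_id
ORDER BY ngrams_type_id;
```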
@@ -36,4 +36,3 @@ ALTER TABLE public.node_story_archive_history OWNER TO gargantua;
-- UNION (SELECT node_id, sid, 3 AS ngrams_type_id, "Sources" AS patch FROM q WHERE "Sources" IS NOT NULL)
--   ORDER BY node_id, ngrams_type_id, sid
-- ) AS t;
@@ -5,7 +5,7 @@ cabal-version: 1.12
-- see: https://github.com/sol/hpack
name:           gargantext
-version:        0.0.6.1
+version:        0.0.6
synopsis:       Search, map, share
description:    Please see README.md
category:       Data
@@ -76,7 +76,6 @@ library
      Gargantext.Core.Text.Metrics.TFICF
      Gargantext.Core.Text.Metrics.CharByChar
      Gargantext.Core.Text.Metrics.Count
-      Gargantext.Core.Text.Prepare
      Gargantext.Core.Text.Search
      Gargantext.Core.Text.Terms
      Gargantext.Core.Text.Terms.Mono
@@ -97,7 +96,6 @@ library
      Gargantext.Core.Viz.Phylo.SynchronicClustering
      Gargantext.Core.Viz.Types
  other-modules:
-      -- ConcurrentTest
      Gargantext.API.Admin.Auth
      Gargantext.API.Admin.FrontEnd
      Gargantext.API.Admin.Orchestrator
@@ -318,8 +316,8 @@ library
      Gargantext.Database.Types
      Gargantext.Utils.Aeson
      Gargantext.Utils.JohnSnowNLP
-      Gargantext.Utils.Servant
      Gargantext.Utils.SpacyNLP
+      Gargantext.Utils.Servant
      Gargantext.Utils.UTCTime
      Paths_gargantext
  hs-source-dirs:
@@ -493,9 +491,9 @@ library
    , transformers-base
    , tuple
    , unordered-containers
-    , uri-encode
    , utf8-string
    , uuid
+    , uri-encode
    , validity
    , vector
    , wai
@@ -29,6 +29,7 @@ module Gargantext.API.Ngrams
  , TableNgramsApiPut
  , getTableNgrams
+  , getTableNgramsCorpus
  , setListNgrams
  --, rmListNgrams TODO fix before exporting
  , apiNgramsTableCorpus
@@ -258,15 +259,6 @@ setListNgrams listId ngramsType ns = do
  saveNodeStory

-currentVersion :: HasNodeStory env err m
-               => ListId -> m Version
-currentVersion listId = do
-  --nls <- getRepo [listId]
-  pool <- view connPool
-  nls <- liftBase $ getNodeStory pool listId
-  pure $ nls ^. unNodeStory . at listId . _Just . a_version

newNgramsFromNgramsStatePatch :: NgramsStatePatch' -> [Ngrams]
newNgramsFromNgramsStatePatch p =
  [ text2ngrams (unNgramsTerm n)
@@ -19,9 +19,11 @@ import Control.Lens (_Just, (^.), at, view, At, Index, IxValue)
import Control.Monad.Reader
import Data.HashMap.Strict (HashMap)
import Data.Hashable (Hashable)
+import Data.Pool (withResource)
import Data.Set (Set)
import Data.Validity
import Gargantext.API.Ngrams.Types
+import Gargantext.Core.NodeStory
import Gargantext.Core.Types (ListType(..), NodeId, NodeType(..), ListId)
import Gargantext.Database.Prelude (CmdM, HasConnectionPool(..))
import Gargantext.Database.Schema.Ngrams (NgramsType)
@@ -29,7 +31,6 @@ import Gargantext.Prelude
import qualified Data.HashMap.Strict as HM
import qualified Data.Map.Strict as Map
import qualified Data.Set as Set
-import Gargantext.Core.NodeStory
import qualified Gargantext.Core.NodeStoryFile as NSF
@@ -202,15 +203,16 @@ migrateFromDirToDb :: (CmdM env err m, HasNodeStory env err m)
                   => m ()
migrateFromDirToDb = do
  pool <- view connPool
-  listIds <- liftBase $ getNodesIdWithType pool NodeList
+  withResource pool $ \c -> do
+    listIds <- liftBase $ getNodesIdWithType c NodeList
    printDebug "[migrateFromDirToDb] listIds" listIds
    (NodeStory nls) <- NSF.getRepoReadConfig listIds
    printDebug "[migrateFromDirToDb] nls" nls
    _ <- mapM (\(nId, a) -> do
-                n <- liftBase $ nodeExists pool nId
+                n <- liftBase $ nodeExists c nId
                case n of
-                  False -> pure 0
-                  True  -> liftBase $ upsertNodeArchive pool nId a
+                  False -> pure ()
+                  True  -> liftBase $ upsertNodeStories c nId a
              ) $ Map.toList nls
  --_ <- nodeStoryIncs (Just $ NodeStory nls) listIds
  pure ()
@@ -28,7 +28,7 @@ import Data.String (IsString, fromString)
import Data.Swagger hiding (version, patch)
import Data.Text (Text, pack, strip)
import Data.Validity
-import Database.PostgreSQL.Simple.FromField (FromField, fromField, fromJSONField, ResultError(ConversionFailed), returnError)
+import Database.PostgreSQL.Simple.FromField (FromField, fromField, fromJSONField)
import Database.PostgreSQL.Simple.ToField (ToField, toJSONField, toField)
import GHC.Generics (Generic)
import Gargantext.Core.Text (size)
@@ -44,7 +44,6 @@ import Servant.Job.Utils (jsonOptions)
import Test.QuickCheck (elements, frequency)
import Test.QuickCheck.Arbitrary (Arbitrary, arbitrary)
import qualified Data.HashMap.Strict.InsOrd as InsOrdHashMap
-import qualified Data.List as List
import qualified Data.Map.Strict as Map
import qualified Data.Map.Strict.Patch as PM
import qualified Data.Set as Set
@@ -124,7 +123,7 @@ instance (ToJSONKey a, ToSchema a) => ToSchema (MSet a) where
------------------------------------------------------------------------
newtype NgramsTerm = NgramsTerm { unNgramsTerm :: Text }
-  deriving (Ord, Eq, Show, Generic, ToJSONKey, ToJSON, FromJSON, Semigroup, Arbitrary, Serialise, ToSchema, Hashable, NFData)
+  deriving (Ord, Eq, Show, Generic, ToJSONKey, ToJSON, FromJSON, Semigroup, Arbitrary, Serialise, ToSchema, Hashable, NFData, FromField, ToField)
instance IsHashable NgramsTerm where
  hash (NgramsTerm t) = hash t
instance Monoid NgramsTerm where
@@ -133,18 +132,6 @@ instance FromJSONKey NgramsTerm where
  fromJSONKey = FromJSONKeyTextParser $ \t -> pure $ NgramsTerm $ strip t
instance IsString NgramsTerm where
  fromString s = NgramsTerm $ pack s
-instance FromField NgramsTerm
-  where
-    fromField field mb = do
-      v <- fromField field mb
-      case fromJSON v of
-        Success a -> pure $ NgramsTerm $ strip a
-        Error _err -> returnError ConversionFailed field
-                    $ List.intercalate " " [ "cannot parse hyperdata for JSON: "
-                                           , show v
-                                           ]
-instance ToField NgramsTerm where
-  toField (NgramsTerm n) = toField n

data RootParent = RootParent
@@ -164,19 +151,20 @@ data NgramsRepoElement = NgramsRepoElement
  , _nre_children :: !(MSet NgramsTerm)
  }
  deriving (Ord, Eq, Show, Generic)

deriveJSON (unPrefix "_nre_") ''NgramsRepoElement
-- TODO
-- if ngrams & not size => size
-- drop occurrences

makeLenses ''NgramsRepoElement

instance ToSchema NgramsRepoElement where
  declareNamedSchema = genericDeclareNamedSchema (unPrefixSwagger "_nre_")
+instance Serialise NgramsRepoElement
+instance FromField NgramsRepoElement where
+  fromField = fromJSONField
+instance ToField NgramsRepoElement where
+  toField = toJSONField

instance Serialise (MSet NgramsTerm)
-instance Serialise NgramsRepoElement

data NgramsElement =
  NgramsElement { _ne_ngrams :: NgramsTerm
@@ -67,49 +67,57 @@ module Gargantext.Core.NodeStory
  , getNodeArchiveHistory
  , Archive(..)
  , initArchive
+  , insertArchiveList
+  , deleteArchiveList
+  , updateArchiveList
  , a_history
  , a_state
  , a_version
  , nodeExists
+  , runPGSQuery
+  , runPGSAdvisoryLock
+  , runPGSAdvisoryUnlock
+  , runPGSAdvisoryXactLock
  , getNodesIdWithType
  , readNodeStoryEnv
-  , upsertNodeArchive
-  , getNodeStory )
+  , upsertNodeStories
+  , getNodeStory
+  , nodeStoriesQuery
+  , currentVersion )
  where
-- import Debug.Trace (traceShow)
import Control.Debounce (mkDebounce, defaultDebounceSettings, debounceFreq, debounceAction)
import Codec.Serialise.Class
-import Control.Arrow (returnA)
-import Control.Concurrent (MVar(), withMVar, newMVar, modifyMVar_)
+import Control.Concurrent (MVar(), newMVar, modifyMVar_)
import Control.Exception (catch, throw, SomeException(..))
-import Control.Lens (makeLenses, Getter, (^.), (.~), traverse)
+import Control.Lens (makeLenses, Getter, (^.), (.~), (%~), _Just, at, traverse, view)
import Control.Monad.Except
import Control.Monad.Reader
import Data.Aeson hiding ((.=), decode)
import Data.ByteString.Char8 (hPutStrLn)
import Data.Map.Strict (Map)
-import Data.Maybe (catMaybes, mapMaybe)
+import Data.Maybe (catMaybes)
import Data.Monoid
import Data.Pool (Pool, withResource)
import Data.Semigroup
-import qualified Database.PostgreSQL.Simple as PGS
import Database.PostgreSQL.Simple.SqlQQ (sql)
import Database.PostgreSQL.Simple.FromField (FromField(fromField), fromJSONField)
import Data.Profunctor.Product.TH (makeAdaptorAndInstance)
import GHC.Generics (Generic)
import Gargantext.API.Ngrams.Types
-import Gargantext.Core.Types (NodeId(..), NodeType)
+import Gargantext.Core.Types (ListId, NodeId(..), NodeType)
import Gargantext.Core.Utils.Prefix (unPrefix)
-import Gargantext.Database.Admin.Config (nodeTypeId)
-import Gargantext.Database.Prelude (CmdM', HasConnectionPool, HasConfig)
+import Gargantext.Database.Prelude (CmdM', HasConnectionPool(..), HasConfig)
import Gargantext.Database.Query.Table.Node.Error (HasNodeError())
import Gargantext.Prelude
-import Opaleye (Column, DefaultFromField(..), Insert(..), Select, SqlInt4, SqlJsonb, Table, Update(..), (.==), fromPGSFromField, rCount, restrict, runInsert, runSelect, runUpdate, selectTable, sqlInt4, sqlValueJSONB, tableField, updateEasy)
-import Opaleye.Internal.Table (Table(..))
+import Opaleye (DefaultFromField(..), SqlJsonb, fromPGSFromField)
import System.IO (stderr)
import qualified Data.Map.Strict as Map
import qualified Data.Map.Strict.Patch as PM
+import qualified Data.Set as Set
+import qualified Data.Text as Text
+import qualified Database.PostgreSQL.Simple as PGS
import qualified Gargantext.Database.Query.Table.Ngrams as TableNgrams

------------------------------------------------------------------------
@@ -185,6 +193,13 @@ instance DefaultFromField SqlJsonb (Archive NgramsState' NgramsStatePatch')
  where
    defaultFromField = fromPGSFromField

+-- | Combine `NgramsState'`. This is because the structure is (Map
+-- NgramsType (Map ...)) and the default `(<>)` operator is
+-- left-biased
+-- (https://hackage.haskell.org/package/containers-0.6.6/docs/Data-Map-Internal.html#v:union)
+combineState :: NgramsState' -> NgramsState' -> NgramsState'
+combineState = Map.unionWith (<>)
+
-- TODO Semigroup instance for unions
-- TODO check this
instance (Semigroup s, Semigroup p) => Semigroup (Archive s p) where
@@ -239,138 +254,282 @@ makeLenses ''Archive
-----------------------------------------
-data NodeStoryPoly a b = NodeStoryDB { node_id :: a
-                                     , archive :: b }
+data NodeStoryPoly nid v ngtid ngid nre =
+  NodeStoryDB { node_id             :: nid
+              , version             :: v
+              , ngrams_type_id      :: ngtid
+              , ngrams_id           :: ngid
+              , ngrams_repo_element :: nre }
  deriving (Eq)

-type ArchiveQ = Archive NgramsState' NgramsStatePatch'
-
-type NodeStoryWrite = NodeStoryPoly (Column SqlInt4) (Column SqlJsonb)
-type NodeStoryRead = NodeStoryPoly (Column SqlInt4) (Column SqlJsonb)
+data NodeStoryArchivePoly nid a =
+  NodeStoryArchiveDB { a_node_id :: nid
+                     , archive   :: a }
+  deriving (Eq)

$(makeAdaptorAndInstance "pNodeStory" ''NodeStoryPoly)
+$(makeAdaptorAndInstance "pNodeArchiveStory" ''NodeStoryArchivePoly)
+
+-- type NodeStoryWrite = NodeStoryPoly (Column SqlInt4) (Column SqlInt4) (Column SqlInt4) (Column SqlInt4) (Column SqlJsonb)
+-- type NodeStoryRead = NodeStoryPoly (Column SqlInt4) (Column SqlInt4) (Column SqlInt4) (Column SqlInt4) (Column SqlJsonb)
+-- type NodeStoryArchiveWrite = NodeStoryArchivePoly (Column SqlInt4) (Column SqlJsonb)
+-- type NodeStoryArchiveRead = NodeStoryArchivePoly (Column SqlInt4) (Column SqlJsonb)
+
+type ArchiveList = Archive NgramsState' NgramsStatePatch'
+
+-- DB stuff

-runPGSExecuteMany :: (PGS.ToRow q) => Pool PGS.Connection -> PGS.Query -> [q] -> IO Int64
-runPGSExecuteMany pool qs a = withResource pool $ \c -> catch (PGS.executeMany c qs a) (printError c)
+runPGSExecute :: (PGS.ToRow q) => PGS.Connection -> PGS.Query -> q -> IO Int64
+runPGSExecute c qs a = catch (PGS.execute c qs a) printError
  where
-    printError _c (SomeException e) = do
+    printError (SomeException e) = do
      --q' <- PGS.formatQuery c qs a
      --hPutStrLn stderr q'
      throw (SomeException e)

-runPGSQuery :: (PGS.FromRow r, PGS.ToRow q) => Pool PGS.Connection -> PGS.Query -> q -> IO [r]
-runPGSQuery pool q a = withResource pool $ \c -> catch (PGS.query c q a) (printError c)
+runPGSExecuteMany :: (PGS.ToRow q) => PGS.Connection -> PGS.Query -> [q] -> IO Int64
+runPGSExecuteMany c qs a = catch (PGS.executeMany c qs a) printError
  where
-    printError c (SomeException e) = do
+    printError (SomeException e) = do
+      --q' <- PGS.formatQuery c qs a
+      --hPutStrLn stderr q'
+      throw (SomeException e)
+
+runPGSQuery :: (PGS.FromRow r, PGS.ToRow q) => PGS.Connection -> PGS.Query -> q -> IO [r]
+runPGSQuery c q a = catch (PGS.query c q a) printError
+  where
+    printError (SomeException e) = do
      q' <- PGS.formatQuery c q a
      hPutStrLn stderr q'
      throw (SomeException e)

-nodeExists :: Pool PGS.Connection -> NodeId -> IO Bool
-nodeExists pool nId = (== [PGS.Only True])
-  <$> runPGSQuery pool [sql|SELECT true FROM nodes WHERE id = ? AND ? |] (nId, True)
+runPGSAdvisoryLock :: PGS.Connection -> Int -> IO ()
+runPGSAdvisoryLock c id = do
+  _ <- runPGSQuery c [sql| SELECT pg_advisory_lock(?) |] (PGS.Only id) :: IO [PGS.Only ()]
+  pure ()
+
+runPGSAdvisoryUnlock :: PGS.Connection -> Int -> IO ()
+runPGSAdvisoryUnlock c id = do
+  _ <- runPGSQuery c [sql| SELECT pg_advisory_unlock(?) |] (PGS.Only id) :: IO [PGS.Only Bool]
+  pure ()
+
+runPGSAdvisoryXactLock :: PGS.Connection -> Int -> IO ()
+runPGSAdvisoryXactLock c id = do
+  _ <- runPGSQuery c [sql| SELECT pg_advisory_xact_lock(?) |] (PGS.Only id) :: IO [PGS.Only ()]
+  pure ()
+
+nodeExists :: PGS.Connection -> NodeId -> IO Bool
+nodeExists c nId = (== [PGS.Only True])
+  <$> runPGSQuery c [sql| SELECT true FROM nodes WHERE id = ? LIMIT 1 |] (PGS.Only nId)

-getNodesIdWithType :: Pool PGS.Connection -> NodeType -> IO [NodeId]
-getNodesIdWithType pool nt = do
-  ns <- runPGSQuery pool query (nodeTypeId nt, True)
+getNodesIdWithType :: PGS.Connection -> NodeType -> IO [NodeId]
+getNodesIdWithType c nt = do
+  ns <- runPGSQuery c query (PGS.Only nt)
  pure $ map (\(PGS.Only nId) -> NodeId nId) ns
  where
    query :: PGS.Query
-    query = [sql|SELECT id FROM nodes WHERE typename = ? AND ? |]
+    query = [sql| SELECT id FROM nodes WHERE typename = ? |]

-nodeStoryTable :: Table NodeStoryRead NodeStoryWrite
-nodeStoryTable =
-  Table "node_stories"
-    ( pNodeStory NodeStoryDB { node_id = tableField "node_id"
-                             , archive = tableField "archive" } )
-
-nodeStorySelect :: Select NodeStoryRead
-nodeStorySelect = selectTable nodeStoryTable
+-- nodeStoryTable :: Table NodeStoryRead NodeStoryWrite
+-- nodeStoryTable =
+--   Table "node_stories"
+--     ( pNodeStory NodeStoryDB { node_id             = tableField "node_id"
+--                              , version             = tableField "version"
+--                              , ngrams_type_id      = tableField "ngrams_type_id"
+--                              , ngrams_id           = tableField "ngrams_id"
+--                              , ngrams_repo_element = tableField "ngrams_repo_element"
+--                              } )
+
+-- nodeStoryArchiveTable :: Table NodeStoryArchiveRead NodeStoryArchiveWrite
+-- nodeStoryArchiveTable =
+--   Table "node_story_archive_history"
+--     ( pNodeArchiveStory NodeStoryArchiveDB { a_node_id = tableField "node_id"
+--                                            , archive   = tableField "archive" } )
+
+-- nodeStorySelect :: Select NodeStoryRead
+-- nodeStorySelect = selectTable nodeStoryTable

-- TODO Check ordering, "first patch in the _a_history list is the most recent"
-getNodeArchiveHistory :: Pool PGS.Connection -> NodeId -> IO [NgramsStatePatch']
-getNodeArchiveHistory pool nodeId = do
-  as <- runPGSQuery pool query (nodeId, True)
-  let asTuples = mapMaybe (\(ngrams_type_id, ngrams, patch) -> (\ntId -> (ntId, ngrams, patch)) <$> (TableNgrams.fromNgramsTypeId ngrams_type_id)) as
-  pure $ (\(ntId, terms, patch) -> fst $ PM.singleton ntId (NgramsTablePatch $ fst $ PM.singleton terms patch)) <$> asTuples
+getNodeArchiveHistory :: PGS.Connection -> NodeId -> IO [NgramsStatePatch']
+getNodeArchiveHistory c nodeId = do
+  as <- runPGSQuery c query (PGS.Only nodeId) :: IO [(TableNgrams.NgramsType, NgramsTerm, NgramsPatch)]
+  pure $ (\(ngramsType, terms, patch) -> fst $ PM.singleton ngramsType (NgramsTablePatch $ fst $ PM.singleton terms patch)) <$> as
  where
    query :: PGS.Query
-    query = [sql|SELECT ngrams_type_id, terms, patch
+    query = [sql| SELECT ngrams_type_id, terms, patch
                  FROM node_story_archive_history
                  JOIN ngrams ON ngrams.id = ngrams_id
-                 WHERE node_id = ? AND ? |]
+                  WHERE node_id = ? |]
+
+ngramsIdQuery :: PGS.Query
+ngramsIdQuery = [sql| SELECT id FROM ngrams WHERE terms = ? |]

-insertNodeArchiveHistory :: Pool PGS.Connection -> NodeId -> [NgramsStatePatch'] -> IO ()
+insertNodeArchiveHistory :: PGS.Connection -> NodeId -> [NgramsStatePatch'] -> IO ()
insertNodeArchiveHistory _ _ [] = pure ()
-insertNodeArchiveHistory pool nodeId (h:hs) = do
+insertNodeArchiveHistory c nodeId (h:hs) = do
  let tuples = mconcat $ (\(nType, (NgramsTablePatch patch)) ->
                           (\(term, p) ->
-                             (nodeId, TableNgrams.ngramsTypeId nType, term, p)) <$> PM.toList patch) <$> PM.toList h :: [(NodeId, TableNgrams.NgramsTypeId, NgramsTerm, NgramsPatch)]
-  tuplesM <- mapM (\(nId, nTypeId, term, patch) -> do
-                      ngrams <- runPGSQuery pool ngramsQuery (term, True)
-                      pure $ (\(PGS.Only termId) -> (nId, nTypeId, termId, term, patch)) <$> (headMay ngrams)
-                  ) tuples :: IO [Maybe (NodeId, TableNgrams.NgramsTypeId, Int, NgramsTerm, NgramsPatch)]
-  _ <- runPGSExecuteMany pool query $ ((\(nId, nTypeId, termId, _term, patch) -> (nId, nTypeId, termId, patch)) <$> (catMaybes tuplesM))
-  _ <- insertNodeArchiveHistory pool nodeId hs
+                             (nodeId, nType, term, p)) <$> PM.toList patch) <$> PM.toList h :: [(NodeId, TableNgrams.NgramsType, NgramsTerm, NgramsPatch)]
+  tuplesM <- mapM (\(nId, nType, term, patch) -> do
+                      ngrams <- runPGSQuery c ngramsIdQuery (PGS.Only term)
+                      pure $ (\(PGS.Only termId) -> (nId, nType, termId, term, patch)) <$> (headMay ngrams)
+                  ) tuples :: IO [Maybe (NodeId, TableNgrams.NgramsType, Int, NgramsTerm, NgramsPatch)]
+  _ <- runPGSExecuteMany c query $ ((\(nId, nType, termId, _term, patch) -> (nId, nType, termId, patch)) <$> (catMaybes tuplesM))
+  _ <- insertNodeArchiveHistory c nodeId hs
  pure ()
  where
-    ngramsQuery :: PGS.Query
-    ngramsQuery = [sql| SELECT id FROM ngrams WHERE terms = ? AND ? |]
    query :: PGS.Query
    query = [sql| INSERT INTO node_story_archive_history(node_id, ngrams_type_id, ngrams_id, patch) VALUES (?, ?, ?, ?) |]

-getNodeStory :: Pool PGS.Connection -> NodeId -> IO NodeListStory
-getNodeStory pool (NodeId nodeId) = do
-  res <- withResource pool $ \c -> runSelect c query :: IO [NodeStoryPoly NodeId ArchiveQ]
-  withArchive <- mapM (\(NodeStoryDB { node_id = nId, archive = Archive { .. } }) -> do
-                         --a <- getNodeArchiveHistory pool nId
-                         let a = [] :: [NgramsStatePatch']
-                         -- Don't read whole history. Only state is needed and most recent changes.
-                         pure (nId, Archive { _a_history = a, .. })) res
-  pure $ NodeStory $ Map.fromListWith (<>) withArchive
+getNodeStory :: PGS.Connection -> NodeId -> IO NodeListStory
+getNodeStory c nId@(NodeId nodeId) = do
+  --res <- withResource pool $ \c -> runSelect c query :: IO [NodeStoryPoly NodeId Version Int Int NgramsRepoElement]
+  res <- runPGSQuery c nodeStoriesQuery (PGS.Only nodeId) :: IO [(Version, TableNgrams.NgramsType, NgramsTerm, NgramsRepoElement)]
+  -- We have multiple rows with same node_id and different (ngrams_type_id, ngrams_id).
+  -- Need to create a map: {<node_id>: {<ngrams_type_id>: {<ngrams_id>: <data>}}}
+  let dbData = map (\(version, ngramsType, ngrams, ngrams_repo_element) ->
+                      Archive { _a_version = version
+                              , _a_history = []
+                              , _a_state = Map.singleton ngramsType $ Map.singleton ngrams ngrams_repo_element }) res
+  -- TODO (<>) for Archive doesn't concatenate states!
+  -- NOTE When concatenating, check that the same version is for all states
+  pure $ NodeStory $ Map.singleton nId $ foldl combine mempty dbData
  --pure $ NodeStory $ Map.fromListWith (<>) $ (\(NodeStoryDB nId a) -> (nId, a)) <$> res
  where
-    query :: Select NodeStoryRead
-    query = proc () -> do
-      row@(NodeStoryDB node_id _) <- nodeStorySelect -< ()
-      restrict -< node_id .== sqlInt4 nodeId
-      returnA -< row
+    -- query :: Select NodeStoryRead
+    -- query = proc () -> do
+    --   row@(NodeStoryDB node_id _) <- nodeStorySelect -< ()
+    --   restrict -< node_id .== sqlInt4 nodeId
+    --   returnA -< row
+    combine a1 a2 = a1 & a_state %~ combineState (a2 ^. a_state)
+                       & a_version .~ (a2 ^. a_version)  -- version should be updated from list, not taken from the empty Archive
+
+nodeStoriesQuery :: PGS.Query
+nodeStoriesQuery = [sql| SELECT version, ngrams_type_id, terms, ngrams_repo_element
+                         FROM node_stories
+                         JOIN ngrams ON ngrams.id = ngrams_id
+                         WHERE node_id = ? |]
+
+type ArchiveStateList = [(TableNgrams.NgramsType, NgramsTerm, NgramsRepoElement)]
+
+-- Functions to convert archive state (which is a Map NgramsType (Map
+-- NgramsTerm NgramsRepoElement)) to/from a flat list
+archiveStateAsList :: NgramsState' -> ArchiveStateList
+archiveStateAsList s = mconcat $ (\(nt, ntm) -> (\(n, nre) -> (nt, n, nre)) <$> Map.toList ntm) <$> Map.toList s
+
+archiveStateFromList :: ArchiveStateList -> NgramsState'
+archiveStateFromList l = Map.fromListWith (<>) $ (\(nt, t, nre) -> (nt, Map.singleton t nre)) <$> l

-insertNodeArchive :: Pool PGS.Connection -> NodeId -> ArchiveQ -> IO Int64
-insertNodeArchive pool nodeId@(NodeId nId) (Archive {..}) = do
-  ret <- withResource pool $ \c -> runInsert c insert
-  -- NOTE: It is assumed that the most recent change is the first in the
-  -- list, so we save these in reverse order
-  insertNodeArchiveHistory pool nodeId $ reverse _a_history
-  pure ret
-  where
-    emptyHistory = [] :: [NgramsStatePatch']
-    insert = Insert { iTable = nodeStoryTable
-                    , iRows = [NodeStoryDB { node_id = sqlInt4 nId
-                                           , archive = sqlValueJSONB $ Archive { _a_history = emptyHistory
-                                                                               , .. } }]
-                    , iReturning = rCount
-                    , iOnConflict = Nothing }
+-- | This function inserts whole new node story and archive for given node_id.
+insertNodeStory :: PGS.Connection -> NodeId -> ArchiveList -> IO ()
+insertNodeStory c (NodeId nId) a = do
+  _ <- mapM (\(ngramsType, ngrams, ngramsRepoElement) -> do
+                termIdM <- runPGSQuery c ngramsIdQuery (PGS.Only ngrams) :: IO [PGS.Only Int64]
+                case headMay termIdM of
+                  Nothing -> pure 0
+                  Just (PGS.Only termId) -> runPGSExecuteMany c query [(nId, a ^. a_version, ngramsType, termId, ngramsRepoElement)]) $ archiveStateAsList $ a ^. a_state
+             -- runInsert c $ insert ngramsType ngrams ngramsRepoElement) $ archiveStateAsList _a_state
+  pure ()
+  where
+    query :: PGS.Query
+    query = [sql| INSERT INTO node_stories(node_id, ngrams_type_id, ngrams_id, ngrams_repo_element) VALUES (?, ?, ?, ?) |]
+    -- insert ngramsType ngrams ngramsRepoElement =
+    --   Insert { iTable = nodeStoryTable
+    --          , iRows = [NodeStoryDB { node_id = sqlInt4 nId
+    --                                 , version = sqlInt4 _a_version
+    --                                 , ngrams_type_id = sqlInt4 $ TableNgrams.ngramsTypeId ngramsType
+    --                                 , ngrams_id = ...
+    --                                 , ngrams_repo_element = sqlValueJSONB ngramsRepoElement
+    --                                 }]
+    --          , iReturning = rCount
+    --          , iOnConflict = Nothing }

-updateNodeArchive :: Pool PGS.Connection -> NodeId -> ArchiveQ -> IO Int64
-updateNodeArchive pool nodeId@(NodeId nId) (Archive {..}) = do
-  ret <- withResource pool $ \c -> runUpdate c update
-  -- NOTE: It is assumed that the most recent change is the first in the
-  -- list, so we save these in reverse order
-  insertNodeArchiveHistory pool nodeId $ reverse _a_history
-  pure ret
-  where
-    emptyHistory = [] :: [NgramsStatePatch']
-    update = Update { uTable = nodeStoryTable
-                    , uUpdateWith = updateEasy (\(NodeStoryDB { node_id }) ->
-                                      NodeStoryDB { archive = sqlValueJSONB $ Archive { _a_history = emptyHistory
-                                                                                      , ..}
-                                                  , .. })
-                    , uWhere = (\row -> node_id row .== sqlInt4 nId)
-                    , uReturning = rCount }
+insertArchiveList :: PGS.Connection -> NodeId -> ArchiveList -> IO ()
+insertArchiveList c nodeId a = do
+  _ <- runPGSExecuteMany c query $ (\(nt, n, nre) -> (nodeId, a ^. a_version, nt, nre, n)) <$> (archiveStateAsList $ a ^. a_state)
+  pure ()
+  where
+    query :: PGS.Query
+    query = [sql| INSERT INTO node_stories(node_id, version, ngrams_type_id, ngrams_id, ngrams_repo_element)
+                  SELECT ?, ?, ?, ngrams.id, ? FROM ngrams WHERE terms = ? |]
+
+deleteArchiveList :: PGS.Connection -> NodeId -> ArchiveList -> IO ()
+deleteArchiveList c nodeId a = do
+  _ <- runPGSExecuteMany c query $ (\(nt, n, _) -> (nodeId, nt, n)) <$> (archiveStateAsList $ a ^. a_state)
+  pure ()
+  where
+    query :: PGS.Query
+    query = [sql| WITH (SELECT id FROM ngrams WHERE terms = ?) AS ngrams
+                  DELETE FROM node_stories
+                  WHERE node_id = ? AND ngrams_type_id = ? AND ngrams_id IN (SELECT id FROM ngrams WHERE terms = ?) |]
+
+updateArchiveList :: PGS.Connection -> NodeId -> ArchiveList -> IO ()
+updateArchiveList c nodeId a = do
+  let params = (\(nt, n, nre) -> (nre, nodeId, nt, n)) <$> (archiveStateAsList $ a ^. a_state)
+  --q <- PGS.format c query params
+  --printDebug "[updateArchiveList] query" q
+  _ <- mapM (\p -> runPGSExecute c query p) params
+  pure ()
+  where
+    query :: PGS.Query
+    query = [sql| UPDATE node_stories
+                  SET ngrams_repo_element = ?
+                  WHERE node_id = ? AND ngrams_type_id = ? AND ngrams_id IN (SELECT id FROM ngrams WHERE terms = ?) |]
+
+-- | This function updates the node story and archive for given node_id.
+updateNodeStory :: PGS.Connection -> NodeId -> ArchiveList -> ArchiveList -> IO ()
+updateNodeStory c nodeId@(NodeId _nId) currentArchive newArchive = do
+  -- STEPS
+  -- 0. We assume we're inside an advisory lock
+  -- 1. Find differences (inserts/updates/deletes)
+  let currentList = archiveStateAsList $ currentArchive ^. a_state
+  let newList = archiveStateAsList $ newArchive ^. a_state
+  let currentSet = Set.fromList $ (\(nt, n, _) -> (nt, n)) <$> currentList
+  let newSet = Set.fromList $ (\(nt, n, _) -> (nt, n)) <$> newList
+
+  let inserts = filter (\(nt, n, _) -> Set.member (nt, n) $ Set.difference newSet currentSet) newList
+  printDebug "[updateNodeStory] inserts" inserts
+  let deletes = filter (\(nt, n, _) -> Set.member (nt, n) $ Set.difference currentSet newSet) currentList
+  printDebug "[updateNodeStory] deletes" deletes
+
+  -- updates are the things that are in new but not in current
+  let updates = Set.toList $ Set.difference (Set.fromList newList) (Set.fromList currentList)
+  printDebug "[updateNodeStory] updates" $ Text.unlines $ (Text.pack . show) <$> updates
+
+  -- 2. Perform inserts/deletes/updates
+  insertArchiveList c nodeId $ Archive { _a_version = newArchive ^. a_version
+                                       , _a_history = []
+                                       , _a_state = archiveStateFromList inserts }
+  printDebug "[updateNodeStory] insert applied" ()
+  -- TODO Use currentArchive ^. a_version in delete and report error
+  -- if entries with (node_id, ngrams_type_id, ngrams_id) but
+  -- different version are found.
+  deleteArchiveList c nodeId $ Archive { _a_version = newArchive ^. a_version
+                                       , _a_history = []
+                                       , _a_state = archiveStateFromList deletes }
+  printDebug "[updateNodeStory] delete applied" ()
+  updateArchiveList c nodeId $ Archive { _a_version = newArchive ^. a_version
+                                       , _a_history = []
+                                       , _a_state = archiveStateFromList updates }
+  printDebug "[updateNodeStory] update applied" ()
+  pure ()
+  -- where
+  --   update = Update { uTable = nodeStoryTable
+  --                   , uUpdateWith = updateEasy (\(NodeStoryDB { node_id }) ->
+  --                                     NodeStoryDB { archive = sqlValueJSONB $ Archive { _a_history = emptyHistory
+  --                                                                                     , ..}
+  --                                                 , .. })
+  --                   , uWhere = (\row -> node_id row .== sqlInt4 nId)
+  --                   , uReturning = rCount }

-- nodeStoryRemove :: Pool PGS.Connection -> NodeId -> IO Int64
-- nodeStoryRemove pool (NodeId nId) = withResource pool $ \c -> runDelete c delete
@@ -379,34 +538,50 @@ updateNodeArchive pool nodeId@(NodeId nId) (Archive {..}) = do
--          , dWhere = (\row -> node_id row .== sqlInt4 nId)
--          , dReturning = rCount }

-upsertNodeArchive :: Pool PGS.Connection -> NodeId -> ArchiveQ -> IO Int64
-upsertNodeArchive pool nId a = do
-  (NodeStory m) <- getNodeStory pool nId
-  case Map.lookup nId m of
-    Nothing -> insertNodeArchive pool nId a
-    Just _  -> updateNodeArchive pool nId a
+upsertNodeStories :: PGS.Connection -> NodeId -> ArchiveList -> IO ()
+upsertNodeStories c nodeId@(NodeId nId) newArchive = do
+  printDebug "[upsertNodeStories] START nId" nId
+  PGS.withTransaction c $ do
+    printDebug "[upsertNodeStories] locking nId" nId
+    runPGSAdvisoryXactLock c nId
+
+    -- whether it's insert or update, we can insert node archive history already
+    -- NOTE: It is assumed that the most recent change is the first in the
+    -- list, so we save these in reverse order
+    insertNodeArchiveHistory c nodeId $ reverse $ newArchive ^. a_history
+
+    (NodeStory m) <- getNodeStory c nodeId
+    case Map.lookup nodeId m of
+      Nothing -> do
+        _ <- insertNodeStory c nodeId newArchive
+        pure ()
+      Just currentArchive -> do
+        _ <- updateNodeStory c nodeId currentArchive newArchive
+        pure ()
+
+    printDebug "[upsertNodeStories] STOP nId" nId

-writeNodeStories :: Pool PGS.Connection -> NodeListStory -> IO ()
-writeNodeStories pool (NodeStory nls) = do
-  _ <- mapM (\(nId, a) -> upsertNodeArchive pool nId a) $ Map.toList nls
+writeNodeStories :: PGS.Connection -> NodeListStory -> IO ()
+writeNodeStories c (NodeStory nls) = do
+  _ <- mapM (\(nId, a) -> upsertNodeStories c nId a) $ Map.toList nls
  pure ()

-- | Returns a `NodeListStory`, updating the given one for given `NodeId`
-nodeStoryInc :: Pool PGS.Connection -> Maybe NodeListStory -> NodeId -> IO NodeListStory
-nodeStoryInc pool Nothing nId = getNodeStory pool nId
-nodeStoryInc pool (Just ns@(NodeStory nls)) nId = do
+nodeStoryInc :: PGS.Connection -> Maybe NodeListStory -> NodeId -> IO NodeListStory
+nodeStoryInc c Nothing nId = getNodeStory c nId
+nodeStoryInc c (Just ns@(NodeStory nls)) nId = do
  case Map.lookup nId nls of
    Nothing -> do
-      (NodeStory nls') <- getNodeStory pool nId
+      (NodeStory nls') <- getNodeStory c nId
      pure $ NodeStory $ Map.union nls nls'
    Just _ -> pure ns

-nodeStoryIncs :: Pool PGS.Connection -> Maybe NodeListStory -> [NodeId] -> IO NodeListStory
+nodeStoryIncs :: PGS.Connection -> Maybe NodeListStory -> [NodeId] -> IO NodeListStory
nodeStoryIncs _ Nothing [] = pure $ NodeStory $ Map.empty
-nodeStoryIncs pool (Just nls) ns = foldM (\m n -> nodeStoryInc pool (Just m) n) nls ns
-nodeStoryIncs pool Nothing (ni:ns) = do
-  m <- getNodeStory pool ni
-  nodeStoryIncs pool (Just m) ns
+nodeStoryIncs c (Just nls) ns = foldM (\m n -> nodeStoryInc c (Just m) n) nls ns
+nodeStoryIncs c Nothing (ni:ns) = do
+  m <- getNodeStory c ni
+  nodeStoryIncs c (Just m) ns

-- nodeStoryDec :: Pool PGS.Connection -> NodeListStory -> NodeId -> IO NodeListStory
-- nodeStoryDec pool ns@(NodeStory nls) ni = do
@@ -436,10 +611,10 @@ readNodeStoryEnv pool = do
nodeStoryVar :: Pool PGS.Connection -> Maybe (MVar NodeListStory) -> [NodeId] -> IO (MVar NodeListStory)
nodeStoryVar pool Nothing nIds = do
-  state <- nodeStoryIncs pool Nothing nIds
+  state <- withResource pool $ \c -> nodeStoryIncs c Nothing nIds
  newMVar state
nodeStoryVar pool (Just mv) nIds = do
-  _ <- modifyMVar_ mv $ \nsl -> (nodeStoryIncs pool (Just nsl) nIds)
+  _ <- withResource pool $ \c -> modifyMVar_ mv $ \nsl -> (nodeStoryIncs c (Just nsl) nIds)
  pure mv

-- Debounce is useful since it could delay the saving to some later
@@ -449,9 +624,15 @@ mkNodeStorySaver pool mvns = mkDebounce settings
  where
    settings = defaultDebounceSettings
                 { debounceAction = do
-                    withMVar mvns (\ns -> writeNodeStories pool ns)
-                    withMVar mvns (\ns -> printDebug "[mkNodeStorySaver] debounce nodestory" ns)
-                    modifyMVar_ mvns $ \ns -> pure $ clearHistory ns
+                    -- NOTE: Lock MVar first, then use resource pool.
+                    -- Otherwise we could wait for MVar, while
+                    -- blocking the pool connection.
+                    modifyMVar_ mvns $ \ns -> do
+                      withResource pool $ \c -> do
+                        --printDebug "[mkNodeStorySaver] will call writeNodeStories, ns" ns
+                        writeNodeStories c ns
+                      pure $ clearHistory ns
+                    --withMVar mvns (\ns -> printDebug "[mkNodeStorySaver] debounce nodestory" ns)
                , debounceFreq = 1*minute
                }
    minute = 60*second
@@ -462,6 +643,13 @@ clearHistory (NodeStory ns) = NodeStory $ ns & (traverse . a_history) .~ emptyHi
  where
    emptyHistory = [] :: [NgramsStatePatch']

+currentVersion :: (HasNodeStory env err m) => ListId -> m Version
+currentVersion listId = do
+  pool <- view connPool
+  nls <- withResource pool $ \c -> liftBase $ getNodeStory c listId
+  pure $ nls ^. unNodeStory . at listId . _Just . a_version
+
-- mkNodeStorySaver :: MVar NodeListStory -> Cmd err (Cmd err ())
-- mkNodeStorySaver mvns = mkDebounce settings
--   where
@@ -27,7 +27,7 @@ import Gargantext.API.Ngrams.Tools (filterListWithRoot, groupNodesByNgrams, Diag
import Gargantext.Database.Prelude (runPGSQuery{-, formatPGSQuery-})
import Gargantext.API.Ngrams.Types (TabType(..), ngramsTypeFromTabType, NgramsTerm(..))
import Gargantext.Core.Mail.Types (HasMail)
-import Gargantext.Core.NodeStory
+import Gargantext.Core.NodeStory hiding (runPGSQuery)
import Gargantext.Core.Text.Metrics (scored, Scored(..), {-localMetrics, toScored-})
import Database.PostgreSQL.Simple.ToField (toField, Action{-, ToField-})
import Gargantext.Core.Types (ListType(..), Limit, NodeType(..), ContextId)
@@ -243,6 +243,3 @@ getNgrams lId tabType = do
take' :: Maybe Int -> [a] -> [a]
take' Nothing  xs = xs
take' (Just n) xs = take n xs
@@ -113,4 +113,3 @@ nodeTypes = [ (n, toDBid n) | n <- allNodeTypes ]
fromNodeTypeId :: NodeTypeId -> NodeType
fromNodeTypeId tId = fromMaybe (panic $ pack $ "Type Id " <> show tId <> " does not exist")
                               (lookup tId nodeTypeInv)
@@ -30,8 +30,8 @@ import Data.Morpheus.Types (GQLType)
import Data.Swagger
import Data.Text (Text, unpack, pack)
import Data.Time (UTCTime)
-import Database.PostgreSQL.Simple.FromField (FromField, fromField)
+import Database.PostgreSQL.Simple.FromField (FromField, fromField, fromJSONField)
-import Database.PostgreSQL.Simple.ToField (ToField, toField)
+import Database.PostgreSQL.Simple.ToField (ToField, toField, toJSONField)
import GHC.Generics (Generic)
import Gargantext.Core.Utils.Prefix (unPrefix, unPrefixSwagger, wellNamedSchema)
import Gargantext.Database.Schema.Context
@@ -211,14 +211,6 @@ pgContextId = pgNodeId
------------------------------------------------------------------------
newtype NodeId = NodeId Int
  deriving (Read, Generic, Num, Eq, Ord, Enum, ToJSONKey, FromJSONKey, ToJSON, FromJSON, Hashable, Csv.ToField)
-
--- TODO make another type
-type ContextId = NodeId
-
-newtype NodeContextId = NodeContextId Int
-  deriving (Read, Generic, Num, Eq, Ord, Enum, ToJSONKey, FromJSONKey, ToJSON, FromJSON, Hashable, Csv.ToField)
-
instance GQLType NodeId
instance Show NodeId where
  show (NodeId n) = "nodeId-" <> show n
@@ -232,6 +224,14 @@ instance FromField NodeId where
    then return $ NodeId n
    else mzero
instance ToSchema NodeId
+
+-- TODO make another type
+type ContextId = NodeId
+
+newtype NodeContextId = NodeContextId Int
+  deriving (Read, Generic, Num, Eq, Ord, Enum, ToJSONKey, FromJSONKey, ToJSON, FromJSON, Hashable, Csv.ToField)
+
--instance Csv.ToField NodeId where
--  toField (NodeId nodeId) = Csv.toField nodeId
@@ -357,6 +357,20 @@ data NodeType = NodeUser
  deriving (Show, Read, Eq, Generic, Bounded, Enum)

instance GQLType NodeType
+instance FromJSON NodeType
+instance ToJSON NodeType
+instance FromHttpApiData NodeType where
+  parseUrlPiece = Right . read . unpack
+instance ToHttpApiData NodeType where
+  toUrlPiece = pack . show
+instance ToParamSchema NodeType
+instance ToSchema NodeType
+instance Arbitrary NodeType where
+  arbitrary = elements allNodeTypes
+instance FromField NodeType where
+  fromField = fromJSONField
+instance ToField NodeType where
+  toField = toJSONField

allNodeTypes :: [NodeType]
@@ -394,21 +408,6 @@ defaultName NodeFrameNotebook = "Code"
defaultName NodeFile          = "File"

-instance FromJSON NodeType
-instance ToJSON NodeType
-instance FromHttpApiData NodeType where
-  parseUrlPiece = Right . read . unpack
-instance ToHttpApiData NodeType where
-  toUrlPiece = pack . show
-instance ToParamSchema NodeType
-instance ToSchema NodeType
-instance Arbitrary NodeType where
-  arbitrary = elements allNodeTypes
-
------------------------------------------------------------------------
-- Instances
@@ -451,4 +450,3 @@ instance DefaultFromField SqlText (Maybe Hash)
context2node :: Context a -> Node a
context2node (Context ci ch ct cu cp cn cd chy) = Node ci ch ct cu cp cn cd chy
@@ -132,4 +132,3 @@ selectNgramsId' ns = runPGSQuery querySelectNgramsId ( PGS.Only
    JOIN input_rows ir ON ir.terms = n.terms
    GROUP BY n.terms, n.id
  |]
@@ -26,7 +26,6 @@ import Control.Lens (set, view)
import Data.Aeson
import Data.Maybe (fromMaybe)
import Data.Text (Text)
-import qualified Database.PostgreSQL.Simple as DPS
import Database.PostgreSQL.Simple.SqlQQ (sql)
import Opaleye hiding (FromField)
import Prelude hiding (null, id, map, sum)
@@ -41,6 +40,9 @@ import Gargantext.Database.Query.Table.Node.Error
import Gargantext.Database.Schema.Node
import Gargantext.Prelude hiding (sum, head)
+import qualified Database.PostgreSQL.Simple as PGS
+

queryNodeSearchTable :: Select NodeSearchRead
queryNodeSearchTable = selectTable nodeTableSearch
@@ -123,7 +125,7 @@ getClosestParentIdByType :: HasDBid NodeType
                         -> NodeType
                         -> Cmd err (Maybe NodeId)
getClosestParentIdByType nId nType = do
-  result <- runPGSQuery query (nId, 0 :: Int)
+  result <- runPGSQuery query (PGS.Only nId)
  case result of
    [(NodeId parentId, pTypename)] -> do
      if toDBid nType == pTypename then
@@ -132,12 +134,12 @@ getClosestParentIdByType nId nType = do
        getClosestParentIdByType (NodeId parentId) nType
    _ -> pure Nothing
  where
-    query :: DPS.Query
+    query :: PGS.Query
    query = [sql|
      SELECT n2.id, n2.typename
      FROM nodes n1
        JOIN nodes n2 ON n1.parent_id = n2.id
-      WHERE n1.id = ? AND 0 = ?;
+      WHERE n1.id = ?;
    |]

-- | Similar to `getClosestParentIdByType` but includes current node
@@ -147,7 +149,7 @@ getClosestParentIdByType' :: HasDBid NodeType
                          -> NodeType
                          -> Cmd err (Maybe NodeId)
getClosestParentIdByType' nId nType = do
-  result <- runPGSQuery query (nId, 0 :: Int)
+  result <- runPGSQuery query (PGS.Only nId)
  case result of
    [(NodeId id, pTypename)] -> do
      if toDBid nType == pTypename then
@@ -156,11 +158,11 @@ getClosestParentIdByType' nId nType = do
        getClosestParentIdByType nId nType
    _ -> pure Nothing
  where
-    query :: DPS.Query
+    query :: PGS.Query
    query = [sql|
      SELECT n.id, n.typename
      FROM nodes n
-      WHERE n.id = ? AND 0 = ?;
+      WHERE n.id = ?;
    |]

-- | Given a node id, find all it's children (no matter how deep) of
@@ -170,15 +172,15 @@ getChildrenByType :: HasDBid NodeType
                  -> NodeType
                  -> Cmd err [NodeId]
getChildrenByType nId nType = do
-  result <- runPGSQuery query (nId, 0 :: Int)
+  result <- runPGSQuery query (PGS.Only nId)
  children_lst <- mapM (\(id, _) -> getChildrenByType id nType) result
  pure $ concat $ [fst <$> filter (\(_, pTypename) -> pTypename == toDBid nType) result] ++ children_lst
  where
-    query :: DPS.Query
+    query :: PGS.Query
    query = [sql|
      SELECT n.id, n.typename
      FROM nodes n
-      WHERE n.parent_id = ? AND 0 = ?;
+      WHERE n.parent_id = ?;
    |]

------------------------------------------------------------------------
@@ -231,8 +233,8 @@ selectNodesIdWithType nt = proc () -> do
------------------------------------------------------------------------
nodeExists :: (HasNodeError err) => NodeId -> Cmd err Bool
-nodeExists nId = (== [DPS.Only True])
-  <$> runPGSQuery [sql|SELECT true FROM nodes WHERE id = ? AND ?|] (nId, True)
+nodeExists nId = (== [PGS.Only True])
+  <$> runPGSQuery [sql|SELECT true FROM nodes WHERE id = ? |] (PGS.Only nId)

getNode :: HasNodeError err => NodeId -> Cmd err (Node Value)
getNode nId = do
@@ -397,4 +399,3 @@ defaultListMaybe cId = headMay <$> map (view node_id ) <$> getListsWithParentId
getListsWithParentId :: HasDBid NodeType => NodeId -> Cmd err [Node HyperdataList]
getListsWithParentId n = runOpaQuery $ selectNodesWith' n (Just NodeList)
@@ -20,25 +20,27 @@ Ngrams connection to the Database.
module Gargantext.Database.Schema.Ngrams
  where

-import Data.Maybe (fromMaybe)
-import Data.HashMap.Strict (HashMap)
-import Data.Hashable (Hashable)
import Codec.Serialise (Serialise())
import Control.Lens (over)
import Control.Monad (mzero)
+import Data.Maybe (fromMaybe)
+import Data.HashMap.Strict (HashMap)
+import Data.Hashable (Hashable)
import Data.Aeson
import Data.Aeson.Types (toJSONKeyText)
import Data.Map (fromList, lookup)
+import Database.PostgreSQL.Simple.FromField (returnError, ResultError(..))
import Data.Text (Text, splitOn, pack, strip)
import Gargantext.Core.Types (TODO(..), Typed(..))
import Gargantext.Prelude
import Servant (FromHttpApiData(..), Proxy(..), ToHttpApiData(..))
-import Text.Read (read)
import Gargantext.Core (HasDBid(..))
import Gargantext.Database.Types
import Gargantext.Database.Schema.Prelude
-import qualified Database.PostgreSQL.Simple as PGS
+import Text.Read (read)
+import qualified Data.ByteString.Char8 as B
import qualified Data.HashMap.Strict as HashMap
+import qualified Database.PostgreSQL.Simple as PGS

type NgramsId = Int
@@ -82,8 +84,34 @@ ngramsTable = Table "ngrams" (pNgramsDb NgramsDB { _ngrams_id = optionalTable
-- ngrams in text fields of documents has Terms Type (i.e. either title or abstract)
data NgramsType = Authors | Institutes | Sources | NgramsTerms
  deriving (Eq, Show, Read, Ord, Enum, Bounded, Generic)

instance Serialise NgramsType
+instance FromJSON NgramsType
+instance FromJSONKey NgramsType where
+  fromJSONKey = FromJSONKeyTextParser (parseJSON . String)
+instance ToJSON NgramsType
+instance ToJSONKey NgramsType where
+  toJSONKey = toJSONKeyText (pack . show)
+instance FromHttpApiData NgramsType where
+  parseUrlPiece n = pure $ (read . cs) n
+instance ToHttpApiData NgramsType where
+  toUrlPiece = pack . show
+instance ToParamSchema NgramsType where
+  toParamSchema _ = toParamSchema (Proxy :: Proxy TODO)
+-- map NgramsType to its assigned id
+instance FromField NgramsType where
+  fromField fld mdata =
+    case B.unpack `fmap` mdata of
+      Nothing -> returnError UnexpectedNull fld ""
+      Just dat -> do
+        n <- fromField fld mdata
+        if (n :: Int) > 0 then
+          case fromNgramsTypeId (NgramsTypeId n) of
+            Nothing -> returnError ConversionFailed fld dat
+            Just nt -> pure nt
+        else
+          returnError ConversionFailed fld dat
+instance ToField NgramsType where
+  toField nt = toField $ ngramsTypeId nt

ngramsTypes :: [NgramsType]
@@ -96,33 +124,13 @@ instance ToSchema NgramsType
newtype NgramsTypeId = NgramsTypeId Int
  deriving (Eq, Show, Ord, Num)

instance ToField NgramsTypeId where
  toField (NgramsTypeId n) = toField n
instance FromField NgramsTypeId where
  fromField fld mdata = do
    n <- fromField fld mdata
    if (n :: Int) > 0 then return $ NgramsTypeId n
    else mzero

-instance FromJSON NgramsType
-instance FromJSONKey NgramsType where
-  fromJSONKey = FromJSONKeyTextParser (parseJSON . String)
-instance ToJSON NgramsType
-instance ToJSONKey NgramsType where
-  toJSONKey = toJSONKeyText (pack . show)
-instance FromHttpApiData NgramsType where
-  parseUrlPiece n = pure $ (read . cs) n
-instance ToHttpApiData NgramsType where
-  toUrlPiece = pack . show
-instance ToParamSchema NgramsType where
-  toParamSchema _ = toParamSchema (Proxy :: Proxy TODO)
-
instance DefaultFromField (Nullable SqlInt4) NgramsTypeId
  where
    defaultFromField = fromPGSFromField