Verified Commit 613b47b7 authored by Przemyslaw Kaminski's avatar Przemyslaw Kaminski

Merge branch 'dev' into 362-dev-sqlite

parents fcf968af 5225daf6
Pipeline #7504 passed with stages
in 48 minutes and 20 seconds
# Optimising CI speed by using tips from https://blog.nimbleways.com/let-s-make-faster-gitlab-ci-cd-pipelines/ # Optimising CI speed by using tips from https://blog.nimbleways.com/let-s-make-faster-gitlab-ci-cd-pipelines/
#image: cgenie/gargantext:9.4.8 image: cgenie/gargantext:9.6.6
image: adinapoli/gargantext:v3.5
variables: variables:
STACK_ROOT: "${CI_PROJECT_DIR}/.stack-root" STACK_ROOT: "${CI_PROJECT_DIR}/.stack-root"
STACK_OPTS: "--system-ghc" STACK_OPTS: "--system-ghc"
STORE_DIR: "${CI_PROJECT_DIR}/.cabal" STORE_DIR: "${CI_PROJECT_DIR}/.cabal"
CABAL_DIR: "${CI_PROJECT_DIR}/.cabal" CABAL_DIR: "${CI_PROJECT_DIR}/.cabal"
CORENLP: "4.5.4"
FF_USE_FASTZIP: "true" FF_USE_FASTZIP: "true"
ARTIFACT_COMPRESSION_LEVEL: "fast" ARTIFACT_COMPRESSION_LEVEL: "fast"
CACHE_COMPRESSION_LEVEL: "fast" CACHE_COMPRESSION_LEVEL: "fast"
...@@ -95,17 +93,11 @@ test: ...@@ -95,17 +93,11 @@ test:
nix-shell --run "./bin/update-project-dependencies $STORE_DIR" nix-shell --run "./bin/update-project-dependencies $STORE_DIR"
mkdir -p /root/.cache/cabal/logs mkdir -p /root/.cache/cabal/logs
chown -R test:test /root/.cache/cabal/logs/ chown -R test:test /root/.cache/cabal/logs/
chown -R test:test /root/.cache/cabal/packages/hackage.haskell.org/
chown -R test:test "$TEST_TMPDIR" chown -R test:test "$TEST_TMPDIR"
mkdir -p /builds/gargantext/haskell-gargantext/devops/coreNLP/stanford-corenlp-current
cp -R /root/devops/coreNLP/stanford-corenlp-${CORENLP}/* /builds/gargantext/haskell-gargantext/devops/coreNLP/stanford-corenlp-current/
nix-shell --run "chown -R test:test /root/.config/ && su -m test -c \"export PATH=$PATH:$TEST_NIX_PATH && export TMPDIR=$TEST_TMPDIR && cd /builds/gargantext/haskell-gargantext; $CABAL --store-dir=$STORE_DIR v2-test --test-show-details=streaming --flags 'test-crypto no-phylo-debug-logs' --ghc-options='-O0 -fclear-plugins'\"" nix-shell --run "chown -R test:test /root/.config/ && su -m test -c \"export PATH=$PATH:$TEST_NIX_PATH && export TMPDIR=$TEST_TMPDIR && cd /builds/gargantext/haskell-gargantext; $CABAL --store-dir=$STORE_DIR v2-test --test-show-details=streaming --flags 'test-crypto no-phylo-debug-logs' --ghc-options='-O0 -fclear-plugins'\""
chown -R root:root dist-newstyle/ chown -R root:root dist-newstyle/
chown -R root:root /root/ chown -R root:root /root/
chown -R root:root $STORE_DIR chown -R root:root $STORE_DIR
chown -R root:root /root/.cache/cabal/logs/ chown -R root:root /root/.cache/cabal/logs/
chown -R root:root /root/.cache/cabal/packages/hackage.haskell.org/
chown -Rh root:root /builds/gargantext/haskell-gargantext/devops/coreNLP/stanford-corenlp-current
## Version 0.0.7.4.7
* [BACK][FIX][Adjust the output of the UpdateList tests (#460)](https://gitlab.iscpif.fr/gargantext/haskell-gargantext/issues/460)
* [BACK][FIX][Import/export in SQLite format (#362)](https://gitlab.iscpif.fr/gargantext/haskell-gargantext/issues/362)
* [FRONT][FIX][When the progress bar is empty: say: waiting task (#503)](https://gitlab.iscpif.fr/gargantext/purescript-gargantext/issues/503)
## Version 0.0.7.4.6
* [BACK][FIX][project in `docker-compose` (#450)](https://gitlab.iscpif.fr/gargantext/haskell-gargantext/issues/450)
* [BACK][FIX][Upgrade GHC to 9.6.x (#436)](https://gitlab.iscpif.fr/gargantext/haskell-gargantext/issues/436) and [Merge Request](https://gitlab.iscpif.fr/gargantext/haskell-gargantext/merge_requests/395)
* [BACK][FIX][Error during import of term in TSV format (#381)](https://gitlab.iscpif.fr/gargantext/haskell-gargantext/issues/381)
* [BACK][FIX][Loading a terms file with empty terms gives an undecipherable and inconsistent error (#395)](https://gitlab.iscpif.fr/gargantext/haskell-gargantext/issues/395) and [Merge Request](https://gitlab.iscpif.fr/gargantext/haskell-gargantext/merge_requests/399)
## Version 0.0.7.4.5.1
* [FRONT][FIX][[Corpus import/upload] The error message has disappeared on version 0.0.7.4.2 at least (#728)](https://gitlab.iscpif.fr/gargantext/purescript-gargantext/issues/728)
## Version 0.0.7.4.5 ## Version 0.0.7.4.5
* [BACK][FIX][Error when uploading a specific TSV file (#433)](https://gitlab.iscpif.fr/gargantext/haskell-gargantext/issues/433) * [BACK][FIX][Error when uploading a specific TSV file (#433)](https://gitlab.iscpif.fr/gargantext/haskell-gargantext/issues/433)
......
...@@ -103,6 +103,17 @@ Then initialize the database using the dedicated command: from inside the ...@@ -103,6 +103,17 @@ Then initialize the database using the dedicated command: from inside the
And provide a name and a password for the master user as requested. And provide a name and a password for the master user as requested.
### Launching CoreNLP
CoreNLP can be started with nix:
```shell
nix-shell --run startCoreNLPServer.sh
```
By default, this starts on port `9000`. If you want a different port, then do:
```shell
nix-shell --run 'startCoreNLPServer.sh -p 9001'
```
### Running Gargantext ### Running Gargantext
From inside the `haskell-gargantext/` directory, run From inside the `haskell-gargantext/` directory, run
...@@ -237,11 +248,11 @@ The CI is on gitlab and is controlled by the `.gitlab-ci.yml` file. ...@@ -237,11 +248,11 @@ The CI is on gitlab and is controlled by the `.gitlab-ci.yml` file.
The Docker image that is used can be built with: The Docker image that is used can be built with:
```shell ```shell
podman build -t cgenie/gargantext:9.4.8 -f ./devops/docker/Dockerfile --format=docker . podman build -t cgenie/gargantext:9.6.6 --pull -f ./devops/docker/Dockerfile --format=docker .
``` ```
or or
```shell ```shell
docker build -t cgenie/gargantext:9.4.8 -f ./devops/docker/Dockerfile . docker build -t cgenie/gargantext:9.6.6 --pull -f ./devops/docker/Dockerfile .
``` ```
NOTE: if podman/docker complains about not enough space, it's probably NOTE: if podman/docker complains about not enough space, it's probably
...@@ -263,9 +274,9 @@ When a development is needed on libraries (for instance, the HAL crawler in http ...@@ -263,9 +274,9 @@ When a development is needed on libraries (for instance, the HAL crawler in http
2. When changes work and tests are OK, commit in repo `hal` 2. When changes work and tests are OK, commit in repo `hal`
2. When changes are commited / merged: 2. When changes are commited / merged:
1. Get the hash id, and edit `cabal.project` with the **new commit id** 1. Get the hash id, and edit `cabal.project` with the **new commit id**
2. run `./bin/update-project-dependencies` 2. run `nix-shell --run ./bin/update-project-dependencies`
- get an error that sha256 don't match, so update the `./bin/update-project-dependencies` with new sha256 hash - get an error that sha256 don't match, so update the `./bin/update-project-dependencies` with new sha256 hash
- run again `./bin/update-project-dependencies` (to make sure it's a fixed point now) - run again `nix-shell --run ./bin/update-project-dependencies` (to make sure it's a fixed point now)
> Note: without `stack.yaml` we would have to only fix `cabal.project` -> `source-repository-package` commit id. Sha256 is there to make sure CI reruns the tests. > Note: without `stack.yaml` we would have to only fix `cabal.project` -> `source-repository-package` commit id. Sha256 is there to make sure CI reruns the tests.
......
#!/usr/bin/env bash

# Install the cabal2stack executable into the cabal store if it is not
# already available on PATH. Expects STORE_DIR and INDEX_STATE to be
# exported by setup-ci-environment.

set -euxo pipefail

current_dir=$(basename "$PWD")

# Allow running either from the repository root or from within ./bin.
if [ "$current_dir" == "bin" ]; then
  source ./setup-ci-environment
else
  source ./bin/setup-ci-environment
fi

# Quote $STORE_DIR: an unquoted expansion breaks if the store path
# contains whitespace.
cabal --store-dir="$STORE_DIR" v2-update "hackage.haskell.org,${INDEX_STATE}"

# Install cabal2stack if it can't be found.
if ! cabal2stack --help &> /dev/null
then
  echo "cabal2stack could not be found"
  CURDIR=$PWD
  git clone https://github.com/iconnect/cabal2stack.git cabal2stack-installer
  cd cabal2stack-installer
  cabal --store-dir="$STORE_DIR" v2-install --allow-newer --index-state="${INDEX_STATE}" --overwrite-policy=always
  cd "$CURDIR"
  rm -rf cabal2stack-installer
fi
...@@ -6,10 +6,8 @@ current_dir=$(basename "$PWD") ...@@ -6,10 +6,8 @@ current_dir=$(basename "$PWD")
if [ "$current_dir" == "bin" ]; then if [ "$current_dir" == "bin" ]; then
source ./setup-ci-environment source ./setup-ci-environment
./install-cabal2stack
else else
source ./bin/setup-ci-environment source ./bin/setup-ci-environment
./bin/install-cabal2stack
fi fi
# README! # README!
...@@ -18,8 +16,8 @@ fi ...@@ -18,8 +16,8 @@ fi
# with the `sha256sum` result calculated on the `cabal.project` and # with the `sha256sum` result calculated on the `cabal.project` and
# `cabal.project.freeze`. This ensures the files stay deterministic so that CI # `cabal.project.freeze`. This ensures the files stay deterministic so that CI
# cache can kick in. # cache can kick in.
expected_cabal_project_hash="37352ca36ca5e69d9945da11439be4c3909297b338242855fa588dffdf1ba02b" expected_cabal_project_hash="963418e37a17d4bb67d4b885613144b36d290f612eea80355e82abc7e76b450c"
expected_cabal_project_freeze_hash="cd52143d3a9d285360b59c6371d3e258552c1bc115bd612024db3de1f7593ff7" expected_cabal_project_freeze_hash="cd52143d3a9d285360b59c6371d3e258552c1bc115bd612024db3de1f7593ff7"
cabal --store-dir=$STORE_DIR v2-build --dry-run cabal --store-dir=$STORE_DIR v2-build --dry-run
......
...@@ -72,7 +72,7 @@ source-repository-package ...@@ -72,7 +72,7 @@ source-repository-package
source-repository-package source-repository-package
type: git type: git
location: https://gitlab.iscpif.fr/gargantext/crawlers/istex.git location: https://gitlab.iscpif.fr/gargantext/crawlers/istex.git
tag: 521ca54f1502b13f629eff2223aaf5007e6d52ec tag: 894482ef97eadce6b1c13ebced1edfe394b5be5e
source-repository-package source-repository-package
type: git type: git
...@@ -132,12 +132,12 @@ source-repository-package ...@@ -132,12 +132,12 @@ source-repository-package
location: https://github.com/haskell-github-trust/ekg-json location: https://github.com/haskell-github-trust/ekg-json
tag: bd0592818882f9cf34d2991d01f7dcb3d8bca309 tag: bd0592818882f9cf34d2991d01f7dcb3d8bca309
-- FIXME(adn) Compat-shim while we wait for upstream to catch-up -- NOTE(adn) This forks binds to nng.
source-repository-package source-repository-package
type: git type: git
location: https://github.com/garganscript/nanomsg-haskell location: https://github.com/adinapoli/nanomsg-haskell
tag: 5868db564d7d3c4568ccd11c852292b834d26c55 tag: 2d69707bf639be2055e3228dab38cc4f2a658111
source-repository-package source-repository-package
type: git type: git
location: https://github.com/adinapoli/http-reverse-proxy.git location: https://github.com/adinapoli/http-reverse-proxy.git
......
# Minimal image serving a pre-built Stanford CoreNLP distribution.
# Requires ./build.sh to have populated ./stanford-corenlp-current first.
FROM openjdk
#ADD home/debian/CoreNLP /CoreNLP
ADD stanford-corenlp-current /CoreNLP
WORKDIR /CoreNLP
# startServer.sh is copied into the distribution directory by build.sh.
CMD ./startServer.sh
#!/bin/bash

# Download and unpack a Stanford CoreNLP release, symlink it to the
# stable name "stanford-corenlp-current" and drop our startServer.sh
# wrapper into it.
#
# releases are here:
# https://stanfordnlp.github.io/CoreNLP/history.html

VERSION=4.5.4
FILE="stanford-corenlp-${VERSION}.zip"
DIR_V="stanford-corenlp-${VERSION}"
DIR="stanford-corenlp-current"
URL="http://nlp.stanford.edu/software/${FILE}"

# Each step is idempotent: it is skipped when its artifact already exists.
[ ! -f "${FILE}" ] && echo 'Fetching file' && wget "${URL}" -O "${FILE}"
[ ! -d "${DIR_V}" ] && echo 'Unzipping file' && unzip "./${FILE}"
[ ! -L "${DIR}" ] && echo "Symlinking ${DIR_V} -> ${DIR}" && ln -s "${DIR_V}" "${DIR}"
[ ! -f "${DIR}/startServer.sh" ] && echo "Copying startServer.sh" && cp ./startServer.sh "${DIR}/"

# Fixed misplaced closing quote: previously `--pull .` sat outside the
# quoted message, which was fragile and clearly unintended.
echo "You can now build with: docker build -t cgenie/corenlp-garg:${VERSION} --pull ."
#!/bin/sh
# Start the Stanford CoreNLP server on port 9000 with a 4 GB heap.
# The classpath "*" picks up every jar in the current directory.
java -mx4g -cp "*" edu.stanford.nlp.pipeline.StanfordCoreNLPServer -port 9000 -timeout 15000
...@@ -3,16 +3,6 @@ FROM ubuntu:noble ...@@ -3,16 +3,6 @@ FROM ubuntu:noble
## NOTA BENE: In order for this to be built successfully, you have to run ./devops/coreNLP/build.sh first. ## NOTA BENE: In order for this to be built successfully, you have to run ./devops/coreNLP/build.sh first.
ARG DEBIAN_FRONTEND=noninteractive ARG DEBIAN_FRONTEND=noninteractive
ARG GHC=9.6.6
ARG CORENLP=4.5.4
ARG CORE
COPY ./shell.nix /builds/gargantext/shell.nix
COPY ./nix/pkgs.nix /builds/gargantext/nix/pkgs.nix
COPY ./nix/pinned-25.05.nix /builds/gargantext/nix/pinned-25.05.nix
COPY ./devops/coreNLP/build.sh /root/devops/coreNLP/build.sh
COPY ./devops/coreNLP/startServer.sh /root/devops/coreNLP/startServer.sh
COPY ./bin/setup-ci-environment /builds/gargantext/bin/setup-ci-environment
COPY ./bin/install-cabal2stack /builds/gargantext/bin/install-cabal2stack
ENV TZ=Europe/Rome ENV TZ=Europe/Rome
ENV LANG='en_US.UTF-8' LANGUAGE='en_US:en' LC_ALL='en_US.UTF-8' ENV LANG='en_US.UTF-8' LANGUAGE='en_US:en' LC_ALL='en_US.UTF-8'
...@@ -25,32 +15,15 @@ ENV PATH=/root/.local/bin:$PATH ...@@ -25,32 +15,15 @@ ENV PATH=/root/.local/bin:$PATH
RUN apt-get update && \ RUN apt-get update && \
apt-get install --no-install-recommends -y \ apt-get install --no-install-recommends -y \
apt-transport-https \ apt-transport-https \
autoconf \
automake \
build-essential \
ca-certificates \ ca-certificates \
curl \ curl \
gcc \
git \ git \
gnupg2 \ gnupg2 \
libffi-dev \
libffi8 \
libgmp-dev \
libgmp10 \
libncurses-dev \
libncurses6 \
libnuma-dev \
libtinfo6 \
locales \ locales \
lsb-release \
software-properties-common \ software-properties-common \
strace \
sudo \ sudo \
wget \
vim \
xz-utils \ xz-utils \
zlib1g-dev \ #zlib1g-dev \
openjdk-21-jdk \
unzip && \ unzip && \
apt-get clean && rm -rf /var/lib/apt/lists/* && \ apt-get clean && rm -rf /var/lib/apt/lists/* && \
mkdir -m 0755 /nix && groupadd -r nixbld && chown root /nix && \ mkdir -m 0755 /nix && groupadd -r nixbld && chown root /nix && \
...@@ -60,16 +33,21 @@ RUN apt-get update && \ ...@@ -60,16 +33,21 @@ RUN apt-get update && \
SHELL ["/bin/bash", "-o", "pipefail", "-c"] SHELL ["/bin/bash", "-o", "pipefail", "-c"]
RUN cd /root/devops/coreNLP; ./build.sh && \ RUN set -o pipefail && \
set -o pipefail && \ locale-gen en_US.UTF-8 && \
bash <(curl -L https://releases.nixos.org/nix/nix-2.26.2/install) --no-daemon && \ bash <(curl -L https://releases.nixos.org/nix/nix-2.26.2/install) --no-daemon && \
locale-gen en_US.UTF-8 && chown root -R /nix && \ chown root -R /nix && \
. "$HOME/.nix-profile/etc/profile.d/nix.sh" && \ . "$HOME/.nix-profile/etc/profile.d/nix.sh" && \
mkdir -p "/builds/gargantext/" && chmod 777 -R "/builds/gargantext" && \ mkdir -p "/builds/gargantext/" && chmod 777 -R "/builds/gargantext" && \
echo "source $HOME/.nix-profile/etc/profile.d/nix.sh" >> "$HOME/.bashrc" && \ echo "source $HOME/.nix-profile/etc/profile.d/nix.sh" >> "$HOME/.bashrc" && \
echo `which nix-env` && \ echo `which nix-env` && \
. $HOME/.bashrc && nix-env --version && \ . $HOME/.bashrc && nix-env --version
cd /builds/gargantext && nix-shell --run "./bin/install-cabal2stack"
# We want to cache nix artifacts in the Dockerfile to improve CI speed
COPY ./shell.nix /nix-ci-build/
COPY ./nix /nix-ci-build/
RUN set -o pipefail && \
pushd /nix-ci-build/ && nix-build shell.nix && popd
WORKDIR "/builds/gargantext/" WORKDIR "/builds/gargantext/"
version: '3' version: '3'
name: 'gargantext'
services: services:
caddy: # caddy:
image: caddy:alpine # image: caddy:alpine
ports: # ports:
- 8108:8108 # - 8108:8108
volumes: # volumes:
- ./Caddyfile:/etc/caddy/Caddyfile:ro # - ./Caddyfile:/etc/caddy/Caddyfile:ro
- ../../purescript-gargantext:/srv/purescript-gargantext:ro # - ../../purescript-gargantext:/srv/purescript-gargantext:ro
#postgres11: #postgres11:
# #image: 'postgres:latest' # #image: 'postgres:latest'
...@@ -61,12 +62,6 @@ services: ...@@ -61,12 +62,6 @@ services:
# volumes: # volumes:
# - pgadmin:/var/lib/pgadmin # - pgadmin:/var/lib/pgadmin
corenlp:
#image: 'cgenie/corenlp-garg:latest'
image: 'cgenie/corenlp-garg:4.5.4'
ports:
- 9000:9000
# johnsnownlp: # johnsnownlp:
# image: 'johnsnowlabs/nlp-server:latest' # image: 'johnsnowlabs/nlp-server:latest'
# volumes: # volumes:
......
#!/bin/bash
# Fetch a pre-packaged CoreNLP tarball from dl.gargantext.org, unpack
# it under ./coreNLP and launch the server in the foreground.
if [ ! -d coreNLP ]; then
mkdir -v coreNLP
fi
pushd coreNLP
# NOTE(review): re-downloads and re-extracts on every run — no caching.
wget https://dl.gargantext.org/coreNLP.tar.bz2
tar xvjf coreNLP.tar.bz2
# The tarball preserves its original build path (home/debian/CoreNLP).
pushd home/debian/CoreNLP
# Runs until interrupted; the pushd stack is discarded on exit.
./startServer.sh
...@@ -124,8 +124,8 @@ smtp_host = "localhost" ...@@ -124,8 +124,8 @@ smtp_host = "localhost"
[notifications] [notifications]
central-exchange = { bind = "tcp://*:5560", connect = "tcp://localhost:5560" } central-exchange = { bind = "tcp://*:5560", connect = "tcp://127.0.0.1:5560" }
dispatcher = { bind = "tcp://*:5561", connect = "tcp://localhost:5561" } dispatcher = { bind = "tcp://*:5561", connect = "tcp://127.0.0.1:5561" }
[nlp] [nlp]
......
...@@ -5,7 +5,7 @@ cabal-version: 3.4 ...@@ -5,7 +5,7 @@ cabal-version: 3.4
-- see: https://github.com/sol/hpack -- see: https://github.com/sol/hpack
name: gargantext name: gargantext
version: 0.0.7.4.5 version: 0.0.7.4.7
synopsis: Search, map, share synopsis: Search, map, share
description: Please see README.md description: Please see README.md
category: Data category: Data
...@@ -54,6 +54,8 @@ data-files: ...@@ -54,6 +54,8 @@ data-files:
test-data/stemming/lancaster.txt test-data/stemming/lancaster.txt
test-data/test_config.ini test-data/test_config.ini
test-data/test_config.toml test-data/test_config.toml
test-data/issue-381/Termes_A_Ajouter_T4SC_Intellixir.tsv
test-data/issue-381/Termes_A_Ajouter_T4SC_Intellixir12.csv
.clippy.dhall .clippy.dhall
-- common options -- common options
...@@ -311,6 +313,7 @@ library ...@@ -311,6 +313,7 @@ library
Gargantext.Orphans.Accelerate Gargantext.Orphans.Accelerate
Gargantext.Orphans.OpenAPI Gargantext.Orphans.OpenAPI
Gargantext.System.Logging Gargantext.System.Logging
Gargantext.System.Logging.Loggers
Gargantext.System.Logging.Types Gargantext.System.Logging.Types
Gargantext.Utils.Dict Gargantext.Utils.Dict
Gargantext.Utils.Jobs.Error Gargantext.Utils.Jobs.Error
...@@ -806,6 +809,7 @@ test-suite garg-test-tasty ...@@ -806,6 +809,7 @@ test-suite garg-test-tasty
other-modules: other-modules:
CLI.Phylo.Common CLI.Phylo.Common
Paths_gargantext Paths_gargantext
Test.API.Private.List
Test.API.Private.Move Test.API.Private.Move
Test.API.Private.Remote Test.API.Private.Remote
Test.API.Private.Share Test.API.Private.Share
...@@ -883,6 +887,7 @@ test-suite garg-test-hspec ...@@ -883,6 +887,7 @@ test-suite garg-test-hspec
Test.API.GraphQL Test.API.GraphQL
Test.API.Notifications Test.API.Notifications
Test.API.Private Test.API.Private
Test.API.Private.List
Test.API.Private.Move Test.API.Private.Move
Test.API.Private.Remote Test.API.Private.Remote
Test.API.Private.Share Test.API.Private.Share
......
...@@ -42,6 +42,9 @@ cradle: ...@@ -42,6 +42,9 @@ cradle:
- path: "./bin/gargantext-cli/CLI/Phylo/Profile.hs" - path: "./bin/gargantext-cli/CLI/Phylo/Profile.hs"
component: "gargantext:exe:gargantext" component: "gargantext:exe:gargantext"
- path: "./bin/gargantext-cli/CLI/Server.hs"
component: "gargantext:exe:gargantext"
- path: "./bin/gargantext-cli/CLI/Server/Routes.hs" - path: "./bin/gargantext-cli/CLI/Server/Routes.hs"
component: "gargantext:exe:gargantext" component: "gargantext:exe:gargantext"
...@@ -51,13 +54,10 @@ cradle: ...@@ -51,13 +54,10 @@ cradle:
- path: "./bin/gargantext-cli/CLI/Upgrade.hs" - path: "./bin/gargantext-cli/CLI/Upgrade.hs"
component: "gargantext:exe:gargantext" component: "gargantext:exe:gargantext"
- path: "./bin/gargantext-cli/Paths_gargantext.hs" - path: "./bin/gargantext-cli/CLI/Worker.hs"
component: "gargantext:exe:gargantext"
- path: "./bin/gargantext-server/Main.hs"
component: "gargantext:exe:gargantext" component: "gargantext:exe:gargantext"
- path: "./bin/gargantext-server/Paths_gargantext.hs" - path: "./bin/gargantext-cli/Paths_gargantext.hs"
component: "gargantext:exe:gargantext" component: "gargantext:exe:gargantext"
- path: "./test" - path: "./test"
...@@ -68,9 +68,3 @@ cradle: ...@@ -68,9 +68,3 @@ cradle:
- path: "./test" - path: "./test"
component: "gargantext:test:garg-test-hspec" component: "gargantext:test:garg-test-hspec"
- path: "./bench/Main.hs"
component: "gargantext:bench:garg-bench"
- path: "./bench/Paths_gargantext.hs"
component: "gargantext:bench:garg-bench"
# Build the cabal2stack tool (https://github.com/iconnect/cabal2stack),
# used to generate a stack.yaml from a cabal.project build plan.
{ lib,
ghc,
haskellPackages,
fetchFromGitHub }:
let
# Pin a specific upstream commit for reproducibility.
src = fetchFromGitHub {
repo = "cabal2stack";
owner = "iconnect";
rev = "e4960683f518ab4c964e7646706fe2a7e1bf751a";
hash = "sha256-KE9VUXFy9QfRmu/+DFcgxV/E6oPBAR7hRaFrSt93eeY=";
};
in
# mkDerivation and the Haskell dependencies below come from haskellPackages.
with haskellPackages;
mkDerivation {
inherit src;
pname = "cabal2stack";
version = "0";
isExecutable = true;
executableHaskellDepends = [ base
aeson
cabal-plan
HsYAML
HsYAML-aeson
optics-core
optics-extra
optparse-applicative
transformers ];
# Relax upstream's overly tight version bounds (see the local patch).
patches = [
./patches/cabal2stack.patch
];
license = lib.licenses.bsd3;
mainProgram = "cabal2stack";
}
# https://nixos.wiki/wiki/Java
# Package the Stanford CoreNLP distribution and wrap it in a
# startCoreNLPServer.sh launcher (default port 9000, override with -p).
{ fetchzip,
makeWrapper,
stdenv,
writeShellScript,
jre,
version ? "4.5.9",
hash ? "sha256-DOGBkGJfvR1PoXz2CNoo58HXwGLxvPKMChRqlrFtQLQ=",
}:
stdenv.mkDerivation (finalAttrs:
let
# Launcher script: parses an optional -p PORT flag, then starts the
# server with a 4 GB heap from the jars under $CORENLP_PATH.
startServer = writeShellScript "startCoreNLPServer.sh" ''
set -x
PORT=9000
while getopts ':p:h' opt; do
case $opt in
(p) PORT=$OPTARG;;
(h) echo "$(basename $0) [-p 9000]"
exit 0
;;
esac
done
shift "$((OPTIND - 1))"
${jre}/bin/java -mx4g -cp "$CORENLP_PATH/*" edu.stanford.nlp.pipeline.StanfordCoreNLPServer -port $PORT -timeout 15000 "$@"
'';
# see https://huggingface.co/stanfordnlp/CoreNLP/commits/main
versionCommits = {
"4.5.8" = "34264e88b7add9e0045f4727bc7d1872385f06aa";
"4.5.9" = "06f79ee8b1ec475d7630b1871bfd75a57c77ffa4";
};
# NOTE: evaluation fails if `version` has no entry in versionCommits.
commit = versionCommits."${finalAttrs.version}";
in
{
name = "corenlp";
inherit version;
src = fetchzip {
inherit hash;
# url = "http://nlp.stanford.edu/software/stanford-corenlp-${finalAttrs.version}.zip";
# Hugging Face is more stable
url = "https://huggingface.co/stanfordnlp/CoreNLP/resolve/${commit}/stanford-corenlp-latest.zip";
};
buildInputs = [
jre
];
nativeBuildInputs = [
makeWrapper
];
# No compile step: just unpack the release zip and install it.
phases = [ "unpackPhase" "installPhase" ];
installPhase = ''
runHook preInstall
mkdir -p $out/bin
mkdir -p $out/share/corenlp
cp -r . $out/share/corenlp
makeWrapper ${startServer} $out/bin/startCoreNLPServer.sh \
--set CORENLP_PATH "$out/share/corenlp"
runHook postInstall
'';
}
)
# Build graphviz from an upstream development snapshot and apply a patch
# increasing YY_BUF_SIZE (needed for very large graphs, see below).
{ fetchFromGitLab,
fetchpatch,
graphviz }:
let
# Pin graphviz to a specific development revision from upstream GitLab.
graphviz_dev = graphviz.overrideAttrs (finalAttrs: previousAttrs: {
version = "11.0.0~dev";
src = fetchFromGitLab {
owner = "graphviz";
repo = "graphviz";
rev = "f3ec849249ef9cb824feb7f97449d7159e1dcb4e"; # head as of 2024-03-25, see gargantext#329
hash = "sha256-s86IqWz6zeKbcRqpV3cVQBVviHbhUSX1U8GVuJBfjC4=";
};
});
in
graphviz_dev.overrideAttrs
(finalAttrs: previousAttrs: {
# Increase the YY_BUF_SIZE, see https://gitlab.iscpif.fr/gargantext/haskell-gargantext/issues/290#note_9015
patches = [
(fetchpatch {
url = "https://gist.githubusercontent.com/adinapoli/e93ca7b1d714d27f4af537716b03e3bb/raw/b9cc297c3465878da2d18ee92a3f9b8273923493/graphviz-yy-buf-size.patch";
sha256 = "sha256-8Q3tf37iYaPV50P+Vf/n263ordECiu5eKwONCy3ynV8=";
})
];
})
# Build igraph (default 0.10.4) from source against system-provided
# BLAS/LAPACK, ARPACK, GLPK, GMP and plfit, with OpenMP enabled.
{ fetchFromGitHub,
igraph,
arpack,
blas,
glpk,
gmp,
lapack,
libxml2,
nanomsg,
plfit,
llvmPackages,
version ? "0.10.4",
hash ? "sha256-LsTOxUktGZcp46Ec9QH3+9C+VADMYTZZCjKF1gp36xk=" }:
igraph.overrideAttrs (finalAttrs: previousAttrs: {
inherit version;
nativeBuildInputs = previousAttrs.nativeBuildInputs;
src = fetchFromGitHub {
inherit hash;
owner = "igraph";
repo = "igraph";
rev = version;
};
# Release tarballs carry this file; recreate it when building from git.
postPatch = ''
echo "${version}" > IGRAPH_VERSION
'';
outputs = [ "dev" "out" "doc" ];
buildInputs = [
arpack
blas
glpk
gmp
lapack
libxml2
nanomsg
plfit
llvmPackages.openmp
];
# Prefer external (system) versions of all optional dependencies.
cmakeFlags = [
"-DIGRAPH_USE_INTERNAL_BLAS=OFF"
"-DIGRAPH_USE_INTERNAL_LAPACK=OFF"
"-DIGRAPH_USE_INTERNAL_ARPACK=OFF"
"-DIGRAPH_USE_INTERNAL_GLPK=OFF"
"-DIGRAPH_USE_INTERNAL_GMP=OFF"
"-DIGRAPH_USE_INTERNAL_PLFIT=OFF"
"-DIGRAPH_GLPK_SUPPORT=ON"
"-DIGRAPH_GRAPHML_SUPPORT=OFF"
"-DIGRAPH_OPENMP_SUPPORT=ON"
"-DIGRAPH_ENABLE_LTO=AUTO"
"-DIGRAPH_ENABLE_TLS=ON"
"-DBUILD_SHARED_LIBS=ON"
"-DCMAKE_INSTALL_PREFIX=${placeholder "out"}"
"-DCMAKE_INSTALL_LIBDIR=${placeholder "out"}/lib"
"-DCMAKE_INSTALL_DATADIR=${placeholder "out"}/share"
];
doCheck = false;
postInstall = ''
mkdir -p "$out/share"
cp -r doc "$out/share"
'';
# NOTE(review): copies headers up one level so they land directly under
# $dev/include — confirm downstream consumers still rely on this layout.
postFixup = previousAttrs.postFixup + ''
echo "Copying files where they belong .."
CUR_DIR=$PWD
cd "$dev/include/igraph" && cp *.h ../
cd $CUR_DIR
'';
})
diff --git i/cabal2stack.cabal w/cabal2stack.cabal
index 69767a2..92c4895 100644
--- i/cabal2stack.cabal
+++ w/cabal2stack.cabal
@@ -26,14 +26,14 @@ executable cabal2stack
-- boot dependencies
build-depends:
- , base >=4.12 && <4.18
+ , base >=4.12 && <5
, bytestring ^>=0.10.8.2 || ^>=0.11.3.0
, containers ^>=0.6.0.1
, directory ^>=1.3.3.0
, filepath ^>=1.4.2.1
, process ^>=1.6.5.0
, text >=1.2.3.0 && <2.1
- , transformers ^>=0.5.6.2
+ , transformers >=0.5.6.2 && < 0.7
-- other dependencies
build-depends:
@@ -43,4 +43,4 @@ executable cabal2stack
, HsYAML-aeson ^>=0.2.0.1
, optics-core ^>=0.4
, optics-extra ^>=0.4
- , optparse-applicative ^>=0.17.0.0
+ , optparse-applicative >=0.17.0.0 && < 0.20
...@@ -6,88 +6,15 @@ rec { ...@@ -6,88 +6,15 @@ rec {
inherit pkgs; inherit pkgs;
ghc966 = pkgs.haskell.compiler.ghc966; ghc966 = pkgs.haskell.compiler.ghc966;
cabal_install = pkgs.haskell.lib.compose.justStaticExecutables pkgs.haskell.packages.ghc966.cabal-install; cabal_install = pkgs.haskell.lib.compose.justStaticExecutables pkgs.haskell.packages.ghc966.cabal-install;
graphviz_dev = pkgs.graphviz.overrideAttrs (finalAttrs: previousAttrs: {
version = "11.0.0~dev"; graphviz = pkgs.callPackage ./graphviz.nix {};
src = pkgs.fetchFromGitLab { igraph_0_10_4 = pkgs.callPackage ./igraph.nix {};
owner = "graphviz"; corenlp = pkgs.callPackage ./corenlp.nix { }; # 4.5.8
repo = "graphviz"; cabal2stack = pkgs.callPackage ./cabal2stack.nix { ghc = ghc966; };
rev = "f3ec849249ef9cb824feb7f97449d7159e1dcb4e"; # head as of 2024-03-25, see gargantext#329 nng_notls = pkgs.nng.overrideAttrs (old: {
hash = "sha256-s86IqWz6zeKbcRqpV3cVQBVviHbhUSX1U8GVuJBfjC4="; cmakeFlags = (old.cmakeFlags or []) ++ [ "-DNNG_ENABLE_TLS=OFF" ];
};
});
graphviz = graphviz_dev.overrideAttrs (finalAttrs: previousAttrs: {
# Increase the YY_BUF_SIZE, see https://gitlab.iscpif.fr/gargantext/haskell-gargantext/issues/290#note_9015
patches = [
(pkgs.fetchpatch {
url = "https://gist.githubusercontent.com/adinapoli/e93ca7b1d714d27f4af537716b03e3bb/raw/b9cc297c3465878da2d18ee92a3f9b8273923493/graphviz-yy-buf-size.patch";
sha256 = "sha256-8Q3tf37iYaPV50P+Vf/n263ordECiu5eKwONCy3ynV8=";
})
];
});
igraph_0_10_4 = pkgs.igraph.overrideAttrs (finalAttrs: previousAttrs: {
version = "0.10.4";
nativeBuildInputs = previousAttrs.nativeBuildInputs;
src = pkgs.fetchFromGitHub {
owner = "igraph";
repo = "igraph";
rev = "0.10.4";
hash = "sha256-LsTOxUktGZcp46Ec9QH3+9C+VADMYTZZCjKF1gp36xk=";
};
postPatch = ''
echo "0.10.4" > IGRAPH_VERSION
'';
outputs = [ "dev" "out" "doc" ];
buildInputs = [
pkgs.arpack
pkgs.blas
pkgs.glpk
pkgs.gmp
pkgs.lapack
pkgs.libxml2
pkgs.nanomsg
pkgs.plfit
pkgs.llvmPackages.openmp
];
cmakeFlags = [
"-DIGRAPH_USE_INTERNAL_BLAS=OFF"
"-DIGRAPH_USE_INTERNAL_LAPACK=OFF"
"-DIGRAPH_USE_INTERNAL_ARPACK=OFF"
"-DIGRAPH_USE_INTERNAL_GLPK=OFF"
"-DIGRAPH_USE_INTERNAL_GMP=OFF"
"-DIGRAPH_USE_INTERNAL_PLFIT=OFF"
"-DIGRAPH_GLPK_SUPPORT=ON"
"-DIGRAPH_GRAPHML_SUPPORT=OFF"
"-DIGRAPH_OPENMP_SUPPORT=ON"
"-DIGRAPH_ENABLE_LTO=AUTO"
"-DIGRAPH_ENABLE_TLS=ON"
"-DBUILD_SHARED_LIBS=ON"
"-DCMAKE_INSTALL_PREFIX=${placeholder "out"}"
"-DCMAKE_INSTALL_LIBDIR=${placeholder "out"}/lib"
"-DCMAKE_INSTALL_DATADIR=${placeholder "out"}/share"
];
doCheck = false;
postInstall = ''
mkdir -p "$out/share"
cp -r doc "$out/share"
'';
postFixup = previousAttrs.postFixup + ''
echo "Copying files where they belong .."
CUR_DIR=$PWD
cd "$dev/include/igraph" && cp *.h ../
cd $CUR_DIR
'';
}); });
hsBuildInputs = [ hsBuildInputs = [
ghc966 ghc966
cabal_install cabal_install
...@@ -96,32 +23,36 @@ rec { ...@@ -96,32 +23,36 @@ rec {
pkgs.haskellPackages.pretty-show pkgs.haskellPackages.pretty-show
]; ];
nonhsBuildInputs = with pkgs; [ nonhsBuildInputs = with pkgs; [
#haskell-language-server
blas
bzip2 bzip2
cabal2stack
corenlp
curl
czmq czmq
docker-compose docker-compose
expat
gfortran
git git
gmp gmp
graphviz
gsl gsl
#haskell-language-server
hlint hlint
libffi icu
igraph_0_10_4
jre
lapack lapack
xz libffi
libpqxx
libsodium
nng_notls
nil # nix language server
pcre pcre
pkg-config pkg-config
postgresql postgresql
stdenv.cc.cc
xz xz
zlib zlib
blas
gfortran
expat
icu
graphviz
gcc13
igraph_0_10_4
libpqxx
libsodium
nanomsg
zeromq zeromq
curl curl
] ++ ( lib.optionals stdenv.isDarwin [ ] ++ ( lib.optionals stdenv.isDarwin [
......
...@@ -70,29 +70,30 @@ import System.Cron.Schedule qualified as Cron ...@@ -70,29 +70,30 @@ import System.Cron.Schedule qualified as Cron
-- | startGargantext takes as parameters port number and Toml file. -- | startGargantext takes as parameters port number and Toml file.
startGargantext :: Mode -> PortNumber -> SettingsFile -> IO () startGargantext :: Mode -> PortNumber -> SettingsFile -> IO ()
startGargantext mode port sf@(SettingsFile settingsFile) = withLoggerIO mode $ \logger -> do startGargantext mode port sf@(SettingsFile settingsFile) = do
config <- readConfig sf <&> (gc_frontend_config . fc_appPort) .~ port config <- readConfig sf <&> (gc_frontend_config . fc_appPort) .~ port
when (port /= config ^. gc_frontend_config . fc_appPort) $ withLoggerIO (config ^. gc_logging) $ \logger -> do
panicTrace "TODO: conflicting settings of port" when (port /= config ^. gc_frontend_config . fc_appPort) $
withNotifications config $ \dispatcher -> do panicTrace "TODO: conflicting settings of port"
env <- newEnv logger config dispatcher withNotifications config $ \dispatcher -> do
let fc = env ^. env_config . gc_frontend_config env <- newEnv logger config dispatcher
let proxyStatus = microServicesProxyStatus fc let fc = env ^. env_config . gc_frontend_config
runDbCheck env let proxyStatus = microServicesProxyStatus fc
startupInfo config port proxyStatus runDbCheck env
app <- makeApp env startupInfo config port proxyStatus
mid <- makeGargMiddleware (fc ^. fc_cors) mode app <- makeApp env
periodicActions <- schedulePeriodicActions env mid <- makeGargMiddleware (fc ^. fc_cors) mode
periodicActions <- schedulePeriodicActions env
let runServer = run port (mid app) `finally` stopGargantext periodicActions
case proxyStatus of let runServer = run port (mid app) `finally` stopGargantext periodicActions
PXY_disabled case proxyStatus of
-> runServer -- the proxy is disabled, do not spawn the application PXY_disabled
PXY_enabled proxyPort -> runServer -- the proxy is disabled, do not spawn the application
-> do PXY_enabled proxyPort
proxyCache <- InMemory.newCache (Just oneHour) -> do
let runProxy = run proxyPort (mid (microServicesProxyApp proxyCache env)) proxyCache <- InMemory.newCache (Just oneHour)
Async.race_ runServer runProxy let runProxy = run proxyPort (mid (microServicesProxyApp proxyCache env))
Async.race_ runServer runProxy
where runDbCheck env = do where runDbCheck env = do
r <- runExceptT (runReaderT DB.dbCheck env) `catch` r <- runExceptT (runReaderT DB.dbCheck env) `catch`
......
...@@ -38,12 +38,11 @@ module Gargantext.API.Admin.EnvTypes ( ...@@ -38,12 +38,11 @@ module Gargantext.API.Admin.EnvTypes (
import Control.Lens (to, view) import Control.Lens (to, view)
import Data.List ((\\)) import Data.List ((\\))
import Data.Pool (Pool) import Data.Pool (Pool)
import Data.Text qualified as T
import Database.PostgreSQL.Simple (Connection) import Database.PostgreSQL.Simple (Connection)
import Gargantext.API.Admin.Orchestrator.Types (JobLog, noJobLog) import Gargantext.API.Admin.Orchestrator.Types (JobLog, noJobLog)
import Gargantext.API.Errors.Types (BackendInternalError) import Gargantext.API.Errors.Types (BackendInternalError)
import Gargantext.API.Prelude (GargM, IsGargServer) import Gargantext.API.Prelude (GargM, IsGargServer)
import Gargantext.Core.Config (GargConfig(..), gc_mail_config, gc_nlp_config, HasJWTSettings(..), HasConfig(..), HasManager(..), gc_logging, lc_log_level) import Gargantext.Core.Config
import Gargantext.Core.Mail.Types (HasMail, mailSettings) import Gargantext.Core.Mail.Types (HasMail, mailSettings)
import Gargantext.Core.NLP (HasNLPServer(..), nlpServerMap) import Gargantext.Core.NLP (HasNLPServer(..), nlpServerMap)
import Gargantext.Core.NodeStory import Gargantext.Core.NodeStory
...@@ -58,6 +57,7 @@ import Gargantext.Utils.Jobs.Monad (MonadJobStatus(..)) ...@@ -58,6 +57,7 @@ import Gargantext.Utils.Jobs.Monad (MonadJobStatus(..))
import Network.HTTP.Client (Manager) import Network.HTTP.Client (Manager)
import Servant.Auth.Server (JWTSettings) import Servant.Auth.Server (JWTSettings)
import System.Log.FastLogger qualified as FL import System.Log.FastLogger qualified as FL
import Gargantext.System.Logging.Loggers
data Mode = Dev | Mock | Prod data Mode = Dev | Mock | Prod
...@@ -139,21 +139,13 @@ instance MonadLogger (GargM DevEnv BackendInternalError) where ...@@ -139,21 +139,13 @@ instance MonadLogger (GargM DevEnv BackendInternalError) where
instance HasLogger (GargM DevEnv BackendInternalError) where instance HasLogger (GargM DevEnv BackendInternalError) where
data instance Logger (GargM DevEnv BackendInternalError) = data instance Logger (GargM DevEnv BackendInternalError) =
GargDevLogger { GargDevLogger { _GargDevLogger :: MonadicStdLogger FL.LogStr IO }
dev_logger_mode :: Mode type instance LogInitParams (GargM DevEnv BackendInternalError) = LogConfig
, dev_logger_set :: FL.LoggerSet
}
type instance LogInitParams (GargM DevEnv BackendInternalError) = Mode
type instance LogPayload (GargM DevEnv BackendInternalError) = FL.LogStr type instance LogPayload (GargM DevEnv BackendInternalError) = FL.LogStr
initLogger = \mode -> do initLogger cfg = fmap GargDevLogger $ (liftIO $ monadicStdLogger cfg)
dev_logger_set <- liftIO $ FL.newStderrLoggerSet FL.defaultBufSize destroyLogger = liftIO . _msl_destroy . _GargDevLogger
pure $ GargDevLogger mode dev_logger_set logMsg (GargDevLogger ioLogger) lvl msg = liftIO $ _msl_log_msg ioLogger lvl msg
destroyLogger = \GargDevLogger{..} -> liftIO $ FL.rmLoggerSet dev_logger_set logTxt (GargDevLogger ioLogger) lvl msg = liftIO $ _msl_log_txt ioLogger lvl msg
logMsg = \(GargDevLogger mode logger_set) lvl msg -> do
let pfx = "[" <> show lvl <> "] " :: Text
when (lvl `elem` (modeToLoggingLevels mode)) $
liftIO $ FL.pushLogStrLn logger_set $ FL.toLogStr pfx <> msg
logTxt lgr lvl msg = logMsg lgr lvl (FL.toLogStr $ T.unpack msg)
data DevEnv = DevEnv data DevEnv = DevEnv
{ _dev_env_config :: !GargConfig { _dev_env_config :: !GargConfig
...@@ -225,25 +217,14 @@ instance HasNLPServer DevEnv where ...@@ -225,25 +217,14 @@ instance HasNLPServer DevEnv where
instance IsGargServer Env BackendInternalError (GargM Env BackendInternalError) instance IsGargServer Env BackendInternalError (GargM Env BackendInternalError)
instance HasLogger (GargM Env BackendInternalError) where instance HasLogger (GargM Env BackendInternalError) where
data instance Logger (GargM Env BackendInternalError) = newtype instance Logger (GargM Env BackendInternalError) =
GargLogger { GargLogger { _GargLogger :: MonadicStdLogger FL.LogStr IO }
logger_mode :: Mode type instance LogInitParams (GargM Env BackendInternalError) = LogConfig
, logger_set :: FL.LoggerSet
}
type instance LogInitParams (GargM Env BackendInternalError) = Mode
type instance LogPayload (GargM Env BackendInternalError) = FL.LogStr type instance LogPayload (GargM Env BackendInternalError) = FL.LogStr
initLogger mode = do initLogger cfg = fmap GargLogger $ (liftIO $ monadicStdLogger cfg)
logger_set <- liftIO $ FL.newStderrLoggerSet FL.defaultBufSize destroyLogger = liftIO . _msl_destroy . _GargLogger
pure $ GargLogger mode logger_set logMsg (GargLogger ioLogger) lvl msg = liftIO $ _msl_log_msg ioLogger lvl msg
destroyLogger (GargLogger{..}) = liftIO $ FL.rmLoggerSet logger_set logTxt (GargLogger ioLogger) lvl msg = liftIO $ _msl_log_txt ioLogger lvl msg
logMsg (GargLogger mode logger_set) lvl msg = do
cfg <- view hasConfig
let minLvl = cfg ^. gc_logging . lc_log_level
when (lvl >= minLvl) $ do
let pfx = "[" <> show lvl <> "] " :: Text
when (lvl `elem` (modeToLoggingLevels mode)) $
liftIO $ FL.pushLogStrLn logger_set $ FL.toLogStr pfx <> msg
logTxt lgr lvl msg = logMsg lgr lvl (FL.toLogStr $ T.unpack msg)
instance MonadLogger (GargM Env BackendInternalError) where instance MonadLogger (GargM Env BackendInternalError) where
getLogger = asks _env_logger getLogger = asks _env_logger
...@@ -266,10 +266,11 @@ nodeWriteChecks nid = ...@@ -266,10 +266,11 @@ nodeWriteChecks nid =
-- if: -- if:
-- * He/she is a super user -- * He/she is a super user
-- * He/she owns the target or the source -- * He/she owns the target or the source
-- * The node has been shared with the user
moveChecks :: SourceId -> TargetId -> BoolExpr AccessCheck moveChecks :: SourceId -> TargetId -> BoolExpr AccessCheck
moveChecks (SourceId sourceId) (TargetId targetId) = moveChecks (SourceId sourceId) (TargetId targetId) =
BAnd (nodeUser sourceId `BOr` nodeSuper sourceId) BAnd (nodeUser sourceId `BOr` nodeSuper sourceId `BOr` nodeShared sourceId)
(nodeUser targetId `BOr` nodeUser targetId) (nodeUser targetId `BOr` nodeUser targetId `BOr` nodeShared targetId)
publishChecks :: NodeId -> BoolExpr AccessCheck publishChecks :: NodeId -> BoolExpr AccessCheck
publishChecks nodeId = publishChecks nodeId =
......
...@@ -16,11 +16,11 @@ import Control.Lens (view) ...@@ -16,11 +16,11 @@ import Control.Lens (view)
import Control.Monad (fail) import Control.Monad (fail)
import Database.PostgreSQL.Simple qualified as PGS import Database.PostgreSQL.Simple qualified as PGS
import Data.Pool (withResource) import Data.Pool (withResource)
import Gargantext.API.Admin.EnvTypes ( DevEnv(..), Mode(Dev) ) import Gargantext.API.Admin.EnvTypes ( DevEnv(..) )
import Gargantext.API.Admin.Settings ( newPool ) import Gargantext.API.Admin.Settings ( newPool )
import Gargantext.API.Errors.Types ( BackendInternalError ) import Gargantext.API.Errors.Types ( BackendInternalError )
import Gargantext.API.Prelude ( GargM ) import Gargantext.API.Prelude ( GargM )
import Gargantext.Core.Config (_gc_database_config) import Gargantext.Core.Config (_gc_database_config, gc_logging)
import Gargantext.Core.Config.Types (SettingsFile(..)) import Gargantext.Core.Config.Types (SettingsFile(..))
import Gargantext.Core.Config.Utils (readConfig) import Gargantext.Core.Config.Utils (readConfig)
import Gargantext.Core.NodeStory (fromDBNodeStoryEnv) import Gargantext.Core.NodeStory (fromDBNodeStoryEnv)
...@@ -32,14 +32,14 @@ import Servant ( ServerError ) ...@@ -32,14 +32,14 @@ import Servant ( ServerError )
------------------------------------------------------------------- -------------------------------------------------------------------
withDevEnv :: SettingsFile -> (DevEnv -> IO a) -> IO a withDevEnv :: SettingsFile -> (DevEnv -> IO a) -> IO a
withDevEnv settingsFile k = withLoggerIO Dev $ \logger -> do withDevEnv settingsFile k = do
env <- newDevEnv logger cfg <- readConfig settingsFile
k env -- `finally` cleanEnv env withLoggerIO (cfg ^. gc_logging) $ \logger -> do
env <- newDevEnv logger cfg
k env -- `finally` cleanEnv env
where where
newDevEnv logger = do newDevEnv logger cfg = do
cfg <- readConfig settingsFile
--nodeStory_env <- fromDBNodeStoryEnv (_gc_repofilepath cfg)
pool <- newPool (_gc_database_config cfg) pool <- newPool (_gc_database_config cfg)
nodeStory_env <- fromDBNodeStoryEnv pool nodeStory_env <- fromDBNodeStoryEnv pool
manager <- newTlsManager manager <- newTlsManager
......
...@@ -21,21 +21,21 @@ import Data.ByteString.Lazy qualified as BSL ...@@ -21,21 +21,21 @@ import Data.ByteString.Lazy qualified as BSL
import Data.Csv qualified as Tsv import Data.Csv qualified as Tsv
import Data.HashMap.Strict (HashMap) import Data.HashMap.Strict (HashMap)
import Data.HashMap.Strict qualified as HashMap import Data.HashMap.Strict qualified as HashMap
import Data.Map.Strict qualified as Map
import Data.Map.Strict (toList) import Data.Map.Strict (toList)
import Data.Map.Strict qualified as Map
import Data.Set qualified as Set import Data.Set qualified as Set
import Data.Text (concat, pack, splitOn) import Data.Text (concat, pack, splitOn)
import Data.Vector qualified as Vec
import Data.Vector (Vector) import Data.Vector (Vector)
import Data.Vector qualified as Vec
import Gargantext.API.Admin.EnvTypes (Env) import Gargantext.API.Admin.EnvTypes (Env)
import Gargantext.API.Errors.Types (BackendInternalError(InternalServerError)) import Gargantext.API.Errors.Types (BackendInternalError(InternalServerError))
import Gargantext.API.Ngrams.List.Types (_wjf_data, _wtf_data)
import Gargantext.API.Ngrams.Prelude (getNgramsList)
import Gargantext.API.Ngrams (setListNgrams) import Gargantext.API.Ngrams (setListNgrams)
import Gargantext.API.Ngrams.List.Types (_wjf_data, _wtf_data, _wtf_name)
import Gargantext.API.Ngrams.Prelude (getNgramsList)
import Gargantext.API.Ngrams.Types import Gargantext.API.Ngrams.Types
import Gargantext.API.Prelude (GargM, serverError, HasServerError) import Gargantext.API.Prelude (GargM, serverError, HasServerError)
import Gargantext.API.Routes.Named.List qualified as Named import Gargantext.API.Routes.Named.List qualified as Named
import Gargantext.API.Worker (serveWorkerAPI, serveWorkerAPIEJob) import Gargantext.API.Worker (serveWorkerAPI, serveWorkerAPIM)
import Gargantext.Core.NodeStory.Types ( HasNodeStory ) import Gargantext.Core.NodeStory.Types ( HasNodeStory )
import Gargantext.Core.Text.Ngrams (Ngrams, NgramsType(NgramsTerms)) import Gargantext.Core.Text.Ngrams (Ngrams, NgramsType(NgramsTerms))
import Gargantext.Core.Types.Main (ListType(..)) import Gargantext.Core.Types.Main (ListType(..))
...@@ -47,13 +47,12 @@ import Gargantext.Database.Schema.Ngrams ( text2ngrams, NgramsId ) ...@@ -47,13 +47,12 @@ import Gargantext.Database.Schema.Ngrams ( text2ngrams, NgramsId )
import Gargantext.Database.Schema.Node (_node_parent_id) import Gargantext.Database.Schema.Node (_node_parent_id)
import Gargantext.Database.Types (Indexed(..)) import Gargantext.Database.Types (Indexed(..))
import Gargantext.Prelude hiding (concat, toList) import Gargantext.Prelude hiding (concat, toList)
import Gargantext.System.Logging (logLocM, MonadLogger) import Gargantext.System.Logging
import Gargantext.Utils.Jobs.Monad (MonadJobStatus(..)) import Gargantext.Utils.Jobs.Monad (MonadJobStatus(..))
import Prelude qualified import Prelude qualified
import Protolude qualified as P import Protolude qualified as P
import Servant import Servant
import Servant.Server.Generic (AsServerT) import Servant.Server.Generic (AsServerT)
import Gargantext.System.Logging (LogLevel(..))
getAPI :: Named.GETAPI (AsServerT (GargM Env BackendInternalError)) getAPI :: Named.GETAPI (AsServerT (GargM Env BackendInternalError))
...@@ -159,11 +158,12 @@ tsvAPI = tsvPostAsync ...@@ -159,11 +158,12 @@ tsvAPI = tsvPostAsync
tsvPostAsync :: Named.TSVAPI (AsServerT (GargM Env BackendInternalError)) tsvPostAsync :: Named.TSVAPI (AsServerT (GargM Env BackendInternalError))
tsvPostAsync = tsvPostAsync =
Named.TSVAPI { Named.TSVAPI {
updateListTSVEp = \lId -> serveWorkerAPIEJob $ \p -> updateListTSVEp = \lId -> serveWorkerAPIM $ \p -> do
$(logLocM) DEBUG $ "Started to upload " <> (_wtf_name p)
case ngramsListFromTSVData (_wtf_data p) of case ngramsListFromTSVData (_wtf_data p) of
Left err -> Left $ InternalServerError $ err500 { errReasonPhrase = err } Left err -> throwError $ InternalServerError $ err500 { errReasonPhrase = err }
Right ngramsList -> Right $ Jobs.JSONPost { _jp_list_id = lId Right ngramsList -> pure $ Jobs.JSONPost { _jp_list_id = lId
, _jp_ngrams_list = ngramsList } , _jp_ngrams_list = ngramsList }
} }
-- | Tries converting a text file into an 'NgramList', so that we can reuse the -- | Tries converting a text file into an 'NgramList', so that we can reuse the
...@@ -189,6 +189,9 @@ tsvToNgramsTableMap :: Tsv.Record -> Tsv.Parser (Maybe NgramsTableMap) ...@@ -189,6 +189,9 @@ tsvToNgramsTableMap :: Tsv.Record -> Tsv.Parser (Maybe NgramsTableMap)
tsvToNgramsTableMap record = case Vec.toList record of tsvToNgramsTableMap record = case Vec.toList record of
(map P.decodeUtf8 -> [status, label, forms]) (map P.decodeUtf8 -> [status, label, forms])
-> pure $ Just $ conv status label forms -> pure $ Just $ conv status label forms
-- Garg #381: alias the forms to the empty text.
(map P.decodeUtf8 -> [status, label])
-> pure $ Just $ conv status label mempty
-- WARNING: This silently ignores errors (#433) -- WARNING: This silently ignores errors (#433)
_ -> pure Nothing _ -> pure Nothing
......
...@@ -32,10 +32,15 @@ data WorkerAPI contentType input mode = WorkerAPI ...@@ -32,10 +32,15 @@ data WorkerAPI contentType input mode = WorkerAPI
serveWorkerAPI :: IsGargServer env err m serveWorkerAPI :: IsGargServer env err m
=> (input -> Job) => (input -> Job)
-> WorkerAPI contentType input (AsServerT m) -> WorkerAPI contentType input (AsServerT m)
serveWorkerAPI f = WorkerAPI { workerAPIPost } serveWorkerAPI f = serveWorkerAPIM (pure . f)
serveWorkerAPIM :: IsGargServer env err m
=> (input -> m Job)
-> WorkerAPI contentType input (AsServerT m)
serveWorkerAPIM mkJob = WorkerAPI { workerAPIPost }
where where
workerAPIPost i = do workerAPIPost i = do
let job = f i job <- mkJob i
logM DEBUG $ "[serveWorkerAPI] sending job " <> show job logM DEBUG $ "[serveWorkerAPI] sending job " <> show job
mId <- sendJob job mId <- sendJob job
pure $ JobInfo { _ji_message_id = mId pure $ JobInfo { _ji_message_id = mId
......
...@@ -16,37 +16,27 @@ module Gargantext.Core.Config.Utils ( ...@@ -16,37 +16,27 @@ module Gargantext.Core.Config.Utils (
import Gargantext.Core.Config.Types (SettingsFile(..)) import Gargantext.Core.Config.Types (SettingsFile(..))
import Gargantext.Prelude import Gargantext.Prelude
-- import Network.URI (URI)
-- import Network.URI (parseURI)
import Toml import Toml
import Toml.Schema import Gargantext.Core.Config
import System.Environment (lookupEnv)
import Gargantext.System.Logging.Types (parseLogLevel)
import qualified Data.Text as T
readConfig :: FromValue a => SettingsFile -> IO a readConfig :: SettingsFile -> IO GargConfig
readConfig (SettingsFile fp) = do readConfig (SettingsFile fp) = do
c <- readFile fp c <- readFile fp
case decode c of case decode c of
Failure err -> panicTrace ("Error reading TOML file: " <> show err) Failure err -> panicTrace ("Error reading TOML file: " <> show err)
Success _ r -> return r Success _ r -> do
-- Ovverride the log level based on the GGTX_LOG_LEVEL (if set)
mLvl <- lookupEnv "GGTX_LOG_LEVEL"
-- _URI :: Toml.TomlBiMap URI Text case mLvl of
-- _URI = Toml.BiMap (Right . show) parseURI' Nothing -> pure r
-- where Just s ->
-- parseURI' :: Text -> Either Toml.TomlBiMapError URI case parseLogLevel (T.pack s) of
-- parseURI' t = Left err -> do
-- case parseURI (T.unpack t) of putStrLn $ "unknown log level " <> s <> ": " <> T.unpack err <> " , ignoring GGTX_LOG_LEVEL"
-- Nothing -> Left $ Toml.ArbitraryError "Cannot parse URI" pure r
-- Just u -> Right u Right lvl' -> pure $ r & gc_logging . lc_log_level .~ lvl'
-- uriToml :: Toml.Key -> Toml.TomlCodec URI
-- uriToml = Toml.match (_URI >>> Toml._Text)
-- _Word16 :: Toml.TomlBiMap Word16 Toml.AnyValue
-- _Word16 = Toml._BoundedInteger >>> Toml._Integer
-- word16Toml :: Toml.Key -> Toml.TomlCodec Word16
-- word16Toml = Toml.match _Word16
...@@ -14,6 +14,8 @@ https://dev.sub.gargantext.org/#/share/Notes/187918 ...@@ -14,6 +14,8 @@ https://dev.sub.gargantext.org/#/share/Notes/187918
-} -}
{-# LANGUAGE TemplateHaskell #-}
module Gargantext.Core.Notifications.CentralExchange ( module Gargantext.Core.Notifications.CentralExchange (
gServer gServer
, notify , notify
...@@ -29,8 +31,8 @@ import Gargantext.Core.Config (GargConfig, gc_notifications_config, gc_logging) ...@@ -29,8 +31,8 @@ import Gargantext.Core.Config (GargConfig, gc_notifications_config, gc_logging)
import Gargantext.Core.Config.Types (NotificationsConfig(..)) import Gargantext.Core.Config.Types (NotificationsConfig(..))
import Gargantext.Core.Notifications.CentralExchange.Types import Gargantext.Core.Notifications.CentralExchange.Types
import Gargantext.Prelude import Gargantext.Prelude
import Gargantext.System.Logging (LogLevel(..), withLogger, logMsg) import Gargantext.System.Logging (LogLevel(..), withLogger, logLoc, Logger)
import Nanomsg (Pull(..), Push(..), bind, connect, recv, send, withSocket) import Nanomsg (Pull(..), Push(..), bind, connect, recv, send, withSocket, shutdown, Socket, Sender)
import System.Timeout (timeout) import System.Timeout (timeout)
{- {-
...@@ -46,31 +48,31 @@ with many users having updates. ...@@ -46,31 +48,31 @@ with many users having updates.
-} -}
gServer :: GargConfig -> IO () gServer :: HasCallStack => GargConfig -> IO ()
gServer cfg = do gServer cfg = do
withSocket Pull $ \s -> do withLogger log_cfg $ \ioLogger -> do
withSocket Push $ \s_dispatcher -> do withSocket Pull $ \s -> do
withLogger log_cfg $ \ioLogger -> do withSocket Push $ \s_dispatcher -> do
logMsg ioLogger DEBUG $ "[central_exchange] binding to " <> T.unpack _nc_central_exchange_bind $(logLoc) ioLogger DEBUG $ "[central_exchange] binding to " <> _nc_central_exchange_bind
_ <- bind s $ T.unpack _nc_central_exchange_bind bindEndpoint <- bind s $ T.unpack _nc_central_exchange_bind
withLogger log_cfg $ \ioLogger -> do $(logLoc) ioLogger DEBUG $ "[central_exchange] bound to " <> show bindEndpoint
logMsg ioLogger DEBUG $ "[central_exchange] connecting to " <> T.unpack _nc_dispatcher_bind $(logLoc) ioLogger DEBUG $ "[central_exchange] connecting to " <> _nc_dispatcher_bind
_ <- connect s_dispatcher $ T.unpack _nc_dispatcher_connect dispatchEndpoint <- connect s_dispatcher $ T.unpack _nc_dispatcher_connect
$(logLoc) ioLogger DEBUG $ "[central_exchange] connected to " <> show dispatchEndpoint
tChan <- TChan.newTChanIO
tChan <- TChan.newTChanIO
-- | We have 2 threads: one that listens for nanomsg messages
-- | and puts them on the 'tChan' and the second one that reads -- | We have 2 threads: one that listens for nanomsg messages
-- | the 'tChan' and calls Dispatcher accordingly. This is to -- | and puts them on the 'tChan' and the second one that reads
-- | make reading nanomsg as fast as possible. -- | the 'tChan' and calls Dispatcher accordingly. This is to
void $ Async.concurrently (worker s_dispatcher tChan) $ do -- | make reading nanomsg as fast as possible.
withLogger log_cfg $ \ioLogger -> do void $ Async.concurrently (worker s_dispatcher tChan) $ do
forever $ do forever $ do
-- putText "[central_exchange] receiving" $(logLoc) ioLogger DEBUG $ "[central_exchange] receiving"
r <- recv s r <- recv s
logMsg ioLogger DEBUG $ "[central_exchange] received: " <> show r $(logLoc) ioLogger DEBUG $ "[central_exchange] received: " <> show r
-- C.putStrLn $ "[central_exchange] " <> r -- C.putStrLn $ "[central_exchange] " <> r
atomically $ TChan.writeTChan tChan r atomically $ TChan.writeTChan tChan r
where where
NotificationsConfig{..} = cfg ^. gc_notifications_config NotificationsConfig{..} = cfg ^. gc_notifications_config
log_cfg = cfg ^. gc_logging log_cfg = cfg ^. gc_logging
...@@ -80,7 +82,7 @@ gServer cfg = do ...@@ -80,7 +82,7 @@ gServer cfg = do
r <- atomically $ TChan.readTChan tChan r <- atomically $ TChan.readTChan tChan
case Aeson.decode (BSL.fromStrict r) of case Aeson.decode (BSL.fromStrict r) of
Just (UpdateTreeFirstLevel _node_id) -> do Just (UpdateTreeFirstLevel _node_id) -> do
-- logMsg ioLogger DEBUG $ "[central_exchange] update tree: " <> show node_id -- $(logLoc) ioLogger DEBUG $ "[central_exchange] update tree: " <> show node_id
-- putText $ "[central_exchange] sending that to the dispatcher: " <> show node_id -- putText $ "[central_exchange] sending that to the dispatcher: " <> show node_id
-- To make this more robust, use withAsync so we don't -- To make this more robust, use withAsync so we don't
-- block the main thread (send is blocking) -- block the main thread (send is blocking)
...@@ -97,27 +99,43 @@ gServer cfg = do ...@@ -97,27 +99,43 @@ gServer cfg = do
-- process, independent of the server. -- process, independent of the server.
-- send the same message that we received -- send the same message that we received
-- void $ sendNonblocking s_dispatcher r -- void $ sendNonblocking s_dispatcher r
void $ timeout 100_000 $ send s_dispatcher r sendTimeout ioLogger s_dispatcher r
Just (UpdateWorkerProgress _ji _jl) -> do Just (UpdateWorkerProgress _ji _jl) -> do
-- logMsg ioLogger DEBUG $ "[central_exchange] update worker progress: " <> show ji <> ", " <> show jl -- $(logLoc) ioLogger DEBUG $ "[central_exchange] update worker progress: " <> show ji <> ", " <> show jl
void $ timeout 100_000 $ send s_dispatcher r sendTimeout ioLogger s_dispatcher r
Just Ping -> do Just Ping -> do
void $ timeout 100_000 $ send s_dispatcher r sendTimeout ioLogger s_dispatcher r
Nothing -> Nothing ->
logMsg ioLogger ERROR $ "[central_exchange] cannot decode message: " <> show r $(logLoc) ioLogger ERROR $ "[central_exchange] cannot decode message: " <> show r
-- | A static send timeout in microseconds.
notify :: GargConfig -> CEMessage -> IO () send_timeout_us :: Int
notify cfg ceMessage = do send_timeout_us = 50_000
-- | Sends the given payload ensure the send doesn't take more than the static
-- 'send_timeout_ns', logging a message if the timeouts kicks in.
sendTimeout :: Sender a => Logger IO -> Socket a -> ByteString -> IO ()
sendTimeout ioLogger sock payload = withFrozenCallStack $ do
timeoutKickedIn <- timeout send_timeout_us $ send sock $ payload
case timeoutKickedIn of
Nothing ->
$(logLoc) ioLogger ERROR $ "[central_exchange] couldn't send msg in timely fashion."
Just () ->
$(logLoc) ioLogger DEBUG $ "[central_exchange] message sent."
notify :: HasCallStack => GargConfig -> CEMessage -> IO ()
notify cfg ceMessage = withLogger log_cfg $ \ioLogger -> do
Async.withAsync (pure ()) $ \_ -> do Async.withAsync (pure ()) $ \_ -> do
withSocket Push $ \s -> do withSocket Push $ \s -> do
_ <- connect s $ T.unpack _nc_central_exchange_connect connectEndpoint <- connect s $ T.unpack _nc_central_exchange_connect
let str = Aeson.encode ceMessage let do_work = do
withLogger log_cfg $ \ioLogger -> let str = Aeson.encode ceMessage
logMsg ioLogger DEBUG $ "[central_exchange] sending: " <> (T.unpack $ TE.decodeUtf8 $ BSL.toStrict str) $(logLoc) ioLogger DEBUG $ "[central_exchange] sending to " <> _nc_central_exchange_connect
-- err <- sendNonblocking s $ BSL.toStrict str $(logLoc) ioLogger DEBUG $ "[central_exchange] sending: " <> (TE.decodeUtf8 $ BSL.toStrict str)
-- putText $ "[notify] err: " <> show err -- err <- sendNonblocking s $ BSL.toStrict str
void $ timeout 100_000 $ send s $ BSL.toStrict str -- putText $ "[notify] err: " <> show err
sendTimeout ioLogger s (BSL.toStrict str)
do_work `finally` shutdown s connectEndpoint
where where
NotificationsConfig { _nc_central_exchange_connect } = cfg ^. gc_notifications_config NotificationsConfig { _nc_central_exchange_connect } = cfg ^. gc_notifications_config
log_cfg = cfg ^. gc_logging log_cfg = cfg ^. gc_logging
...@@ -14,6 +14,8 @@ https://dev.sub.gargantext.org/#/share/Notes/187918 ...@@ -14,6 +14,8 @@ https://dev.sub.gargantext.org/#/share/Notes/187918
-} -}
{-# LANGUAGE TemplateHaskell #-}
module Gargantext.Core.Notifications.Dispatcher ( module Gargantext.Core.Notifications.Dispatcher (
Dispatcher -- opaque Dispatcher -- opaque
, withDispatcher , withDispatcher
...@@ -34,11 +36,11 @@ import Gargantext.Core.Notifications.CentralExchange.Types qualified as CETypes ...@@ -34,11 +36,11 @@ import Gargantext.Core.Notifications.CentralExchange.Types qualified as CETypes
import Gargantext.Core.Notifications.Dispatcher.Types import Gargantext.Core.Notifications.Dispatcher.Types
import Gargantext.Core.Worker.Types (JobInfo(..)) import Gargantext.Core.Worker.Types (JobInfo(..))
import Gargantext.Prelude import Gargantext.Prelude
import Gargantext.System.Logging (LogLevel(..), withLogger, logMsg)
import Nanomsg (Pull(..), bind, recv, withSocket) import Nanomsg (Pull(..), bind, recv, withSocket)
import Network.WebSockets qualified as WS import Network.WebSockets qualified as WS
import StmContainers.Set qualified as SSet import StmContainers.Set qualified as SSet
import Gargantext.Core.Config import Gargantext.Core.Config
import Gargantext.System.Logging
{- {-
...@@ -92,20 +94,18 @@ dispatcherListener config subscriptions = do ...@@ -92,20 +94,18 @@ dispatcherListener config subscriptions = do
where where
NotificationsConfig { _nc_dispatcher_bind } = config ^. gc_notifications_config NotificationsConfig { _nc_dispatcher_bind } = config ^. gc_notifications_config
log_cfg = config ^. gc_logging log_cfg = config ^. gc_logging
worker tChan throttleTChan = do worker tChan throttleTChan = withLogger log_cfg $ \ioL -> do
-- tId <- myThreadId tId <- myThreadId
forever $ do forever $ do
r <- atomically $ TChan.readTChan tChan r <- atomically $ TChan.readTChan tChan
-- putText $ "[" <> show tId <> "] received a message: " <> decodeUtf8 r $(logLoc) ioL DEBUG $ "[" <> show tId <> "] received a message: " <> decodeUtf8 r
case Aeson.decode (BSL.fromStrict r) of case Aeson.decode (BSL.fromStrict r) of
Nothing -> Nothing ->
withLogger log_cfg $ \ioL ->
logMsg ioL DEBUG "[dispatcher_listener] unknown message from central exchange" logMsg ioL DEBUG "[dispatcher_listener] unknown message from central exchange"
Just ceMessage -> do Just ceMessage -> do
withLogger log_cfg $ \ioL -> logMsg ioL DEBUG $ "[dispatcher_listener] received " <> show ceMessage
logMsg ioL DEBUG $ "[dispatcher_listener] received " <> show ceMessage
-- subs <- atomically $ readTVar subscriptions -- subs <- atomically $ readTVar subscriptions
filteredSubs <- atomically $ do filteredSubs <- atomically $ do
let subs' = UnfoldlM.filter (pure . ceMessageSubPred ceMessage) $ SSet.unfoldlM subscriptions let subs' = UnfoldlM.filter (pure . ceMessageSubPred ceMessage) $ SSet.unfoldlM subscriptions
......
...@@ -11,11 +11,11 @@ https://gitlab.iscpif.fr/gargantext/haskell-gargantext/issues/341 ...@@ -11,11 +11,11 @@ https://gitlab.iscpif.fr/gargantext/haskell-gargantext/issues/341
Docs: Docs:
https://dev.sub.gargantext.org/#/share/Notes/187918 https://dev.sub.gargantext.org/#/share/Notes/187918
-} -}
{-# OPTIONS_GHC -fno-warn-unused-matches -fno-warn-unused-imports #-} {-# OPTIONS_GHC -fno-warn-unused-matches -fno-warn-unused-imports #-}
module Gargantext.Core.Notifications.Dispatcher.Types where module Gargantext.Core.Notifications.Dispatcher.Types where
import Codec.Binary.UTF8.String qualified as CBUTF8 import Codec.Binary.UTF8.String qualified as CBUTF8
...@@ -120,7 +120,7 @@ instance ToJSON Topic where ...@@ -120,7 +120,7 @@ instance ToJSON Topic where
-- pure $ MJobLog jl -- pure $ MJobLog jl
data ConnectedUser = data ConnectedUser =
CUUser UserId CUUser UserId
| CUPublic | CUPublic
...@@ -128,7 +128,7 @@ data ConnectedUser = ...@@ -128,7 +128,7 @@ data ConnectedUser =
instance Hashable ConnectedUser where instance Hashable ConnectedUser where
hashWithSalt salt (CUUser userId) = hashWithSalt salt ("cuuser" :: Text, userId) hashWithSalt salt (CUUser userId) = hashWithSalt salt ("cuuser" :: Text, userId)
hashWithSalt salt CUPublic = hashWithSalt salt ("cupublic" :: Text) hashWithSalt salt CUPublic = hashWithSalt salt ("cupublic" :: Text)
newtype WSKeyConnection = WSKeyConnection (ByteString, WS.Connection) newtype WSKeyConnection = WSKeyConnection (ByteString, WS.Connection)
instance Hashable WSKeyConnection where instance Hashable WSKeyConnection where
hashWithSalt salt (WSKeyConnection (key, _conn)) = hashWithSalt salt key hashWithSalt salt (WSKeyConnection (key, _conn)) = hashWithSalt salt key
...@@ -142,7 +142,7 @@ wsKey :: WSKeyConnection -> ByteString ...@@ -142,7 +142,7 @@ wsKey :: WSKeyConnection -> ByteString
wsKey (WSKeyConnection (key, _conn)) = key wsKey (WSKeyConnection (key, _conn)) = key
wsConn :: WSKeyConnection -> WS.Connection wsConn :: WSKeyConnection -> WS.Connection
wsConn (WSKeyConnection (_key, conn)) = conn wsConn (WSKeyConnection (_key, conn)) = conn
data Subscription = data Subscription =
Subscription { Subscription {
s_connected_user :: ConnectedUser s_connected_user :: ConnectedUser
...@@ -158,7 +158,7 @@ subKey sub = wsKey $ s_ws_key_connection sub ...@@ -158,7 +158,7 @@ subKey sub = wsKey $ s_ws_key_connection sub
type Token = Text type Token = Text
{- {-
We accept requests for subscription/unsubscription via websocket. We accept requests for subscription/unsubscription via websocket.
...@@ -200,7 +200,7 @@ instance ToJSON WSRequest where ...@@ -200,7 +200,7 @@ instance ToJSON WSRequest where
toJSON (WSAuthorize token) = Aeson.object [ "request" .= ( "authorize" :: Text ) toJSON (WSAuthorize token) = Aeson.object [ "request" .= ( "authorize" :: Text )
, "token" .= token ] , "token" .= token ]
toJSON WSDeauthorize = Aeson.object [ "request" .= ( "deauthorize" :: Text ) ] toJSON WSDeauthorize = Aeson.object [ "request" .= ( "deauthorize" :: Text ) ]
class HasDispatcher env dispatcher where class HasDispatcher env dispatcher where
hasDispatcher :: Getter env dispatcher hasDispatcher :: Getter env dispatcher
......
...@@ -100,6 +100,7 @@ wsLoop log_cfg jwtS subscriptions ws = flip finally disconnect $ do ...@@ -100,6 +100,7 @@ wsLoop log_cfg jwtS subscriptions ws = flip finally disconnect $ do
where where
wsLoop' user ioLogger = do wsLoop' user ioLogger = do
dm <- WS.receiveDataMessage (wsConn ws) dm <- WS.receiveDataMessage (wsConn ws)
logMsg ioLogger DEBUG $ "[wsLoop'] handling new message.."
newUser <- case dm of newUser <- case dm of
WS.Text dm' _ -> do WS.Text dm' _ -> do
...@@ -113,8 +114,8 @@ wsLoop log_cfg jwtS subscriptions ws = flip finally disconnect $ do ...@@ -113,8 +114,8 @@ wsLoop log_cfg jwtS subscriptions ws = flip finally disconnect $ do
let sub = Subscription { s_connected_user = user let sub = Subscription { s_connected_user = user
, s_ws_key_connection = ws , s_ws_key_connection = ws
, s_topic = topic } , s_topic = topic }
_ss <- insertSubscription subscriptions sub insertSubscription subscriptions sub
-- putText $ "[wsLoop] subscriptions: " <> show (showSub <$> ss) logMsg ioLogger DEBUG $ "[wsLoop] added subscription: " <> show sub
return user return user
Just (WSUnsubscribe topic) -> do Just (WSUnsubscribe topic) -> do
logMsg ioLogger DEBUG $ "[wsLoop'] unsubscribe topic " <> show topic logMsg ioLogger DEBUG $ "[wsLoop'] unsubscribe topic " <> show topic
......
...@@ -54,7 +54,9 @@ data TsvList = TsvList ...@@ -54,7 +54,9 @@ data TsvList = TsvList
instance FromNamedRecord TsvList where instance FromNamedRecord TsvList where
parseNamedRecord r = TsvList <$> r .: "status" parseNamedRecord r = TsvList <$> r .: "status"
<*> r .: "label" <*> r .: "label"
<*> r .: "forms" -- Issue #381: be lenient in the forms
-- field, if missing, default to the empty text.
<*> (fromMaybe mempty <$> r .: "forms")
instance ToNamedRecord TsvList where instance ToNamedRecord TsvList where
toNamedRecord (TsvList s l f) = toNamedRecord (TsvList s l f) =
......
...@@ -24,15 +24,13 @@ import Control.Lens.TH ...@@ -24,15 +24,13 @@ import Control.Lens.TH
import Control.Monad.Trans.Control (MonadBaseControl) import Control.Monad.Trans.Control (MonadBaseControl)
import Data.Maybe (fromJust) import Data.Maybe (fromJust)
import Data.Pool qualified as Pool import Data.Pool qualified as Pool
import Data.Text qualified as T
import Database.PostgreSQL.Simple qualified as PSQL import Database.PostgreSQL.Simple qualified as PSQL
import Gargantext.API.Admin.EnvTypes (Mode(Dev), modeToLoggingLevels)
import Gargantext.API.Admin.Orchestrator.Types (JobLog, noJobLog) import Gargantext.API.Admin.Orchestrator.Types (JobLog, noJobLog)
import Gargantext.API.Job (RemainingSteps(..), jobLogStart, jobLogProgress, jobLogFailures, jobLogComplete, addErrorEvent, jobLogFailTotal, jobLogFailTotalWithMessage, jobLogAddMore) import Gargantext.API.Job (RemainingSteps(..), jobLogStart, jobLogProgress, jobLogFailures, jobLogComplete, addErrorEvent, jobLogFailTotal, jobLogFailTotalWithMessage, jobLogAddMore)
import Gargantext.API.Prelude (GargM) import Gargantext.API.Prelude (GargM)
import Gargantext.Core.Notifications.CentralExchange qualified as CE import Gargantext.Core.Notifications.CentralExchange qualified as CE
import Gargantext.Core.Notifications.CentralExchange.Types qualified as CET import Gargantext.Core.Notifications.CentralExchange.Types qualified as CET
import Gargantext.Core.Config (GargConfig(..), HasConfig(..), gc_logging) import Gargantext.Core.Config (GargConfig(..), HasConfig(..), gc_logging, LogConfig)
import Gargantext.Core.Config.Mail qualified as Mail import Gargantext.Core.Config.Mail qualified as Mail
import Gargantext.Core.Config.Utils (readConfig) import Gargantext.Core.Config.Utils (readConfig)
import Gargantext.Core.Config.Types (SettingsFile(..)) import Gargantext.Core.Config.Types (SettingsFile(..))
...@@ -50,6 +48,7 @@ import Gargantext.Utils.Jobs.Monad ( MonadJobStatus(..), JobHandle ) ...@@ -50,6 +48,7 @@ import Gargantext.Utils.Jobs.Monad ( MonadJobStatus(..), JobHandle )
import GHC.IO.Exception (IOException(..), IOErrorType(OtherError)) import GHC.IO.Exception (IOException(..), IOErrorType(OtherError))
import Prelude qualified import Prelude qualified
import System.Log.FastLogger qualified as FL import System.Log.FastLogger qualified as FL
import Gargantext.System.Logging.Loggers
data WorkerEnv = WorkerEnv data WorkerEnv = WorkerEnv
...@@ -70,13 +69,14 @@ data WorkerJobState = WorkerJobState ...@@ -70,13 +69,14 @@ data WorkerJobState = WorkerJobState
withWorkerEnv :: SettingsFile -> (WorkerEnv -> IO a) -> IO a withWorkerEnv :: SettingsFile -> (WorkerEnv -> IO a) -> IO a
withWorkerEnv settingsFile k = withLoggerIO Dev $ \logger -> do withWorkerEnv settingsFile k = do
env <- newWorkerEnv logger cfg <- readConfig settingsFile
k env -- `finally` cleanEnv env withLoggerIO (cfg ^. gc_logging) $ \logger -> do
env <- newWorkerEnv logger cfg
k env -- `finally` cleanEnv env
where where
newWorkerEnv logger = do newWorkerEnv logger cfg = do
cfg <- readConfig settingsFile
--nodeStory_env <- fromDBNodeStoryEnv (_gc_repofilepath cfg) --nodeStory_env <- fromDBNodeStoryEnv (_gc_repofilepath cfg)
-- pool <- newPool $ _gc_database_config cfg -- pool <- newPool $ _gc_database_config cfg
let dbConfig = _gc_database_config cfg let dbConfig = _gc_database_config cfg
...@@ -97,22 +97,14 @@ instance HasConfig WorkerEnv where ...@@ -97,22 +97,14 @@ instance HasConfig WorkerEnv where
hasConfig = to _w_env_config hasConfig = to _w_env_config
instance HasLogger (GargM WorkerEnv IOException) where instance HasLogger (GargM WorkerEnv IOException) where
data instance Logger (GargM WorkerEnv IOException) = newtype instance Logger (GargM WorkerEnv IOException) =
GargWorkerLogger { GargWorkerLogger { _GargWorkerLogger :: MonadicStdLogger FL.LogStr IO }
w_logger_mode :: Mode type instance LogInitParams (GargM WorkerEnv IOException) = LogConfig
, w_logger_set :: FL.LoggerSet
}
type instance LogInitParams (GargM WorkerEnv IOException) = Mode
type instance LogPayload (GargM WorkerEnv IOException) = FL.LogStr type instance LogPayload (GargM WorkerEnv IOException) = FL.LogStr
initLogger mode = do initLogger cfg = fmap GargWorkerLogger $ (liftIO $ monadicStdLogger cfg)
w_logger_set <- liftIO $ FL.newStderrLoggerSet FL.defaultBufSize destroyLogger = liftIO . _msl_destroy . _GargWorkerLogger
pure $ GargWorkerLogger mode w_logger_set logMsg (GargWorkerLogger ioLogger) lvl msg = liftIO $ _msl_log_msg ioLogger lvl msg
destroyLogger (GargWorkerLogger{..}) = liftIO $ FL.rmLoggerSet w_logger_set logTxt (GargWorkerLogger ioLogger) lvl msg = liftIO $ _msl_log_txt ioLogger lvl msg
logMsg (GargWorkerLogger mode logger_set) lvl msg = do
let pfx = "[" <> show lvl <> "] " :: Text
when (lvl `elem` (modeToLoggingLevels mode)) $
liftIO $ FL.pushLogStrLn logger_set $ FL.toLogStr pfx <> msg
logTxt lgr lvl msg = logMsg lgr lvl (FL.toLogStr $ T.unpack msg)
instance HasConnectionPool WorkerEnv where instance HasConnectionPool WorkerEnv where
connPool = to _w_env_pool connPool = to _w_env_pool
...@@ -182,29 +174,20 @@ newtype WorkerMonad a = ...@@ -182,29 +174,20 @@ newtype WorkerMonad a =
, MonadFail ) , MonadFail )
instance HasLogger WorkerMonad where instance HasLogger WorkerMonad where
data instance Logger WorkerMonad = newtype instance Logger WorkerMonad =
WorkerMonadLogger { WorkerMonadLogger { _WorkerMonadLogger :: MonadicStdLogger FL.LogStr IO }
wm_logger_mode :: Mode type instance LogInitParams WorkerMonad = LogConfig
, wm_logger_set :: FL.LoggerSet
}
type instance LogInitParams WorkerMonad = Mode
type instance LogPayload WorkerMonad = FL.LogStr type instance LogPayload WorkerMonad = FL.LogStr
initLogger mode = do initLogger cfg = fmap WorkerMonadLogger $ (liftIO $ monadicStdLogger cfg)
wm_logger_set <- liftIO $ FL.newStderrLoggerSet FL.defaultBufSize destroyLogger = liftIO . _msl_destroy . _WorkerMonadLogger
pure $ WorkerMonadLogger mode wm_logger_set logMsg (WorkerMonadLogger ioLogger) lvl msg = liftIO $ _msl_log_msg ioLogger lvl msg
destroyLogger (WorkerMonadLogger{..}) = liftIO $ FL.rmLoggerSet wm_logger_set logTxt (WorkerMonadLogger ioLogger) lvl msg = liftIO $ _msl_log_txt ioLogger lvl msg
logMsg (WorkerMonadLogger mode logger_set) lvl msg = do
let pfx = "[" <> show lvl <> "] " :: Text
when (lvl `elem` (modeToLoggingLevels mode)) $
liftIO $ FL.pushLogStrLn logger_set $ FL.toLogStr pfx <> msg
logTxt lgr lvl msg = logMsg lgr lvl (FL.toLogStr $ T.unpack msg)
instance MonadLogger WorkerMonad where instance MonadLogger WorkerMonad where
getLogger = do getLogger = do
env <- ask env <- ask
let (GargWorkerLogger { .. }) = _w_env_logger env let (GargWorkerLogger lgr) = _w_env_logger env
pure $ WorkerMonadLogger { wm_logger_mode = w_logger_mode pure $ WorkerMonadLogger lgr
, wm_logger_set = w_logger_set }
runWorkerMonad :: WorkerEnv -> WorkerMonad a -> IO a runWorkerMonad :: WorkerEnv -> WorkerMonad a -> IO a
runWorkerMonad env m = do runWorkerMonad env m = do
......
...@@ -36,6 +36,7 @@ module Gargantext.Database.Query.Table.Node ...@@ -36,6 +36,7 @@ module Gargantext.Database.Query.Table.Node
, getParentId , getParentId
, getUserRootPublicNode , getUserRootPublicNode
, getUserRootPrivateNode , getUserRootPrivateNode
, getUserRootShareNode
, selectNode , selectNode
-- * Boolean queries -- * Boolean queries
...@@ -464,6 +465,11 @@ getUserRootPrivateNode :: (HasNodeError err, HasDBid NodeType) ...@@ -464,6 +465,11 @@ getUserRootPrivateNode :: (HasNodeError err, HasDBid NodeType)
-> DBCmd err (Node HyperdataFolder) -> DBCmd err (Node HyperdataFolder)
getUserRootPrivateNode = get_user_root_node_folder NodeFolderPrivate getUserRootPrivateNode = get_user_root_node_folder NodeFolderPrivate
getUserRootShareNode :: (HasNodeError err, HasDBid NodeType)
=> UserId
-> DBCmd err (Node HyperdataFolder)
getUserRootShareNode = get_user_root_node_folder NodeFolderShared
get_user_root_node_folder :: (HasNodeError err, HasDBid NodeType) get_user_root_node_folder :: (HasNodeError err, HasDBid NodeType)
=> NodeType => NodeType
-> UserId -> UserId
......
...@@ -13,17 +13,15 @@ module Gargantext.System.Logging ( ...@@ -13,17 +13,15 @@ module Gargantext.System.Logging (
) where ) where
import Gargantext.System.Logging.Types import Gargantext.System.Logging.Types
import Gargantext.System.Logging.Loggers
import Control.Exception.Safe (MonadMask, bracket) import Control.Exception.Safe (MonadMask, bracket)
import Control.Monad (when)
import Gargantext.Core.Config (LogConfig(..)) import Gargantext.Core.Config (LogConfig(..))
import Control.Monad.IO.Class import Control.Monad.IO.Class
import Control.Monad.Trans.Control import Control.Monad.Trans.Control
import Data.Text qualified as T import Data.Text qualified as T
import Data.Time.Clock (getCurrentTime)
import Language.Haskell.TH hiding (Type) import Language.Haskell.TH hiding (Type)
import Language.Haskell.TH.Syntax qualified as TH import Language.Haskell.TH.Syntax qualified as TH
import Prelude import Prelude
import System.Environment (lookupEnv)
-- | A variant of 'logTxt' that doesn't require passing an explicit 'Logger'. -- | A variant of 'logTxt' that doesn't require passing an explicit 'Logger'.
...@@ -86,25 +84,10 @@ withLoggerIO params act = bracket (initLogger params) destroyLogger act ...@@ -86,25 +84,10 @@ withLoggerIO params act = bracket (initLogger params) destroyLogger act
-- | A plain logger in the IO monad, waiting for more serious logging solutions like -- | A plain logger in the IO monad, waiting for more serious logging solutions like
-- the one described in https://gitlab.iscpif.fr/gargantext/haskell-gargantext/issues/229 -- the one described in https://gitlab.iscpif.fr/gargantext/haskell-gargantext/issues/229
instance HasLogger IO where instance HasLogger IO where
data instance Logger IO = IOLogger LogLevel data instance Logger IO = IOLogger { _IOLogger :: IOStdLogger }
type instance LogInitParams IO = LogConfig type instance LogInitParams IO = LogConfig
type instance LogPayload IO = String type instance LogPayload IO = String
initLogger LogConfig{..} = do initLogger cfg = fmap IOLogger $ (liftIO $ ioStdLogger cfg)
-- let the env var take precedence over the LogConfig one. destroyLogger = liftIO . _iosl_destroy . _IOLogger
mLvl <- liftIO $ lookupEnv "GGTX_LOG_LEVEL" logMsg (IOLogger ioLogger) = _iosl_log_msg ioLogger
lvl <- case mLvl of logTxt (IOLogger ioLogger) lvl msg = liftIO $ _iosl_log_txt ioLogger lvl msg
Nothing -> pure _lc_log_level
Just s ->
case parseLogLevel (T.pack s) of
Left err -> do
liftIO $ putStrLn $ "unknown log level " <> s <> ": " <> T.unpack err <> " , ignoring GGTX_LOG_LEVEL"
pure $ _lc_log_level
Right lvl' -> pure lvl'
pure $ IOLogger lvl
destroyLogger _ = pure ()
logMsg (IOLogger minLvl) lvl msg = do
t <- getCurrentTime
when (lvl >= minLvl) $ do
let pfx = "[" <> show t <> "] [" <> show lvl <> "] "
putStrLn $ pfx <> msg
logTxt lgr lvl msg = logMsg lgr lvl (T.unpack msg)
{-| Canned loggers to avoid reinventing the wheel every time. -}
module Gargantext.System.Logging.Loggers (
ioStdLogger
, IOStdLogger -- opaque, you can't build it directly, use 'ioStdLogger'
, _iosl_log_level
, _iosl_destroy
, _iosl_log_msg
, _iosl_log_txt
, monadicStdLogger
, _msl_log_level
, _msl_destroy
, _msl_log_msg
, _msl_log_txt
, MonadicStdLogger
) where
import Control.Monad
import Control.Monad.IO.Class
import Data.Text qualified as T
import Data.Time
import Gargantext.Core.Config
import Gargantext.System.Logging.Types
import Prelude
import System.Log.FastLogger qualified as FL
-- | A plain logger in 'IO' that prints timestamped messages to stdout
-- via 'putStrLn'. This is an opaque record-of-functions: construct one
-- with 'ioStdLogger' (the constructor is not exported).
data IOStdLogger =
  IOStdLogger { _iosl_log_level :: LogLevel
              -- ^ Minimum level: messages strictly below this are dropped.
              , _iosl_destroy :: IO ()
              -- ^ Release any resources held by the logger.
              , _iosl_log_msg :: LogLevel -> String -> IO ()
              -- ^ Log a 'String' payload at the given level.
              , _iosl_log_txt :: LogLevel -> T.Text -> IO ()
              -- ^ Log a 'T.Text' payload at the given level.
              }
-- | Build an 'IOStdLogger' from the given 'LogConfig'.
--
-- Messages at a level below '_lc_log_level' are silently discarded;
-- accepted messages are printed to stdout prefixed with the current
-- time and the level, e.g. @[2024-01-01 ...] [INFO] msg@.
ioStdLogger :: LogConfig -> IO IOStdLogger
ioStdLogger LogConfig{..} = do
  let minLvl = _lc_log_level
  -- Check the level first so we don't pay for 'getCurrentTime' on
  -- messages that are going to be dropped anyway.
  let log_msg lvl msg =
        when (lvl >= minLvl) $ do
          t <- getCurrentTime
          let pfx = "[" <> show t <> "] [" <> show lvl <> "] "
          putStrLn $ pfx <> msg
  pure $ IOStdLogger
    { _iosl_log_level = minLvl
    , _iosl_destroy = pure () -- nothing to clean up: stdout is not ours to close
    , _iosl_log_msg = log_msg
    , _iosl_log_txt = \lvl msg -> log_msg lvl (T.unpack msg)
    }
-- | A monadic standard logger powered by fast-logger underneath.
-- Opaque record-of-functions; construct one with 'monadicStdLogger'.
data MonadicStdLogger payload m =
  MonadicStdLogger { _msl_log_level :: LogLevel
                   -- ^ Minimum level: messages strictly below this are dropped.
                   , _msl_loggers :: [FL.LoggerSet]
                   -- ^ The underlying fast-logger sets this logger writes to.
                   , _msl_destroy :: m ()
                   -- ^ Flush and release the underlying logger sets.
                   , _msl_log_msg :: LogLevel -> payload -> m ()
                   -- ^ Log a @payload@ at the given level.
                   , _msl_log_txt :: LogLevel -> T.Text -> m ()
                   -- ^ Log a 'T.Text' at the given level.
                   }
-- | Build a 'MonadicStdLogger' from the given 'LogConfig', writing to
-- stderr through a fast-logger 'FL.LoggerSet'.
--
-- Messages below '_lc_log_level' are discarded; accepted messages are
-- emitted with a @[time] [level]@ prefix. '_msl_destroy' tears down the
-- underlying logger set ('FL.rmLoggerSet'), flushing buffered output.
monadicStdLogger :: MonadIO m => LogConfig -> IO (MonadicStdLogger FL.LogStr m)
monadicStdLogger LogConfig{..} = do
  let minLvl = _lc_log_level
  -- NB: 'newStderrLoggerSet' targets stderr, so name the handle accordingly.
  stderr_logger <- FL.newStderrLoggerSet FL.defaultBufSize
  -- Check the level first so we don't pay for 'getCurrentTime' on
  -- messages that are going to be dropped anyway.
  let log_msg lvl msg = liftIO $
        when (lvl >= minLvl) $ do
          t <- getCurrentTime
          let pfx = "[" <> show t <> "] [" <> show lvl <> "] "
          FL.pushLogStrLn stderr_logger $ FL.toLogStr pfx <> msg
  pure $ MonadicStdLogger
    { _msl_log_level = minLvl
    , _msl_loggers = [stderr_logger]
    , _msl_destroy = liftIO $ FL.rmLoggerSet stderr_logger
    , _msl_log_msg = log_msg
    , _msl_log_txt = \lvl msg -> log_msg lvl (FL.toLogStr $ T.unpack msg)
    }
...@@ -110,6 +110,10 @@ ...@@ -110,6 +110,10 @@
git: "https://github.com/adinapoli/http-reverse-proxy.git" git: "https://github.com/adinapoli/http-reverse-proxy.git"
subdirs: subdirs:
- . - .
- commit: 2d69707bf639be2055e3228dab38cc4f2a658111
git: "https://github.com/adinapoli/nanomsg-haskell"
subdirs:
- .
- commit: b9fca8beee0f23c17a6b2001ec834d071709e6e7 - commit: b9fca8beee0f23c17a6b2001ec834d071709e6e7
git: "https://github.com/alpmestan/hmatrix.git" git: "https://github.com/alpmestan/hmatrix.git"
subdirs: subdirs:
...@@ -134,10 +138,6 @@ ...@@ -134,10 +138,6 @@
git: "https://github.com/fpringle/servant-routes.git" git: "https://github.com/fpringle/servant-routes.git"
subdirs: subdirs:
- . - .
- commit: 5868db564d7d3c4568ccd11c852292b834d26c55
git: "https://github.com/garganscript/nanomsg-haskell"
subdirs:
- .
- commit: bd0592818882f9cf34d2991d01f7dcb3d8bca309 - commit: bd0592818882f9cf34d2991d01f7dcb3d8bca309
git: "https://github.com/haskell-github-trust/ekg-json" git: "https://github.com/haskell-github-trust/ekg-json"
subdirs: subdirs:
...@@ -158,7 +158,7 @@ ...@@ -158,7 +158,7 @@
git: "https://gitlab.iscpif.fr/gargantext/crawlers/isidore.git" git: "https://gitlab.iscpif.fr/gargantext/crawlers/isidore.git"
subdirs: subdirs:
- . - .
- commit: 521ca54f1502b13f629eff2223aaf5007e6d52ec - commit: 894482ef97eadce6b1c13ebced1edfe394b5be5e
git: "https://gitlab.iscpif.fr/gargantext/crawlers/istex.git" git: "https://gitlab.iscpif.fr/gargantext/crawlers/istex.git"
subdirs: subdirs:
- . - .
...@@ -367,7 +367,7 @@ flags: ...@@ -367,7 +367,7 @@ flags:
gargantext: gargantext:
"enable-benchmarks": false "enable-benchmarks": false
"no-phylo-debug-logs": true "no-phylo-debug-logs": true
"test-crypto": false "test-crypto": true
graphviz: graphviz:
"test-parsing": false "test-parsing": false
hashable: hashable:
......
status label
map impact-point analysis
map Simulated Destructive Re-Entry Conditions
map Passive Space-Debris Trajectories
map error-proofing mechanisms
map on-orbit life extension
map tether-gripper mechanism
map Field-Programmable Gate Arrays (FPGA)
map self-repair modular robot
map space-debris impact
map self-repairing
map in-orbit servicing
map online self-repairing
map triple-module redundancy systems
map model-based system engineering
map low-thrust orbital transfer
map space-borne orbit debris surveillance
map atmospheric re-entry
map demisable tanks' re-entry
map non-cooperative spacecraft
map model-based approaches
map model-based methods
map impact-induced electrical anomalies
map Low-Cost Deorbit System
map tape-shaped tethers
map self-repair
map self-healing material
map vision-based navigation
map model-based process
status,label
map,impact-point analysis
map,Simulated Destructive Re-Entry Conditions
map,Passive Space-Debris Trajectories
map,error-proofing mechanisms
map,on-orbit life extension
map,tether-gripper mechanism
map,Field-Programmable Gate Arrays (FPGA)
map,self-repair modular robot
map,space-debris impact
map,self-repairing
map,in-orbit servicing
map,online self-repairing
map,triple-module redundancy systems
map,model-based system engineering
map,low-thrust orbital transfer
map,space-borne orbit debris surveillance
map,atmospheric re-entry
map,demisable tanks' re-entry
map,non-cooperative spacecraft
map,model-based approaches
map,model-based methods
map,impact-induced electrical anomalies
map,Low-Cost Deorbit System
map,tape-shaped tethers
map,self-repair
map,self-healing material
map,vision-based navigation
map,model-based process
...@@ -67,10 +67,10 @@ login_type = "Normal" ...@@ -67,10 +67,10 @@ login_type = "Normal"
[notifications] [notifications]
central-exchange = { bind = "tcp://*:15560", connect = "tcp://localhost:15560" } # We do not hardcode the bind and connect here, because the test infrastructure
dispatcher = { bind = "tcp://*:15561", connect = "tcp://localhost:15561" } # will randomize the connection endpoints via IPC.
# central-exchange = { bind = "ipc:///tmp/ce.ipc", connect = "ipc:///tmp/ce.ipc" } central-exchange = { bind = "", connect = "" }
# dispatcher = { bind = "ipc:///tmp/d.ipc", connect = "ipc:///tmp/d.ipc" } dispatcher = { bind = "", connect = "" }
[nlp] [nlp]
......
...@@ -211,7 +211,9 @@ checkNotification ctx@(SpecContext testEnv port _app _) act = do ...@@ -211,7 +211,9 @@ checkNotification ctx@(SpecContext testEnv port _app _) act = do
act authRes act authRes
waitForTChanValue tchan (Just $ DT.NUpdateTree treeId) 1_000 -- Wait /up to/ 5 seconds for the notification value. This makes running the tests
-- a bit less flaky on CI, which might be slower to process the incoming notifications.
waitForTChanValue tchan (Just $ DT.NUpdateTree treeId) 5_000
where where
log_cfg = (test_config testEnv) ^. gc_logging log_cfg = (test_config testEnv) ^. gc_logging
......
...@@ -4,23 +4,33 @@ module Test.API.Prelude ...@@ -4,23 +4,33 @@ module Test.API.Prelude
( newCorpusForUser ( newCorpusForUser
, newPrivateFolderForUser , newPrivateFolderForUser
, newPublicFolderForUser , newPublicFolderForUser
, newShareFolderForUser
, newFolderForUser , newFolderForUser
, addFolderForUser
, getRootPublicFolderIdForUser , getRootPublicFolderIdForUser
, getRootPrivateFolderIdForUser , getRootPrivateFolderIdForUser
, getRootShareFolderIdForUser
, newTeamWithOwner
, myUserNodeId , myUserNodeId
, checkEither , checkEither
, shouldFailWith , shouldFailWith
-- User fixtures
, alice
, bob
) where ) where
import Data.Aeson qualified as JSON import Data.Aeson qualified as JSON
import Data.Text qualified as T import Data.Text qualified as T
import Gargantext.API.Errors import Gargantext.API.Errors
import Gargantext.Core.Types.Individu import Gargantext.Core.Types.Individu
import Gargantext.Core.Types (NodeId, NodeType(..)) import Gargantext.Core.Types (NodeId, NodeType(..), ParentId)
import Gargantext.Core.Worker.Env () -- instance HasNodeError import Gargantext.Core.Worker.Env () -- instance HasNodeError
import Gargantext.Database.Action.User import Gargantext.Database.Action.User
import Gargantext.Database.Admin.Types.Hyperdata.Corpus import Gargantext.Database.Admin.Types.Hyperdata.Corpus
import Gargantext.Database.Query.Table.Node (insertNode, mk, getUserRootPublicNode, getUserRootPrivateNode) import Gargantext.Database.Prelude (DBCmd)
import Gargantext.Database.Query.Table.Node (insertNode, mk, getUserRootPublicNode, getUserRootPrivateNode, getUserRootShareNode)
import Gargantext.Database.Query.Table.Node.Error (HasNodeError)
import Gargantext.Database.Query.Table.Node.User (getUserByName) import Gargantext.Database.Query.Table.Node.User (getUserByName)
import Gargantext.Database.Query.Tree.Root import Gargantext.Database.Query.Tree.Root
import Gargantext.Database.Schema.Node (_node_id) import Gargantext.Database.Schema.Node (_node_id)
...@@ -41,27 +51,46 @@ newCorpusForUser env uname = flip runReaderT env $ runTestMonad $ do ...@@ -41,27 +51,46 @@ newCorpusForUser env uname = flip runReaderT env $ runTestMonad $ do
(corpusId:_) <- mk (Just corpusName) (Nothing :: Maybe HyperdataCorpus) parentId uid (corpusId:_) <- mk (Just corpusName) (Nothing :: Maybe HyperdataCorpus) parentId uid
pure corpusId pure corpusId
newFolderForUser :: TestEnv -> T.Text -> T.Text -> IO NodeId -- | Creates a new folder for the input user, nested under the given 'ParentId', if given.
newFolderForUser env uname folderName = flip runReaderT env $ runTestMonad $ do newFolderForUser' :: HasNodeError err
uid <- getUserId (UserName uname) => User
parentId <- getRootId (UserName uname) -> T.Text
-> ParentId
-> DBCmd err NodeId
newFolderForUser' ur folderName parentId = do
uid <- getUserId ur
insertNode NodeFolder (Just folderName) Nothing parentId uid insertNode NodeFolder (Just folderName) Nothing parentId uid
addFolderForUser :: TestEnv
-> User
-> T.Text
-> ParentId
-> IO NodeId
addFolderForUser env ur folderName parentId = flip runReaderT env $ runTestMonad $ do
newFolderForUser' ur folderName parentId
newFolderForUser :: TestEnv -> User -> T.Text -> IO NodeId
newFolderForUser env uname folderName = flip runReaderT env $ runTestMonad $ do
parentId <- getRootId uname
newFolderForUser' uname folderName parentId
-- | Generate a 'Node' where we can append more data into, a bit reminiscent to the -- | Generate a 'Node' where we can append more data into, a bit reminiscent to the
-- \"Private\" root node we use in the real Gargantext. -- \"Private\" root node we use in the real Gargantext.
newPrivateFolderForUser :: TestEnv -> T.Text -> IO NodeId newPrivateFolderForUser :: TestEnv -> User -> IO NodeId
newPrivateFolderForUser env uname = flip runReaderT env $ runTestMonad $ do newPrivateFolderForUser env ur = newFolder env ur NodeFolderPrivate
uid <- getUserId (UserName uname)
parentId <- getRootId (UserName uname)
let nodeName = "NodeFolderPrivate"
insertNode NodeFolderPrivate (Just nodeName) Nothing parentId uid
newPublicFolderForUser :: TestEnv -> T.Text -> IO NodeId newPublicFolderForUser :: TestEnv -> User -> IO NodeId
newPublicFolderForUser env uname = flip runReaderT env $ runTestMonad $ do newPublicFolderForUser env ur = newFolder env ur NodeFolderPublic
uid <- getUserId (UserName uname)
parentId <- getRootId (UserName uname) newShareFolderForUser :: TestEnv -> User -> IO NodeId
let nodeName = "NodeFolderPublic" newShareFolderForUser env ur = newFolder env ur NodeFolderShared
insertNode NodeFolderPublic (Just nodeName) Nothing parentId uid
newFolder :: TestEnv -> User -> NodeType -> IO NodeId
newFolder env ur nt = flip runReaderT env $ runTestMonad $ do
let nodeName = show nt
uid <- getUserId ur
parentId <- getRootId ur
insertNode nt (Just nodeName) Nothing parentId uid
getRootPublicFolderIdForUser :: TestEnv -> User -> IO NodeId getRootPublicFolderIdForUser :: TestEnv -> User -> IO NodeId
getRootPublicFolderIdForUser env uname = flip runReaderT env $ runTestMonad $ do getRootPublicFolderIdForUser env uname = flip runReaderT env $ runTestMonad $ do
...@@ -71,6 +100,16 @@ getRootPrivateFolderIdForUser :: TestEnv -> User -> IO NodeId ...@@ -71,6 +100,16 @@ getRootPrivateFolderIdForUser :: TestEnv -> User -> IO NodeId
getRootPrivateFolderIdForUser env uname = flip runReaderT env $ runTestMonad $ do getRootPrivateFolderIdForUser env uname = flip runReaderT env $ runTestMonad $ do
_node_id <$> (getUserId uname >>= getUserRootPrivateNode) _node_id <$> (getUserId uname >>= getUserRootPrivateNode)
getRootShareFolderIdForUser :: TestEnv -> User -> IO NodeId
getRootShareFolderIdForUser env uname = flip runReaderT env $ runTestMonad $ do
_node_id <$> (getUserId uname >>= getUserRootShareNode)
newTeamWithOwner :: TestEnv -> User -> T.Text -> IO NodeId
newTeamWithOwner env uname teamName = flip runReaderT env $ runTestMonad $ do
uid <- getUserId uname
parentId <- liftIO $ getRootShareFolderIdForUser env uname
insertNode NodeTeam (Just teamName) Nothing parentId uid
myUserNodeId :: TestEnv -> T.Text -> IO NodeId myUserNodeId :: TestEnv -> T.Text -> IO NodeId
myUserNodeId env uname = flip runReaderT env $ runTestMonad $ do myUserNodeId env uname = flip runReaderT env $ runTestMonad $ do
_node_id <$> getUserByName uname _node_id <$> getUserByName uname
...@@ -84,3 +123,9 @@ action `shouldFailWith` backendError = case action of ...@@ -84,3 +123,9 @@ action `shouldFailWith` backendError = case action of
| otherwise | otherwise
-> fail $ "FailureResponse didn't have FrontendError: " <> show fr -> fail $ "FailureResponse didn't have FrontendError: " <> show fr
_xs -> fail $ "Unexpected ClientError: " <> show _xs _xs -> fail $ "Unexpected ClientError: " <> show _xs
alice :: User
alice = UserName "alice"
bob :: User
bob = UserName "bob"
...@@ -23,6 +23,7 @@ import Test.API.Private.Move qualified as Move ...@@ -23,6 +23,7 @@ import Test.API.Private.Move qualified as Move
import Test.API.Private.Remote qualified as Remote import Test.API.Private.Remote qualified as Remote
import Test.API.Private.Share qualified as Share import Test.API.Private.Share qualified as Share
import Test.API.Private.Table qualified as Table import Test.API.Private.Table qualified as Table
import Test.API.Private.List qualified as List
import Test.API.Routes (mkUrl, get_node, get_tree) import Test.API.Routes (mkUrl, get_node, get_tree)
import Test.API.Setup (withTestDBAndPort, dbEnvSetup, SpecContext (..)) import Test.API.Setup (withTestDBAndPort, dbEnvSetup, SpecContext (..))
import Test.Hspec import Test.Hspec
...@@ -114,3 +115,5 @@ tests = sequential $ do ...@@ -114,3 +115,5 @@ tests = sequential $ do
Move.tests Move.tests
describe "Remote API" $ do describe "Remote API" $ do
Remote.tests Remote.tests
describe "List API" $ do
List.tests
{-# LANGUAGE BangPatterns #-}
{-# LANGUAGE QuasiQuotes #-}
{-# LANGUAGE ScopedTypeVariables #-}
{-# LANGUAGE TypeApplications #-}
{-# LANGUAGE TypeFamilies #-}
module Test.API.Private.List (
tests
) where
import Data.Aeson.QQ
import Data.Text.IO qualified as TIO
import Gargantext.API.Ngrams.List.Types
import Gargantext.API.Ngrams.Types qualified as APINgrams
import Gargantext.API.Node.Corpus.New.Types qualified as FType
import Gargantext.Core.Config
import Gargantext.Core.Types
import Gargantext.Core.Types.Individu
import Gargantext.Prelude
import Paths_gargantext
import Servant.Client.Streaming
import Test.API.Prelude (newCorpusForUser, checkEither)
import Test.API.Routes
import Test.API.Setup
import Test.Database.Types
import Test.Hspec (Spec, it, aroundAll, describe, sequential)
import Test.Hspec.Expectations
import Test.Hspec.Wai.Internal (withApplication)
import Test.Utils
import Fmt
-- | Regression tests for issue #381: importing a TSV terms list whose
-- \"forms\" column is absent must still succeed and produce a non-empty
-- ngrams table.
tests :: Spec
tests = sequential $ aroundAll withTestDBAndPort $ do
  describe "Prelude" $ do
    it "setup DB triggers" $ \SpecContext{..} -> do
      setupEnvironment _sctx_env
      -- Seed the users the scenarios below log in as.
      void $ createAliceAndBob _sctx_env

  describe "Importing terms as TSV" $ do
    it "should work for TSV with a missing 'forms' column" $ \(SpecContext testEnv port app _) -> do
      let logCfg = test_config testEnv ^. gc_logging
      corpusId <- newCorpusForUser testEnv "alice"
      withApplication app $ do
        withValidLogin port "alice" (GargPassword "alice") $ \clientEnv token -> do
          -- Create a fresh list node under the corpus.
          ([listId] :: [NodeId]) <- protectedJSON token "POST" (mkUrl port ("/node/" <> build corpusId)) [aesonQQ|{"pn_typename":"NodeList","pn_name":"Testing"}|]
          -- Upload the TSV fixture (it deliberately lacks the 'forms' column).
          tsvPayload <- liftIO (TIO.readFile =<< getDataFileName "test-data/issue-381/Termes_A_Ajouter_T4SC_Intellixir.tsv")
          let uploadReq = WithTextFile { _wtf_filetype = FType.TSV
                                       , _wtf_data = tsvPayload
                                       , _wtf_name = "simple.tsv" }
          job <- checkEither $ liftIO $ runClientM (add_tsv_to_list token listId uploadReq) clientEnv
          _ <- pollUntilWorkFinished logCfg port job
          -- The import finished: fetching the ngrams table must yield rows.
          liftIO $ do
            tableRes <- checkEither $ runClientM (get_table_ngrams token corpusId APINgrams.Terms listId 50 Nothing (Just MapTerm) Nothing Nothing Nothing Nothing) clientEnv
            let (APINgrams.NgramsTable ngramsRows) = APINgrams._vc_data tableRes
            length ngramsRows `shouldSatisfy` (> 0)
...@@ -9,6 +9,7 @@ module Test.API.Private.Move ( ...@@ -9,6 +9,7 @@ module Test.API.Private.Move (
import Gargantext.API.Errors import Gargantext.API.Errors
import Gargantext.Core.Types import Gargantext.Core.Types
import Gargantext.Core.Types.Individu import Gargantext.Core.Types.Individu
import Gargantext.API.Node.Share.Types
import Gargantext.Database.Query.Table.NodeNode (SourceId(..), TargetId(..)) import Gargantext.Database.Query.Table.NodeNode (SourceId(..), TargetId(..))
import Gargantext.Prelude import Gargantext.Prelude
import Servant.Client.Streaming import Servant.Client.Streaming
...@@ -29,6 +30,53 @@ tests = sequential $ aroundAll withTestDBAndPort $ do ...@@ -29,6 +30,53 @@ tests = sequential $ aroundAll withTestDBAndPort $ do
-- Let's create the Alice user. -- Let's create the Alice user.
void $ createAliceAndBob _sctx_env void $ createAliceAndBob _sctx_env
describe "Moving a node" $ do
describe "private to private moves" $ do
it "should allow moving one folder into another" $ \(SpecContext testEnv serverPort app _) -> do
withApplication app $ do
withValidLogin serverPort "alice" (GargPassword "alice") $ \clientEnv token -> liftIO $ do
aliceRoot <- getRootPrivateFolderIdForUser testEnv alice
child1 <- addFolderForUser testEnv alice "child1" aliceRoot
child2 <- addFolderForUser testEnv alice "child2" aliceRoot
-- Test that moving child1 into child2 works.
res <- checkEither $ runClientM (move_node token (SourceId child2) (TargetId child1)) clientEnv
res `shouldBe` [child2]
describe "share to share moves" $ do
it "should allow moving one folder into another (as team owner)" $ \(SpecContext testEnv serverPort app _) -> do
withApplication app $ do
withValidLogin serverPort "alice" (GargPassword "alice") $ \clientEnv token -> liftIO $ do
-- Let's create (once) the shared team folder
void $ newShareFolderForUser testEnv alice
teamNode <- newTeamWithOwner testEnv alice "Alice's Team"
child1 <- addFolderForUser testEnv alice "child1" teamNode
child2 <- addFolderForUser testEnv alice "child2" teamNode
-- Test that moving child1 into child2 works.
res <- checkEither $ runClientM (move_node token (SourceId child2) (TargetId child1)) clientEnv
res `shouldBe` [child2]
it "should allow moving one folder into another (as team member)" $ \(SpecContext testEnv serverPort app _) -> do
withApplication app $ do
(teamNode, child1, child2) <- liftIO $ do
-- Let's create (once) the shared team folder
void $ newShareFolderForUser testEnv alice
teamNode <- newTeamWithOwner testEnv alice "Alice's Team"
child1 <- addFolderForUser testEnv alice "child1" teamNode
child2 <- addFolderForUser testEnv alice "child2" teamNode
pure (teamNode, child1, child2)
-- let's make bob a team member.
withValidLogin serverPort "alice" (GargPassword "alice") $ \clientEnv token -> liftIO $ do
let params = ShareTeamParams "bob"
res <- checkEither $ runClientM (addTeamMember token teamNode params) clientEnv
res `shouldBe` UnsafeMkNodeId 1
withValidLogin serverPort "bob" (GargPassword "bob") $ \clientEnv token -> liftIO $ do
-- Test that moving child1 into child2 works.
res <- checkEither $ runClientM (move_node token (SourceId child2) (TargetId child1)) clientEnv
res `shouldBe` [child2]
describe "Publishing a Corpus" $ do describe "Publishing a Corpus" $ do
it "should forbid moving a corpus node into another user Public folder" $ \(SpecContext testEnv serverPort app _) -> do it "should forbid moving a corpus node into another user Public folder" $ \(SpecContext testEnv serverPort app _) -> do
...@@ -36,7 +84,7 @@ tests = sequential $ aroundAll withTestDBAndPort $ do ...@@ -36,7 +84,7 @@ tests = sequential $ aroundAll withTestDBAndPort $ do
withValidLogin serverPort "alice" (GargPassword "alice") $ \clientEnv token -> do withValidLogin serverPort "alice" (GargPassword "alice") $ \clientEnv token -> do
liftIO $ do liftIO $ do
cId <- newCorpusForUser testEnv "alice" cId <- newCorpusForUser testEnv "alice"
bobPublicFolderId <- getRootPublicFolderIdForUser testEnv (UserName "bob") bobPublicFolderId <- getRootPublicFolderIdForUser testEnv bob
res <- runClientM (move_node token (SourceId cId) (TargetId bobPublicFolderId)) clientEnv res <- runClientM (move_node token (SourceId cId) (TargetId bobPublicFolderId)) clientEnv
res `shouldFailWith` EC_403__policy_check_error res `shouldFailWith` EC_403__policy_check_error
...@@ -45,7 +93,7 @@ tests = sequential $ aroundAll withTestDBAndPort $ do ...@@ -45,7 +93,7 @@ tests = sequential $ aroundAll withTestDBAndPort $ do
withValidLogin serverPort "alice" (GargPassword "alice") $ \clientEnv token -> do withValidLogin serverPort "alice" (GargPassword "alice") $ \clientEnv token -> do
nodes <- liftIO $ do nodes <- liftIO $ do
cId <- newCorpusForUser testEnv "alice" cId <- newCorpusForUser testEnv "alice"
alicePublicFolderId <- getRootPublicFolderIdForUser testEnv (UserName "alice") alicePublicFolderId <- getRootPublicFolderIdForUser testEnv alice
checkEither $ runClientM (move_node token (SourceId cId) (TargetId alicePublicFolderId)) clientEnv checkEither $ runClientM (move_node token (SourceId cId) (TargetId alicePublicFolderId)) clientEnv
liftIO $ length nodes `shouldBe` 1 liftIO $ length nodes `shouldBe` 1
...@@ -54,8 +102,8 @@ tests = sequential $ aroundAll withTestDBAndPort $ do ...@@ -54,8 +102,8 @@ tests = sequential $ aroundAll withTestDBAndPort $ do
withValidLogin serverPort "alice" (GargPassword "alice") $ \clientEnv token -> do withValidLogin serverPort "alice" (GargPassword "alice") $ \clientEnv token -> do
nodes <- liftIO $ do nodes <- liftIO $ do
cId <- newCorpusForUser testEnv "alice" cId <- newCorpusForUser testEnv "alice"
alicePublicFolderId <- getRootPublicFolderIdForUser testEnv (UserName "alice") alicePublicFolderId <- getRootPublicFolderIdForUser testEnv alice
alicePrivateFolderId <- getRootPrivateFolderIdForUser testEnv (UserName "alice") alicePrivateFolderId <- getRootPrivateFolderIdForUser testEnv alice
_ <- checkEither $ runClientM (move_node token (SourceId cId) (TargetId alicePublicFolderId)) clientEnv _ <- checkEither $ runClientM (move_node token (SourceId cId) (TargetId alicePublicFolderId)) clientEnv
checkEither $ runClientM (move_node token (SourceId cId) (TargetId alicePrivateFolderId)) clientEnv checkEither $ runClientM (move_node token (SourceId cId) (TargetId alicePrivateFolderId)) clientEnv
length nodes `shouldBe` 1 length nodes `shouldBe` 1
...@@ -65,7 +113,7 @@ tests = sequential $ aroundAll withTestDBAndPort $ do ...@@ -65,7 +113,7 @@ tests = sequential $ aroundAll withTestDBAndPort $ do
aliceCorpusId <- withValidLogin serverPort "alice" (GargPassword "alice") $ \clientEnv token -> do aliceCorpusId <- withValidLogin serverPort "alice" (GargPassword "alice") $ \clientEnv token -> do
liftIO $ do liftIO $ do
cId <- newCorpusForUser testEnv "alice" cId <- newCorpusForUser testEnv "alice"
alicePublicFolderId <- getRootPublicFolderIdForUser testEnv (UserName "alice") alicePublicFolderId <- getRootPublicFolderIdForUser testEnv alice
_ <- checkEither $ runClientM (move_node token (SourceId cId) (TargetId alicePublicFolderId)) clientEnv _ <- checkEither $ runClientM (move_node token (SourceId cId) (TargetId alicePublicFolderId)) clientEnv
-- Check that we can see the folder -- Check that we can see the folder
...@@ -85,7 +133,7 @@ tests = sequential $ aroundAll withTestDBAndPort $ do ...@@ -85,7 +133,7 @@ tests = sequential $ aroundAll withTestDBAndPort $ do
aliceCorpusId <- withValidLogin serverPort "alice" (GargPassword "alice") $ \clientEnv token -> do aliceCorpusId <- withValidLogin serverPort "alice" (GargPassword "alice") $ \clientEnv token -> do
liftIO $ do liftIO $ do
cId <- newCorpusForUser testEnv "alice" cId <- newCorpusForUser testEnv "alice"
alicePublicFolderId <- getRootPublicFolderIdForUser testEnv (UserName "alice") alicePublicFolderId <- getRootPublicFolderIdForUser testEnv alice
_ <- checkEither $ runClientM (move_node token (SourceId cId) (TargetId alicePublicFolderId)) clientEnv _ <- checkEither $ runClientM (move_node token (SourceId cId) (TargetId alicePublicFolderId)) clientEnv
-- Check that we can see the folder -- Check that we can see the folder
...@@ -103,7 +151,7 @@ tests = sequential $ aroundAll withTestDBAndPort $ do ...@@ -103,7 +151,7 @@ tests = sequential $ aroundAll withTestDBAndPort $ do
-- Now alice moves the node back to her private folder, effectively unpublishing it. -- Now alice moves the node back to her private folder, effectively unpublishing it.
withValidLogin serverPort "alice" (GargPassword "alice") $ \clientEnv token -> do withValidLogin serverPort "alice" (GargPassword "alice") $ \clientEnv token -> do
liftIO $ do liftIO $ do
alicePrivateFolderId <- getRootPrivateFolderIdForUser testEnv (UserName "alice") alicePrivateFolderId <- getRootPrivateFolderIdForUser testEnv alice
void $ checkEither $ runClientM (move_node token (SourceId aliceCorpusId) (TargetId alicePrivateFolderId)) clientEnv void $ checkEither $ runClientM (move_node token (SourceId aliceCorpusId) (TargetId alicePrivateFolderId)) clientEnv
withValidLogin serverPort "bob" (GargPassword "bob") $ \clientEnv token -> do withValidLogin serverPort "bob" (GargPassword "bob") $ \clientEnv token -> do
...@@ -118,7 +166,7 @@ tests = sequential $ aroundAll withTestDBAndPort $ do ...@@ -118,7 +166,7 @@ tests = sequential $ aroundAll withTestDBAndPort $ do
withValidLogin serverPort "alice" (GargPassword "alice") $ \clientEnv token -> do withValidLogin serverPort "alice" (GargPassword "alice") $ \clientEnv token -> do
liftIO $ do liftIO $ do
cId <- newCorpusForUser testEnv "alice" cId <- newCorpusForUser testEnv "alice"
alicePublicFolderId <- getRootPublicFolderIdForUser testEnv (UserName "alice") alicePublicFolderId <- getRootPublicFolderIdForUser testEnv alice
_ <- checkEither $ runClientM (move_node token (SourceId cId) (TargetId alicePublicFolderId)) clientEnv _ <- checkEither $ runClientM (move_node token (SourceId cId) (TargetId alicePublicFolderId)) clientEnv
-- Trying to delete a strictly published node should fail -- Trying to delete a strictly published node should fail
...@@ -129,9 +177,9 @@ tests = sequential $ aroundAll withTestDBAndPort $ do ...@@ -129,9 +177,9 @@ tests = sequential $ aroundAll withTestDBAndPort $ do
withApplication app $ do withApplication app $ do
withValidLogin serverPort "alice" (GargPassword "alice") $ \clientEnv token -> do withValidLogin serverPort "alice" (GargPassword "alice") $ \clientEnv token -> do
liftIO $ do liftIO $ do
fId <- newFolderForUser testEnv "alice" "my-test-folder" fId <- newFolderForUser testEnv alice "my-test-folder"
fId'' <- newPrivateFolderForUser testEnv "alice" fId'' <- newPrivateFolderForUser testEnv alice
alicePublicFolderId <- getRootPublicFolderIdForUser testEnv (UserName "alice") alicePublicFolderId <- getRootPublicFolderIdForUser testEnv alice
res <- runClientM (move_node token (SourceId fId) (TargetId alicePublicFolderId)) clientEnv res <- runClientM (move_node token (SourceId fId) (TargetId alicePublicFolderId)) clientEnv
res `shouldFailWith` EC_403__node_move_error res `shouldFailWith` EC_403__node_move_error
......
...@@ -7,11 +7,11 @@ module Test.API.Private.Remote ( ...@@ -7,11 +7,11 @@ module Test.API.Private.Remote (
) where ) where
import Control.Lens import Control.Lens
import Gargantext.API.Admin.EnvTypes (Mode(..))
import Gargantext.API.Errors import Gargantext.API.Errors
import Gargantext.API (makeApp) import Gargantext.API (makeApp)
import Gargantext.API.Routes.Client (remoteExportClient) import Gargantext.API.Routes.Client (remoteExportClient)
import Gargantext.API.Routes.Named.Remote (RemoteExportRequest(..)) import Gargantext.API.Routes.Named.Remote (RemoteExportRequest(..))
import Gargantext.Core.Config
import Gargantext.Core.Types.Individu import Gargantext.Core.Types.Individu
import Gargantext.Core.Types (NodeId(UnsafeMkNodeId)) import Gargantext.Core.Types (NodeId(UnsafeMkNodeId))
import Gargantext.Prelude import Gargantext.Prelude
...@@ -32,10 +32,10 @@ withTwoServerInstances :: (SpecContext (TestEnv,Wai.Application,Warp.Port) -> IO ...@@ -32,10 +32,10 @@ withTwoServerInstances :: (SpecContext (TestEnv,Wai.Application,Warp.Port) -> IO
withTwoServerInstances action = withTwoServerInstances action =
withTestDB $ \testEnv1 -> do withTestDB $ \testEnv1 -> do
withTestDB $ \testEnv2 -> do withTestDB $ \testEnv2 -> do
garg1App <- withLoggerIO Mock $ \ioLogger -> do garg1App <- withLoggerIO (log_cfg testEnv1) $ \ioLogger -> do
env <- newTestEnv testEnv1 ioLogger server1Port env <- newTestEnv testEnv1 ioLogger server1Port
makeApp env makeApp env
garg2App <- withLoggerIO Mock $ \ioLogger -> do garg2App <- withLoggerIO (log_cfg testEnv2) $ \ioLogger -> do
env <- newTestEnv testEnv2 ioLogger server2Port env <- newTestEnv testEnv2 ioLogger server2Port
makeApp env makeApp env
...@@ -45,6 +45,7 @@ withTwoServerInstances action = ...@@ -45,6 +45,7 @@ withTwoServerInstances action =
where where
server1Port = 8008 server1Port = 8008
server2Port = 9008 server2Port = 9008
log_cfg te = test_config te ^. gc_logging
tests :: Spec tests :: Spec
tests = sequential $ aroundAll withTwoServerInstances $ do tests = sequential $ aroundAll withTwoServerInstances $ do
...@@ -84,7 +85,7 @@ tests = sequential $ aroundAll withTwoServerInstances $ do ...@@ -84,7 +85,7 @@ tests = sequential $ aroundAll withTwoServerInstances $ do
it "forbids transferring certain node types" $ \(SpecContext testEnv1 server1Port app1 (_testEnv2, _app2, server2Port)) -> do it "forbids transferring certain node types" $ \(SpecContext testEnv1 server1Port app1 (_testEnv2, _app2, server2Port)) -> do
withApplication app1 $ do withApplication app1 $ do
withValidLogin server1Port "alice" (GargPassword "alice") $ \aliceClientEnv aliceToken -> do withValidLogin server1Port "alice" (GargPassword "alice") $ \aliceClientEnv aliceToken -> do
folderId <- liftIO $ newPrivateFolderForUser testEnv1 "alice" folderId <- liftIO $ newPrivateFolderForUser testEnv1 alice
withValidLogin server2Port "bob" (GargPassword "bob") $ \_bobClientEnv bobToken -> do withValidLogin server2Port "bob" (GargPassword "bob") $ \_bobClientEnv bobToken -> do
liftIO $ do liftIO $ do
let rq = RemoteExportRequest { _rer_instance_url = fromMaybe (panicTrace "impossible") $ parseBaseUrl "http://localhost:9008" let rq = RemoteExportRequest { _rer_instance_url = fromMaybe (panicTrace "impossible") $ parseBaseUrl "http://localhost:9008"
......
...@@ -29,6 +29,7 @@ module Test.API.Routes ( ...@@ -29,6 +29,7 @@ module Test.API.Routes (
, add_form_to_list , add_form_to_list
, add_tsv_to_list , add_tsv_to_list
, get_corpus_sqlite_export , get_corpus_sqlite_export
, addTeamMember
) where ) where
import Data.Text.Encoding qualified as TE import Data.Text.Encoding qualified as TE
...@@ -39,6 +40,7 @@ import Gargantext.API.HashedResponse (HashedResponse) ...@@ -39,6 +40,7 @@ import Gargantext.API.HashedResponse (HashedResponse)
import Gargantext.API.Ngrams.List.Types (WithJsonFile, WithTextFile) import Gargantext.API.Ngrams.List.Types (WithJsonFile, WithTextFile)
import Gargantext.API.Ngrams.Types ( NgramsTable, NgramsTablePatch, OrderBy, TabType, Versioned, VersionedWithCount ) import Gargantext.API.Ngrams.Types ( NgramsTable, NgramsTablePatch, OrderBy, TabType, Versioned, VersionedWithCount )
import Gargantext.API.Node.Corpus.Export.Types (CorpusSQLite) import Gargantext.API.Node.Corpus.Export.Types (CorpusSQLite)
import Gargantext.API.Node.Share.Types (ShareNodeParams(..))
import Gargantext.API.Routes.Client import Gargantext.API.Routes.Client
import Gargantext.API.Routes.Named import Gargantext.API.Routes.Named
import Gargantext.API.Routes.Named.Corpus (CorpusExportAPI(corpusSQLiteEp)) import Gargantext.API.Routes.Named.Corpus (CorpusExportAPI(corpusSQLiteEp))
...@@ -46,6 +48,7 @@ import Gargantext.API.Routes.Named.List (updateListJSONEp, updateListTSVEp) ...@@ -46,6 +48,7 @@ import Gargantext.API.Routes.Named.List (updateListJSONEp, updateListTSVEp)
import Gargantext.API.Routes.Named.Node hiding (treeAPI) import Gargantext.API.Routes.Named.Node hiding (treeAPI)
import Gargantext.API.Routes.Named.Private hiding (tableNgramsAPI) import Gargantext.API.Routes.Named.Private hiding (tableNgramsAPI)
import Gargantext.API.Routes.Named.Publish (PublishAPI(..), PublishRequest(..)) import Gargantext.API.Routes.Named.Publish (PublishAPI(..), PublishRequest(..))
import Gargantext.API.Routes.Named.Share (shareNodeEp)
import Gargantext.API.Routes.Named.Table import Gargantext.API.Routes.Named.Table
import Gargantext.API.Routes.Named.Tree (nodeTreeEp) import Gargantext.API.Routes.Named.Tree (nodeTreeEp)
import Gargantext.API.Types () -- MimeUnrender instances import Gargantext.API.Types () -- MimeUnrender instances
...@@ -359,3 +362,22 @@ get_corpus_sqlite_export (toServantToken -> token) cId = ...@@ -359,3 +362,22 @@ get_corpus_sqlite_export (toServantToken -> token) cId =
& ($ cId) & ($ cId)
& corpusSQLiteEp & corpusSQLiteEp
& ($ Nothing) -- Maybe ListId & ($ Nothing) -- Maybe ListId
addTeamMember :: Token -> NodeId -> ShareNodeParams -> ClientM NodeId
addTeamMember (toServantToken -> token) nodeId params = fmap UnsafeMkNodeId $
clientRoutes & apiWithCustomErrorScheme
& ($ GES_new)
& backendAPI
& backendAPI'
& mkBackEndAPI
& gargAPIVersion
& gargPrivateAPI
& mkPrivateAPI
& ($ token)
& nodeEp
& nodeEndpointAPI
& ($ nodeId)
& shareAPI
& shareNodeEp
& ($ params)
{-# LANGUAGE BangPatterns #-} {-# LANGUAGE BangPatterns #-}
{-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE ScopedTypeVariables #-}
module Test.API.Setup ( module Test.API.Setup (
...@@ -20,14 +20,12 @@ import Control.Monad.Reader ...@@ -20,14 +20,12 @@ import Control.Monad.Reader
import Data.ByteString.Lazy.Char8 qualified as C8L import Data.ByteString.Lazy.Char8 qualified as C8L
import Data.Cache qualified as InMemory import Data.Cache qualified as InMemory
import Data.Streaming.Network (bindPortTCP) import Data.Streaming.Network (bindPortTCP)
import Gargantext.API.Admin.EnvTypes (Mode(Mock), Env (..), env_dispatcher)
import Gargantext.API.Errors.Types
import Gargantext.API (makeApp) import Gargantext.API (makeApp)
import Gargantext.API.Admin.EnvTypes (Env (..), env_dispatcher)
import Gargantext.API.Errors.Types
import Gargantext.API.Prelude import Gargantext.API.Prelude
import Gargantext.Core.Config (gc_logging) import Gargantext.Core.Config hiding (jwtSettings)
import Gargantext.Core.Config (gc_notifications_config) import Gargantext.Core.Config.Types (fc_appPort, jwtSettings)
import Gargantext.Core.Config (_gc_secrets, gc_frontend_config)
import Gargantext.Core.Config.Types (NotificationsConfig(..), fc_appPort, jwtSettings)
import Gargantext.Core.Notifications (withNotifications) import Gargantext.Core.Notifications (withNotifications)
import Gargantext.Core.Types.Individu import Gargantext.Core.Types.Individu
import Gargantext.Database.Action.Flow import Gargantext.Database.Action.Flow
...@@ -45,10 +43,9 @@ import Gargantext.System.Logging ...@@ -45,10 +43,9 @@ import Gargantext.System.Logging
import Network.HTTP.Client.TLS (newTlsManager) import Network.HTTP.Client.TLS (newTlsManager)
import Network.HTTP.Types import Network.HTTP.Types
import Network.Wai (Application, responseLBS) import Network.Wai (Application, responseLBS)
import Network.Wai.Handler.Warp.Internal
import Network.Wai.Handler.Warp qualified as Warp
import Network.Wai.Handler.Warp (runSettingsSocket)
import Network.Wai qualified as Wai import Network.Wai qualified as Wai
import Network.Wai.Handler.Warp qualified as Warp
import Network.Wai.Handler.Warp.Internal
import Network.WebSockets qualified as WS import Network.WebSockets qualified as WS
import Prelude hiding (show) import Prelude hiding (show)
import Servant.Auth.Client () import Servant.Auth.Client ()
...@@ -99,20 +96,16 @@ newTestEnv testEnv logger port = do ...@@ -99,20 +96,16 @@ newTestEnv testEnv logger port = do
, _env_jwt_settings , _env_jwt_settings
} }
nc :: NotificationsConfig
nc = NotificationsConfig { _nc_central_exchange_bind = "tcp://*:15560"
, _nc_central_exchange_connect = "tcp://localhost:15560"
, _nc_dispatcher_bind = "tcp://*:15561"
, _nc_dispatcher_connect = "tcp://localhost:15561" }
-- | Run the gargantext server on a random port, picked by Warp, which allows -- | Run the gargantext server on a random port, picked by Warp, which allows
-- for concurrent tests to be executed in parallel, if we need to. -- for concurrent tests to be executed in parallel, if we need to.
-- NOTE: We don't need to change the 'NotificationConfig' at this point, because
-- the 'TestEnv' will already contain the correct values for us to use.
-- (cfg 'setup' in 'Test.Database.Setup').
withTestDBAndPort :: (SpecContext () -> IO ()) -> IO () withTestDBAndPort :: (SpecContext () -> IO ()) -> IO ()
withTestDBAndPort action = withTestDB $ \testEnv -> do withTestDBAndPort action = withTestDB $ \testEnv -> do
withNotifications (cfg testEnv) $ \dispatcher -> do let cfg = test_config testEnv
withLoggerIO Mock $ \ioLogger -> do withNotifications cfg $ \dispatcher -> do
withLoggerIO (log_cfg cfg) $ \ioLogger -> do
env <- newTestEnv testEnv ioLogger 8080 env <- newTestEnv testEnv ioLogger 8080
<&> env_dispatcher .~ dispatcher <&> env_dispatcher .~ dispatcher
app <- makeApp env app <- makeApp env
...@@ -126,32 +119,31 @@ withTestDBAndPort action = withTestDB $ \testEnv -> do ...@@ -126,32 +119,31 @@ withTestDBAndPort action = withTestDB $ \testEnv -> do
[ Handler $ \(err :: WS.ConnectionException) -> [ Handler $ \(err :: WS.ConnectionException) ->
case err of case err of
WS.CloseRequest _ _ -> WS.CloseRequest _ _ ->
withLogger (log_cfg testEnv) $ \ioLogger' -> withLogger (log_cfg cfg) $ \ioLogger' ->
logTxt ioLogger' DEBUG "[withTestDBAndPort] CloseRequest caught" logTxt ioLogger' DEBUG "[withTestDBAndPort] CloseRequest caught"
WS.ConnectionClosed -> WS.ConnectionClosed ->
withLogger (log_cfg testEnv) $ \ioLogger' -> withLogger (log_cfg cfg) $ \ioLogger' ->
logTxt ioLogger' DEBUG "[withTestDBAndPort] ConnectionClosed caught" logTxt ioLogger' DEBUG "[withTestDBAndPort] ConnectionClosed caught"
_ -> do _ -> do
withLogger (log_cfg testEnv) $ \ioLogger' -> withLogger (log_cfg cfg) $ \ioLogger' ->
logTxt ioLogger' ERROR $ "[withTestDBAndPort] unknown exception: " <> show err logTxt ioLogger' ERROR $ "[withTestDBAndPort] unknown exception: " <> show err
throw err throw err
-- re-throw any other exceptions -- re-throw any other exceptions
, Handler $ \(err :: SomeException) -> throw err ] , Handler $ \(err :: SomeException) -> throw err ]
where where
cfg te = (test_config te) & gc_notifications_config .~ nc log_cfg cfg = cfg ^. gc_logging
log_cfg te = (cfg te) ^. gc_logging
-- | Starts the backend server /and/ the microservices proxy, the former at -- | Starts the backend server /and/ the microservices proxy, the former at
-- a random port, the latter at a predictable port. -- a random port, the latter at a predictable port.
withBackendServerAndProxy :: (((TestEnv, Warp.Port, Warp.Port)) -> IO ()) -> IO () withBackendServerAndProxy :: (((TestEnv, Warp.Port, Warp.Port)) -> IO ()) -> IO ()
withBackendServerAndProxy action = withBackendServerAndProxy action =
withTestDB $ \testEnv -> do withTestDB $ \testEnv -> do
gargApp <- withLoggerIO Mock $ \ioLogger -> do gargApp <- withLoggerIO (log_cfg testEnv) $ \ioLogger -> do
env <- newTestEnv testEnv ioLogger 8080 env <- newTestEnv testEnv ioLogger 8080
makeApp env makeApp env
proxyCache <- InMemory.newCache Nothing proxyCache <- InMemory.newCache Nothing
proxyApp <- withLoggerIO Mock $ \ioLogger -> do proxyApp <- withLoggerIO (log_cfg testEnv) $ \ioLogger -> do
env <- newTestEnv testEnv ioLogger 8080 env <- newTestEnv testEnv ioLogger 8080
pure $ microServicesProxyApp proxyCache env pure $ microServicesProxyApp proxyCache env
...@@ -160,6 +152,8 @@ withBackendServerAndProxy action = ...@@ -160,6 +152,8 @@ withBackendServerAndProxy action =
action (testEnv, serverPort, proxyPort) action (testEnv, serverPort, proxyPort)
where where
proxyPort = 8090 proxyPort = 8090
cfg te = test_config te
log_cfg te = cfg te ^. gc_logging
setupEnvironment :: TestEnv -> IO () setupEnvironment :: TestEnv -> IO ()
setupEnvironment env = flip runReaderT env $ runTestMonad $ do setupEnvironment env = flip runReaderT env $ runTestMonad $ do
...@@ -214,7 +208,7 @@ testWithApplicationOnPort mkApp userPort action = do ...@@ -214,7 +208,7 @@ testWithApplicationOnPort mkApp userPort action = do
sock <- bindPortTCP userPort "127.0.0.1" sock <- bindPortTCP userPort "127.0.0.1"
result <- result <-
Async.race Async.race
(runSettingsSocket appSettings sock app) (Warp.runSettingsSocket appSettings sock app)
(waitFor started >> action) (waitFor started >> action)
case result of case result of
Left () -> UnliftIO.throwString "Unexpected: runSettingsSocket exited" Left () -> UnliftIO.throwString "Unexpected: runSettingsSocket exited"
......
...@@ -67,7 +67,7 @@ import Paths_gargantext (getDataFileName) ...@@ -67,7 +67,7 @@ import Paths_gargantext (getDataFileName)
import qualified Prelude import qualified Prelude
import Servant.Client.Streaming import Servant.Client.Streaming
import System.FilePath import System.FilePath
import Test.API.Prelude (checkEither, newCorpusForUser, newPrivateFolderForUser) import Test.API.Prelude (checkEither, newCorpusForUser, newPrivateFolderForUser, alice)
import Test.API.Routes (mkUrl, gqlUrl, get_table_ngrams, put_table_ngrams, toServantToken, clientRoutes, get_table, update_node, add_form_to_list, add_tsv_to_list) import Test.API.Routes (mkUrl, gqlUrl, get_table_ngrams, put_table_ngrams, toServantToken, clientRoutes, get_table, update_node, add_form_to_list, add_tsv_to_list)
import Test.API.Setup (withTestDBAndPort, dbEnvSetup, SpecContext (..)) import Test.API.Setup (withTestDBAndPort, dbEnvSetup, SpecContext (..))
import Test.Database.Types import Test.Database.Types
...@@ -350,7 +350,7 @@ createDocsList :: FilePath ...@@ -350,7 +350,7 @@ createDocsList :: FilePath
-> Token -> Token
-> WaiSession () CorpusId -> WaiSession () CorpusId
createDocsList testDataPath testEnv port clientEnv token = do createDocsList testDataPath testEnv port clientEnv token = do
folderId <- liftIO $ newPrivateFolderForUser testEnv "alice" folderId <- liftIO $ newPrivateFolderForUser testEnv alice
([corpusId] :: [NodeId]) <- protectedJSON token "POST" (mkUrl port ("/node/" <> build folderId)) [aesonQQ|{"pn_typename":"NodeCorpus","pn_name":"TestCorpus"}|] ([corpusId] :: [NodeId]) <- protectedJSON token "POST" (mkUrl port ("/node/" <> build folderId)) [aesonQQ|{"pn_typename":"NodeCorpus","pn_name":"TestCorpus"}|]
-- Import the docsList with only two documents, both containing a \"fortran\" term. -- Import the docsList with only two documents, both containing a \"fortran\" term.
simpleDocs <- liftIO (TIO.readFile =<< getDataFileName testDataPath) simpleDocs <- liftIO (TIO.readFile =<< getDataFileName testDataPath)
......
...@@ -12,18 +12,20 @@ Portability : POSIX ...@@ -12,18 +12,20 @@ Portability : POSIX
{-# OPTIONS_GHC -Wno-orphans #-} {-# OPTIONS_GHC -Wno-orphans #-}
{-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE ScopedTypeVariables #-}
{-# LANGUAGE TemplateHaskell #-}
module Test.API.Worker ( module Test.API.Worker (
tests tests
) where ) where
import Control.Concurrent (threadDelay) import Control.Concurrent.Async (withAsync, forConcurrently_)
import Control.Concurrent.Async (withAsync)
import Control.Concurrent.STM.TChan import Control.Concurrent.STM.TChan
import Control.Lens
import Control.Monad.STM (atomically) import Control.Monad.STM (atomically)
import Data.Aeson qualified as Aeson import Data.Aeson qualified as Aeson
import Data.Maybe (isJust) import Data.Maybe (isJust)
import Gargantext.Core.Config
import Gargantext.Core.Notifications.Dispatcher.Types qualified as DT import Gargantext.Core.Notifications.Dispatcher.Types qualified as DT
import Gargantext.Core.Worker.Jobs (sendJobWithCfg) import Gargantext.Core.Worker.Jobs (sendJobWithCfg)
import Gargantext.Core.Worker.Jobs.Types (Job(Ping)) import Gargantext.Core.Worker.Jobs.Types (Job(Ping))
...@@ -35,6 +37,10 @@ import Test.Database.Types (test_config) ...@@ -35,6 +37,10 @@ import Test.Database.Types (test_config)
import Test.Hspec import Test.Hspec
import Test.Instances () import Test.Instances ()
import Test.Utils.Notifications import Test.Utils.Notifications
import Gargantext.System.Logging
import qualified Data.Text.Encoding as TE
import qualified Data.ByteString as BL
import Test.Tasty.HUnit (assertBool)
...@@ -43,29 +49,52 @@ tests = sequential $ aroundAll withTestDBAndPort $ do ...@@ -43,29 +49,52 @@ tests = sequential $ aroundAll withTestDBAndPort $ do
describe "Worker" $ do describe "Worker" $ do
it "simple Ping job works" $ \(SpecContext testEnv port _app _) -> do it "simple Ping job works" $ \(SpecContext testEnv port _app _) -> do
let cfg = test_config testEnv let cfg = test_config testEnv
let log_cfg = (test_config testEnv) ^. gc_logging
let topic = DT.Ping let topic = DT.Ping
tchan <- newTChanIO :: IO (TChan (Maybe DT.Notification)) tchan <- newTChanIO :: IO (TChan (Maybe DT.Notification))
-- setup a websocket connection
let wsConnect =
withWSConnection ("127.0.0.1", port) $ \conn -> do
WS.sendTextData conn $ Aeson.encode (DT.WSSubscribe topic)
d <- WS.receiveData conn
let dec = Aeson.decode d :: Maybe DT.Notification
atomically $ writeTChan tchan dec
-- wait a bit to settle
threadDelay (100 * millisecond)
withAsync wsConnect $ \_a -> do
-- wait a bit to connect
threadDelay (500 * millisecond)
withAsync (setupWsThread log_cfg topic tchan port) $ \_a -> do
_ <- sendJobWithCfg cfg Ping _ <- sendJobWithCfg cfg Ping
mTimeout <- Timeout.timeout (5 * 1_000_000) $ do mTimeout <- Timeout.timeout (5 * 1_000_000) $ do
md <- atomically $ readTChan tchan md <- atomically $ readTChan tchan
md `shouldBe` Just DT.NPing md `shouldBe` Just DT.NPing
mTimeout `shouldSatisfy` isJust mTimeout `shouldSatisfy` isJust
describe "concurrency" $ do
-- This test checks that two concurrent threads can both subscribe
-- to the same topic and get notified.
it "handles concurrent threads" $ \(SpecContext testEnv port _app _) -> do
let cfg = test_config testEnv
let log_cfg = (test_config testEnv) ^. gc_logging
let topic = DT.Ping
let competingThreads = 3
forConcurrently_ [ 1 .. competingThreads ] $ \(tid :: Int) -> do
tchan <- newTChanIO :: IO (TChan (Maybe DT.Notification))
withAsync (setupWsThread log_cfg topic tchan port) $ \_a -> do
_ <- sendJobWithCfg cfg Ping
mTimeout <- Timeout.timeout (5 * 1_000_000) $ do
md <- atomically $ readTChan tchan
md `shouldBe` Just DT.NPing
assertBool ("Competing Thread " <> show tid <> " didn't receive a value.") (isJust mTimeout)
setupWsThread :: LogConfig -> DT.Topic -> TChan (Maybe DT.Notification) -> Int -> IO ()
setupWsThread log_cfg topic tchan port = withLogger log_cfg $ \ioL -> do
withWSConnection ("127.0.0.1", port) $ \conn -> do
let payload = Aeson.encode (DT.WSSubscribe topic)
$(logLoc) ioL DEBUG $ "Sending payload: " <> (TE.decodeUtf8 $ BL.toStrict $ payload)
WS.sendTextData conn $ Aeson.encode (DT.WSSubscribe topic)
d <- WS.receiveData conn
$(logLoc) ioL DEBUG $ "Received: " <> (TE.decodeUtf8 $ BL.toStrict d)
let dec = Aeson.decode d :: Maybe DT.Notification
atomically $ writeTChan tchan dec
...@@ -21,7 +21,7 @@ import Gargantext.Database.Prelude (DBCmd) ...@@ -21,7 +21,7 @@ import Gargantext.Database.Prelude (DBCmd)
import Gargantext.Database.Query.Table.Node import Gargantext.Database.Query.Table.Node
import Gargantext.Database.Query.Table.NodeNode import Gargantext.Database.Query.Table.NodeNode
import Gargantext.Database.Schema.Node (NodePoly(..)) import Gargantext.Database.Schema.Node (NodePoly(..))
import Test.API.Prelude (newPrivateFolderForUser, newPublicFolderForUser) import Test.API.Prelude (newPrivateFolderForUser, newPublicFolderForUser, alice)
import Test.Database.Types import Test.Database.Types
import Test.Tasty.HUnit import Test.Tasty.HUnit
...@@ -40,8 +40,8 @@ testGetUserRootPublicNode testEnv = do ...@@ -40,8 +40,8 @@ testGetUserRootPublicNode testEnv = do
testIsReadOnlyWorks :: TestEnv -> Assertion testIsReadOnlyWorks :: TestEnv -> Assertion
testIsReadOnlyWorks testEnv = do testIsReadOnlyWorks testEnv = do
alicePrivateFolderId <- newPrivateFolderForUser testEnv "alice" alicePrivateFolderId <- newPrivateFolderForUser testEnv alice
alicePublicFolderId <- newPublicFolderForUser testEnv "alice" alicePublicFolderId <- newPublicFolderForUser testEnv alice
flip runReaderT testEnv $ runTestMonad $ do flip runReaderT testEnv $ runTestMonad $ do
-- Create a corpus, by default is not read only -- Create a corpus, by default is not read only
aliceUserId <- getUserId (UserName "alice") aliceUserId <- getUserId (UserName "alice")
...@@ -61,8 +61,8 @@ testIsReadOnlyWorks testEnv = do ...@@ -61,8 +61,8 @@ testIsReadOnlyWorks testEnv = do
-- then all the children (up to the first level) are also marked read-only. -- then all the children (up to the first level) are also marked read-only.
testPublishRecursiveFirstLevel :: TestEnv -> Assertion testPublishRecursiveFirstLevel :: TestEnv -> Assertion
testPublishRecursiveFirstLevel testEnv = do testPublishRecursiveFirstLevel testEnv = do
alicePrivateFolderId <- newPrivateFolderForUser testEnv "alice" alicePrivateFolderId <- newPrivateFolderForUser testEnv alice
alicePublicFolderId <- newPublicFolderForUser testEnv "alice" alicePublicFolderId <- newPublicFolderForUser testEnv alice
flip runReaderT testEnv $ runTestMonad $ do flip runReaderT testEnv $ runTestMonad $ do
-- Create a corpus, by default is not read only -- Create a corpus, by default is not read only
aliceUserId <- getUserId (UserName "alice") aliceUserId <- getUserId (UserName "alice")
...@@ -78,8 +78,8 @@ testPublishRecursiveFirstLevel testEnv = do ...@@ -78,8 +78,8 @@ testPublishRecursiveFirstLevel testEnv = do
-- then all the children of the children are also marked read-only. -- then all the children of the children are also marked read-only.
testPublishRecursiveNLevel :: TestEnv -> Assertion testPublishRecursiveNLevel :: TestEnv -> Assertion
testPublishRecursiveNLevel testEnv = do testPublishRecursiveNLevel testEnv = do
alicePrivateFolderId <- newPrivateFolderForUser testEnv "alice" alicePrivateFolderId <- newPrivateFolderForUser testEnv alice
alicePublicFolderId <- newPublicFolderForUser testEnv "alice" alicePublicFolderId <- newPublicFolderForUser testEnv alice
flip runReaderT testEnv $ runTestMonad $ do flip runReaderT testEnv $ runTestMonad $ do
-- Create a corpus, by default is not read only -- Create a corpus, by default is not read only
aliceUserId <- getUserId (UserName "alice") aliceUserId <- getUserId (UserName "alice")
...@@ -95,8 +95,8 @@ testPublishRecursiveNLevel testEnv = do ...@@ -95,8 +95,8 @@ testPublishRecursiveNLevel testEnv = do
testPublishLenientWorks :: TestEnv -> Assertion testPublishLenientWorks :: TestEnv -> Assertion
testPublishLenientWorks testEnv = do testPublishLenientWorks testEnv = do
alicePrivateFolderId <- newPrivateFolderForUser testEnv "alice" alicePrivateFolderId <- newPrivateFolderForUser testEnv alice
alicePublicFolderId <- newPublicFolderForUser testEnv "alice" alicePublicFolderId <- newPublicFolderForUser testEnv alice
flip runReaderT testEnv $ runTestMonad $ do flip runReaderT testEnv $ runTestMonad $ do
aliceUserId <- getUserId (UserName "alice") aliceUserId <- getUserId (UserName "alice")
corpusId <- insertDefaultNode NodeCorpus alicePrivateFolderId aliceUserId corpusId <- insertDefaultNode NodeCorpus alicePrivateFolderId aliceUserId
......
{-# LANGUAGE TupleSections #-} {-# LANGUAGE TupleSections #-}
{-# LANGUAGE ViewPatterns #-}
module Test.Database.Setup ( module Test.Database.Setup (
withTestDB withTestDB
...@@ -16,11 +17,9 @@ import Data.Text qualified as T ...@@ -16,11 +17,9 @@ import Data.Text qualified as T
import Data.Text.Encoding qualified as TE import Data.Text.Encoding qualified as TE
import Database.PostgreSQL.Simple qualified as PG import Database.PostgreSQL.Simple qualified as PG
import Database.PostgreSQL.Simple.Options qualified as Client import Database.PostgreSQL.Simple.Options qualified as Client
import Database.PostgreSQL.Simple.Options qualified as Opts
import Database.Postgres.Temp qualified as Tmp import Database.Postgres.Temp qualified as Tmp
import Gargantext.API.Admin.EnvTypes (Mode(Mock))
import Gargantext.Core.Config import Gargantext.Core.Config
import Gargantext.Core.Config.Types (SettingsFile(..)) import Gargantext.Core.Config.Types
import Gargantext.Core.Config.Utils (readConfig) import Gargantext.Core.Config.Utils (readConfig)
import Gargantext.Core.Config.Worker (wsDatabase, wsDefinitions) import Gargantext.Core.Config.Worker (wsDatabase, wsDefinitions)
import Gargantext.Core.NLP (nlpServerMap) import Gargantext.Core.NLP (nlpServerMap)
...@@ -33,8 +32,10 @@ import Paths_gargantext ...@@ -33,8 +32,10 @@ import Paths_gargantext
import Prelude qualified import Prelude qualified
import Shelly hiding (FilePath, run) import Shelly hiding (FilePath, run)
import Shelly qualified as SH import Shelly qualified as SH
import System.Environment
import Test.Database.Types import Test.Database.Types
import Test.Utils.Db (tmpDBToConnInfo) import Test.Utils.Db (tmpDBToConnInfo)
import UnliftIO.Temporary (withTempFile)
-- | Test DB settings. -- | Test DB settings.
...@@ -74,6 +75,22 @@ tmpPgConfig = Tmp.defaultConfig <> ...@@ -74,6 +75,22 @@ tmpPgConfig = Tmp.defaultConfig <>
, Client.password = pure dbPassword , Client.password = pure dbPassword
} }
-- | Overrides the 'NotificationsConfig' of the input 'GargConfig' with something suitable
-- for testing. It will replace the bind sites for the CE and the dispatcher with random temp files
-- to be used for IPC communication.
withTestNotificationConfig :: GargConfig -> (GargConfig -> IO a) -> IO a
withTestNotificationConfig cfg action = do
tmpDir <- fromMaybe "/tmp" <$> lookupEnv "TMPDIR"
withTempFile tmpDir "ce_test.ipc" $ \(T.pack -> ce_fp) _hdl1 -> do
withTempFile tmpDir "ds_test.ipc" $ \(T.pack -> ds_fp) _hdl2 -> do
action $ cfg & gc_notifications_config
.~ NotificationsConfig { _nc_central_exchange_bind = "ipc://" <> ce_fp
, _nc_central_exchange_connect = "ipc://" <> ce_fp
, _nc_dispatcher_bind = "ipc://" <> ds_fp
, _nc_dispatcher_connect = "ipc://" <> ds_fp
}
setup :: IO TestEnv setup :: IO TestEnv
setup = do setup = do
res <- Tmp.startConfig tmpPgConfig res <- Tmp.startConfig tmpPgConfig
...@@ -81,50 +98,52 @@ setup = do ...@@ -81,50 +98,52 @@ setup = do
Left err -> Prelude.fail $ show err Left err -> Prelude.fail $ show err
Right db -> do Right db -> do
let connInfo = tmpDBToConnInfo db let connInfo = tmpDBToConnInfo db
gargConfig <- testTomlPath >>= readConfig gargConfig0 <- testTomlPath >>= readConfig
-- fix db since we're using tmp-postgres -- fix db since we're using tmp-postgres
<&> (gc_database_config .~ connInfo) <&> (gc_database_config .~ connInfo)
-- <&> (gc_worker . wsDatabase .~ connInfo) -- <&> (gc_worker . wsDatabase .~ connInfo)
<&> (gc_worker . wsDatabase .~ (connInfo { PG.connectDatabase = "pgmq_test" })) <&> (gc_worker . wsDatabase .~ (connInfo { PG.connectDatabase = "pgmq_test" }))
-- putText $ "[setup] database: " <> show (gargConfig ^. gc_database_config) -- putText $ "[setup] database: " <> show (gargConfig ^. gc_database_config)
-- putText $ "[setup] worker db: " <> show (gargConfig ^. gc_worker . wsDatabase) -- putText $ "[setup] worker db: " <> show (gargConfig ^. gc_worker . wsDatabase)
let idleTime = 60.0 withTestNotificationConfig gargConfig0 $ \gargConfig -> do
let maxResources = 2 let log_cfg = gargConfig ^. gc_logging
let poolConfig = defaultPoolConfig (PG.connectPostgreSQL (Tmp.toConnectionString db)) let idleTime = 60.0
PG.close let maxResources = 2
idleTime let poolConfig = defaultPoolConfig (PG.connectPostgreSQL (Tmp.toConnectionString db))
maxResources PG.close
pool <- newPool (setNumStripes (Just 2) poolConfig) idleTime
bootstrapDB db pool gargConfig maxResources
ugen <- emptyCounter pool <- newPool (setNumStripes (Just 2) poolConfig)
test_nodeStory <- fromDBNodeStoryEnv pool bootstrapDB db pool gargConfig
withLoggerIO Mock $ \logger -> do ugen <- emptyCounter
test_nodeStory <- fromDBNodeStoryEnv pool
let wPoolConfig = defaultPoolConfig (PG.connectPostgreSQL (Tmp.toConnectionString db)) withLoggerIO log_cfg $ \logger -> do
PG.close
idleTime let wPoolConfig = defaultPoolConfig (PG.connectPostgreSQL (Tmp.toConnectionString db))
maxResources PG.close
wPool <- newPool (setNumStripes (Just 2) wPoolConfig) idleTime
wNodeStory <- fromDBNodeStoryEnv wPool maxResources
_w_env_job_state <- newTVarIO Nothing wPool <- newPool (setNumStripes (Just 2) wPoolConfig)
withLoggerIO Mock $ \wioLogger -> do wNodeStory <- fromDBNodeStoryEnv wPool
let wEnv = WorkerEnv { _w_env_config = gargConfig _w_env_job_state <- newTVarIO Nothing
, _w_env_logger = wioLogger withLoggerIO log_cfg $ \wioLogger -> do
, _w_env_pool = wPool let wEnv = WorkerEnv { _w_env_config = gargConfig
, _w_env_nodeStory = wNodeStory , _w_env_logger = wioLogger
, _w_env_mail = errorTrace "[wEnv] w_env_mail requested but not available" , _w_env_pool = wPool
, _w_env_nlp = nlpServerMap $ gargConfig ^. gc_nlp_config , _w_env_nodeStory = wNodeStory
, _w_env_job_state } , _w_env_mail = errorTrace "[wEnv] w_env_mail requested but not available"
, _w_env_nlp = nlpServerMap $ gargConfig ^. gc_nlp_config
wState <- initWorkerState wEnv (fromJust $ head $ gargConfig ^. gc_worker . wsDefinitions) , _w_env_job_state }
test_worker_tid <- forkIO $ Worker.run wState
pure $ TestEnv { test_db = DBHandle pool db wState <- initWorkerState wEnv (fromJust $ head $ gargConfig ^. gc_worker . wsDefinitions)
, test_config = gargConfig test_worker_tid <- forkIO $ Worker.run wState
, test_nodeStory pure $ TestEnv { test_db = DBHandle pool db
, test_usernameGen = ugen , test_config = gargConfig
, test_logger = logger , test_nodeStory
, test_worker_tid , test_usernameGen = ugen
} , test_logger = logger
, test_worker_tid
}
withTestDB :: (TestEnv -> IO ()) -> IO () withTestDB :: (TestEnv -> IO ()) -> IO ()
withTestDB = bracket setup teardown withTestDB = bracket setup teardown
...@@ -134,7 +153,7 @@ testEnvToPgConnectionInfo TestEnv{..} = ...@@ -134,7 +153,7 @@ testEnvToPgConnectionInfo TestEnv{..} =
PG.ConnectInfo { PG.connectHost = "0.0.0.0" PG.ConnectInfo { PG.connectHost = "0.0.0.0"
, PG.connectPort = fromIntegral $ fromMaybe 5432 , PG.connectPort = fromIntegral $ fromMaybe 5432
$ getLast $ getLast
$ Opts.port $ Client.port
$ Tmp.toConnectionOptions $ Tmp.toConnectionOptions
$ _DBTmp test_db $ _DBTmp test_db
, PG.connectUser = dbUser , PG.connectUser = dbUser
......
...@@ -22,11 +22,9 @@ import Control.Monad.Trans.Control ...@@ -22,11 +22,9 @@ import Control.Monad.Trans.Control
import Data.IORef import Data.IORef
import Data.Map qualified as Map import Data.Map qualified as Map
import Data.Pool import Data.Pool
import Data.Text qualified as T
import Database.PostgreSQL.Simple qualified as PG import Database.PostgreSQL.Simple qualified as PG
import Database.Postgres.Temp qualified as Tmp import Database.Postgres.Temp qualified as Tmp
import Gargantext hiding (to) import Gargantext hiding (to)
import Gargantext.API.Admin.EnvTypes
import Gargantext.API.Admin.Orchestrator.Types import Gargantext.API.Admin.Orchestrator.Types
import Gargantext.API.Errors.Types import Gargantext.API.Errors.Types
import Gargantext.API.Prelude import Gargantext.API.Prelude
...@@ -36,6 +34,7 @@ import Gargantext.Core.Mail.Types (HasMail(..)) ...@@ -36,6 +34,7 @@ import Gargantext.Core.Mail.Types (HasMail(..))
import Gargantext.Core.NLP (HasNLPServer(..)) import Gargantext.Core.NLP (HasNLPServer(..))
import Gargantext.Core.NodeStory import Gargantext.Core.NodeStory
import Gargantext.System.Logging (HasLogger(..), Logger, MonadLogger(..)) import Gargantext.System.Logging (HasLogger(..), Logger, MonadLogger(..))
import Gargantext.System.Logging.Loggers
import Gargantext.Utils.Jobs.Monad (MonadJobStatus(..)) import Gargantext.Utils.Jobs.Monad (MonadJobStatus(..))
import Network.URI (parseURI) import Network.URI (parseURI)
import Prelude qualified import Prelude qualified
...@@ -135,22 +134,11 @@ instance MonadLogger (GargM TestEnv BackendInternalError) where ...@@ -135,22 +134,11 @@ instance MonadLogger (GargM TestEnv BackendInternalError) where
getLogger = asks test_logger getLogger = asks test_logger
instance HasLogger (GargM TestEnv BackendInternalError) where instance HasLogger (GargM TestEnv BackendInternalError) where
data instance Logger (GargM TestEnv BackendInternalError) = newtype instance Logger (GargM TestEnv BackendInternalError) =
GargTestLogger { GargTestLogger { _GargTestLogger :: MonadicStdLogger FL.LogStr IO }
test_logger_mode :: Mode type instance LogInitParams (GargM TestEnv BackendInternalError) = LogConfig
, test_logger_set :: FL.LoggerSet
}
type instance LogInitParams (GargM TestEnv BackendInternalError) = Mode
type instance LogPayload (GargM TestEnv BackendInternalError) = FL.LogStr type instance LogPayload (GargM TestEnv BackendInternalError) = FL.LogStr
initLogger mode = do initLogger cfg = fmap GargTestLogger $ (liftIO $ monadicStdLogger cfg)
test_logger_set <- liftIO $ FL.newStderrLoggerSet FL.defaultBufSize destroyLogger = liftIO . _msl_destroy . _GargTestLogger
pure $ GargTestLogger mode test_logger_set logMsg (GargTestLogger ioLogger) lvl msg = liftIO $ _msl_log_msg ioLogger lvl msg
destroyLogger GargTestLogger{..} = liftIO $ FL.rmLoggerSet test_logger_set logTxt (GargTestLogger ioLogger) lvl msg = liftIO $ _msl_log_txt ioLogger lvl msg
logMsg (GargTestLogger mode logger_set) lvl msg = do
cfg <- view hasConfig
let minLvl = cfg ^. gc_logging . lc_log_level
when (lvl >= minLvl) $ do
let pfx = "[" <> show lvl <> "] " :: Text
when (lvl `elem` (modeToLoggingLevels mode)) $
liftIO $ FL.pushLogStrLn logger_set $ FL.toLogStr pfx <> msg
logTxt lgr lvl msg = logMsg lgr lvl (FL.toLogStr $ T.unpack msg)
...@@ -333,15 +333,17 @@ waitUntil pred' timeoutMs = do ...@@ -333,15 +333,17 @@ waitUntil pred' timeoutMs = do
-- wait for given number of milliseconds for a given tchan value -- wait for given number of milliseconds for a given tchan value
waitForTChanValue :: (HasCallStack, Eq a, Show a) => TChan a -> a -> Int -> IO () waitForTChanValue :: (HasCallStack, Eq a, Show a) => TChan a -> a -> Int -> IO ()
waitForTChanValue tchan expected timeoutMs = do waitForTChanValue tchan expected timeoutMs = do
mTimeout <- Timeout.timeout (timeoutMs * 1000) $ do mTimeout <- Timeout.timeout total_wait $ do
v <- atomically $ readTChan tchan v <- atomically $ readTChan tchan
unless (v == expected) $ panicTrace $ "[waitForTChanValue] v != expected (" <> show v <> " != " <> show expected <> ")" unless (v == expected) $ panicTrace $ "[waitForTChanValue] v != expected (" <> show v <> " != " <> show expected <> ")"
-- v `shouldBe` expected -- v `shouldBe` expected
-- no timeout should have occurred -- no timeout should have occurred
-- mTimeout `shouldSatisfy` isJust -- mTimeout `shouldSatisfy` isJust
when (isNothing mTimeout) $ when (isNothing mTimeout) $
panicTrace $ "[waitForTChanValue] timeout when waiting for " <> show expected <> " on tchan" panicTrace $ "[waitForTChanValue] timeout of " <> show total_wait <> " milliseconds exhausted when waiting for " <> show expected <> " on tchan"
where
total_wait :: Int
total_wait = timeoutMs * 1_000
waitForTSem :: HasCallStack => TSem -> Int -> IO () waitForTSem :: HasCallStack => TSem -> Int -> IO ()
waitForTSem tsem timeoutMs = do waitForTSem tsem timeoutMs = do
......
...@@ -20,8 +20,8 @@ startCoreNLPServer :: IO ProcessHandle ...@@ -20,8 +20,8 @@ startCoreNLPServer :: IO ProcessHandle
startCoreNLPServer = do startCoreNLPServer = do
putText "calling start core nlp" putText "calling start core nlp"
devNull <- openFile "/dev/null" WriteMode devNull <- openFile "/dev/null" WriteMode
let p = proc "./startServer.sh" [] let p = proc "startCoreNLPServer.sh" []
(_, _, _, hdl) <- (createProcess $ p { cwd = Just "devops/coreNLP/stanford-corenlp-current" (_, _, _, hdl) <- (createProcess $ p { cwd = Nothing
-- NOTE(adn) Issue #451, this one has to stay disabled, because if we -- NOTE(adn) Issue #451, this one has to stay disabled, because if we
-- turn it on, despite the confusing documentation on the `process` library -- turn it on, despite the confusing documentation on the `process` library
-- it will cause the Haskell RTS to completely ignore the Ctrl^c and instead -- it will cause the Haskell RTS to completely ignore the Ctrl^c and instead
...@@ -34,10 +34,7 @@ startCoreNLPServer = do ...@@ -34,10 +34,7 @@ startCoreNLPServer = do
, std_err = UseHandle devNull , std_err = UseHandle devNull
}) `catch` \e -> case e of }) `catch` \e -> case e of
_ | True <- "does not exist" `isInfixOf` (T.pack . show @SomeException $ e) _ | True <- "does not exist" `isInfixOf` (T.pack . show @SomeException $ e)
-> fail $ "Cannot execute the 'startServer.sh' script. If this is the " <> -> fail $ "Cannot execute the 'startCoreNLPServer.sh' script. Make sure you are in a nix environment."
"first time you are running the tests, you have to run " <>
"cd devops/coreNLP && ./build.sh first. You have to run it only once, " <>
"and then you are good to go for the time being."
| otherwise -> throwIO e | otherwise -> throwIO e
pure hdl pure hdl
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment