Verified Commit c52d11b0 authored by Przemyslaw Kaminski's avatar Przemyslaw Kaminski

Merge branch 'dev' into 444-dev-temporary-file-storage

parents 29bf399f 5225daf6
# Optimising CI speed by using tips from https://blog.nimbleways.com/let-s-make-faster-gitlab-ci-cd-pipelines/
#image: cgenie/gargantext:9.4.8
image: adinapoli/gargantext:v3.5
image: cgenie/gargantext:9.6.6
variables:
STACK_ROOT: "${CI_PROJECT_DIR}/.stack-root"
STACK_OPTS: "--system-ghc"
STORE_DIR: "${CI_PROJECT_DIR}/.cabal"
CABAL_DIR: "${CI_PROJECT_DIR}/.cabal"
CORENLP: "4.5.4"
FF_USE_FASTZIP: "true"
ARTIFACT_COMPRESSION_LEVEL: "fast"
CACHE_COMPRESSION_LEVEL: "fast"
......@@ -95,17 +93,11 @@ test:
nix-shell --run "./bin/update-project-dependencies $STORE_DIR"
mkdir -p /root/.cache/cabal/logs
chown -R test:test /root/.cache/cabal/logs/
chown -R test:test /root/.cache/cabal/packages/hackage.haskell.org/
chown -R test:test "$TEST_TMPDIR"
mkdir -p /builds/gargantext/haskell-gargantext/devops/coreNLP/stanford-corenlp-current
cp -R /root/devops/coreNLP/stanford-corenlp-${CORENLP}/* /builds/gargantext/haskell-gargantext/devops/coreNLP/stanford-corenlp-current/
nix-shell --run "chown -R test:test /root/.config/ && su -m test -c \"export PATH=$PATH:$TEST_NIX_PATH && export TMPDIR=$TEST_TMPDIR && cd /builds/gargantext/haskell-gargantext; $CABAL --store-dir=$STORE_DIR v2-test --test-show-details=streaming --flags 'test-crypto no-phylo-debug-logs' --ghc-options='-O0 -fclear-plugins'\""
chown -R root:root dist-newstyle/
chown -R root:root /root/
chown -R root:root $STORE_DIR
chown -R root:root /root/.cache/cabal/logs/
chown -R root:root /root/.cache/cabal/packages/hackage.haskell.org/
chown -Rh root:root /builds/gargantext/haskell-gargantext/devops/coreNLP/stanford-corenlp-current
## Version 0.0.7.4.7
* [BACK][FIX][Adjust the output of the UpdateList tests (#460)](https://gitlab.iscpif.fr/gargantext/haskell-gargantext/issues/460)
* [BACK][FIX][Import/export in SQLite format (#362)](https://gitlab.iscpif.fr/gargantext/haskell-gargantext/issues/362)
* [FRONT][FIX][When the progress bar is empty: say: waiting task (#503)](https://gitlab.iscpif.fr/gargantext/purescript-gargantext/issues/503)
## Version 0.0.7.4.6
* [BACK][FIX][project in `docker-compose` (#450)](https://gitlab.iscpif.fr/gargantext/haskell-gargantext/issues/450)
* [BACK][FIX][Upgrade GHC to 9.6.x (#436)](https://gitlab.iscpif.fr/gargantext/haskell-gargantext/issues/436) and [Merge Request](https://gitlab.iscpif.fr/gargantext/haskell-gargantext/merge_requests/395)
* [BACK][FIX][Error during import of term in TSV format (#381)](https://gitlab.iscpif.fr/gargantext/haskell-gargantext/issues/381)
* [BACK][FIX][Loading a terms file with empty terms gives an undecipherable and inconsistent error (#395)](https://gitlab.iscpif.fr/gargantext/haskell-gargantext/issues/395) and [Merge Request](https://gitlab.iscpif.fr/gargantext/haskell-gargantext/merge_requests/399)
## Version 0.0.7.4.5.1
* [FRONT][FIX][[Corpus import/upload] The error message has disappeared on version 0.0.7.4.2 at least (#728)](https://gitlab.iscpif.fr/gargantext/purescript-gargantext/issues/728)
## Version 0.0.7.4.5
* [BACK][FIX][Error when uploading a specific TSV file (#433)](https://gitlab.iscpif.fr/gargantext/haskell-gargantext/issues/433)
......
......@@ -103,6 +103,17 @@ Then initialize the database using the dedicated command: from inside the
And provide a name and a password for the master user as requested.
### Launching CoreNLP
CoreNLP can be started with nix:
```shell
nix-shell --run startCoreNLPServer.sh
```
By default, this starts on port `9000`. If you want a different port, then do:
```shell
nix-shell --run 'startCoreNLPServer.sh -p 9001'
```
### Running Gargantext
From inside the `haskell-gargantext/` directory, run
......@@ -237,11 +248,11 @@ The CI is on gitlab and is controlled by the `.gitlab-ci.yml` file.
The Docker image that is used can be built with:
```shell
podman build -t cgenie/gargantext:9.4.8 -f ./devops/docker/Dockerfile --format=docker .
podman build -t cgenie/gargantext:9.6.6 --pull -f ./devops/docker/Dockerfile --format=docker .
```
or
```shell
docker build -t cgenie/gargantext:9.4.8 -f ./devops/docker/Dockerfile .
docker build -t cgenie/gargantext:9.6.6 --pull -f ./devops/docker/Dockerfile .
```
NOTE: if podman/docker complains about not enough space, it's probably
......@@ -263,9 +274,9 @@ When a development is needed on libraries (for instance, the HAL crawler in http
2. When changes work and tests are OK, commit in repo `hal`
2. When changes are committed / merged:
1. Get the hash id, and edit `cabal.project` with the **new commit id**
2. run `./bin/update-project-dependencies`
2. run `nix-shell --run ./bin/update-project-dependencies`
- get an error that the sha256 doesn't match, so update the `./bin/update-project-dependencies` with the new sha256 hash
- run again `./bin/update-project-dependencies` (to make sure it's a fixed point now)
- run again `nix-shell --run ./bin/update-project-dependencies` (to make sure it's a fixed point now)
> Note: without `stack.yaml` we would have to only fix `cabal.project` -> `source-repository-package` commit id. Sha256 is there to make sure CI reruns the tests.
......
#!/usr/bin/env bash
# Install the cabal2stack executable (used by the CI to derive a
# stack.yaml from the cabal build plan) if it is not already on PATH.
#
# Requires STORE_DIR and INDEX_STATE, which are provided by
# setup-ci-environment.
set -euxo pipefail

# Load the CI environment regardless of whether we are invoked from
# inside bin/ or from the repository root.
current_dir=$(basename "$PWD")
if [ "$current_dir" == "bin" ]; then
  source ./setup-ci-environment
else
  source ./bin/setup-ci-environment
fi

# Refresh the Hackage index pinned at INDEX_STATE for reproducible builds.
# STORE_DIR is quoted: CI_PROJECT_DIR may contain spaces.
cabal --store-dir="$STORE_DIR" v2-update "hackage.haskell.org,${INDEX_STATE}"

# Install cabal2stack if it can't be found.
if ! cabal2stack --help &> /dev/null
then
  echo "cabal2stack could not be found"
  CURDIR=$PWD
  # Make sure the temporary checkout is removed even if the install
  # step fails (set -e would otherwise leave it behind).
  trap 'cd "$CURDIR" && rm -rf cabal2stack-installer' EXIT
  git clone https://github.com/iconnect/cabal2stack.git cabal2stack-installer
  cd cabal2stack-installer
  cabal --store-dir="$STORE_DIR" v2-install --allow-newer --index-state="${INDEX_STATE}" --overwrite-policy=always
  cd "$CURDIR"
  rm -rf cabal2stack-installer
fi
......@@ -6,10 +6,8 @@ current_dir=$(basename "$PWD")
if [ "$current_dir" == "bin" ]; then
source ./setup-ci-environment
./install-cabal2stack
else
source ./bin/setup-ci-environment
./bin/install-cabal2stack
fi
# README!
......
......@@ -72,7 +72,7 @@ source-repository-package
source-repository-package
type: git
location: https://gitlab.iscpif.fr/gargantext/crawlers/istex.git
tag: 521ca54f1502b13f629eff2223aaf5007e6d52ec
tag: 894482ef97eadce6b1c13ebced1edfe394b5be5e
source-repository-package
type: git
......@@ -132,12 +132,12 @@ source-repository-package
location: https://github.com/haskell-github-trust/ekg-json
tag: bd0592818882f9cf34d2991d01f7dcb3d8bca309
-- FIXME(adn) Compat-shim while we wait for upstream to catch-up
-- NOTE(adn) This fork binds to nng.
source-repository-package
type: git
location: https://github.com/garganscript/nanomsg-haskell
tag: 5868db564d7d3c4568ccd11c852292b834d26c55
location: https://github.com/adinapoli/nanomsg-haskell
tag: 2d69707bf639be2055e3228dab38cc4f2a658111
source-repository-package
type: git
location: https://github.com/adinapoli/http-reverse-proxy.git
......
# Minimal image that serves Stanford CoreNLP over HTTP.
# The build context must contain stanford-corenlp-current
# (produced by devops/coreNLP/build.sh).
FROM openjdk
#ADD home/debian/CoreNLP /CoreNLP
ADD stanford-corenlp-current /CoreNLP
WORKDIR /CoreNLP
# Exec form: run the script directly (no intermediate shell) so the
# server receives SIGTERM on `docker stop` instead of being SIGKILLed.
CMD ["./startServer.sh"]
#!/bin/bash
# Download and unpack a pinned Stanford CoreNLP release, then symlink it
# to stanford-corenlp-current so the Dockerfile can ADD it.
# Each step is skipped when its artifact already exists, so the script
# is safe to re-run.
#
# releases are here:
# https://stanfordnlp.github.io/CoreNLP/history.html
set -euo pipefail

VERSION=4.5.4
FILE="stanford-corenlp-${VERSION}.zip"
DIR_V="stanford-corenlp-${VERSION}"
DIR="stanford-corenlp-current"
URL="http://nlp.stanford.edu/software/${FILE}"

# NOTE: plain `[ ... ] && cmd` one-liners would trip `set -e` when the
# guard is false, hence the explicit if-statements.
if [ ! -f "${FILE}" ]; then
  echo 'Fetching file'
  wget "${URL}" -O "${FILE}"
fi
if [ ! -d "${DIR_V}" ]; then
  echo 'Unzipping file'
  unzip "./${FILE}"
fi
if [ ! -L "${DIR}" ]; then
  echo "Symlinking ${DIR_V} -> ${DIR}"
  ln -s "${DIR_V}" "${DIR}"
fi
if [ ! -f "${DIR}/startServer.sh" ]; then
  echo "Copying startServer.sh"
  cp ./startServer.sh "${DIR}/"
fi

# Fix: the original left `--pull .` outside the quoted message.
echo "You can now build with: docker build -t cgenie/corenlp-garg:${VERSION} --pull ."
#!/bin/sh
# Start the CoreNLP HTTP server on port 9000 with a 4 GB max heap and a
# 15 s per-request timeout. `-cp "*"` loads every jar in the current
# directory, so this must be run from the CoreNLP distribution root.
java -mx4g -cp "*" edu.stanford.nlp.pipeline.StanfordCoreNLPServer -port 9000 -timeout 15000
......@@ -3,16 +3,6 @@ FROM ubuntu:noble
## NOTA BENE: In order for this to be built successfully, you have to run ./devops/coreNLP/build.sh first.
ARG DEBIAN_FRONTEND=noninteractive
ARG GHC=9.6.6
ARG CORENLP=4.5.4
ARG CORE
COPY ./shell.nix /builds/gargantext/shell.nix
COPY ./nix/pkgs.nix /builds/gargantext/nix/pkgs.nix
COPY ./nix/pinned-25.05.nix /builds/gargantext/nix/pinned-25.05.nix
COPY ./devops/coreNLP/build.sh /root/devops/coreNLP/build.sh
COPY ./devops/coreNLP/startServer.sh /root/devops/coreNLP/startServer.sh
COPY ./bin/setup-ci-environment /builds/gargantext/bin/setup-ci-environment
COPY ./bin/install-cabal2stack /builds/gargantext/bin/install-cabal2stack
ENV TZ=Europe/Rome
ENV LANG='en_US.UTF-8' LANGUAGE='en_US:en' LC_ALL='en_US.UTF-8'
......@@ -25,32 +15,15 @@ ENV PATH=/root/.local/bin:$PATH
RUN apt-get update && \
apt-get install --no-install-recommends -y \
apt-transport-https \
autoconf \
automake \
build-essential \
ca-certificates \
curl \
gcc \
git \
gnupg2 \
libffi-dev \
libffi8 \
libgmp-dev \
libgmp10 \
libncurses-dev \
libncurses6 \
libnuma-dev \
libtinfo6 \
locales \
lsb-release \
software-properties-common \
strace \
sudo \
wget \
vim \
xz-utils \
zlib1g-dev \
openjdk-21-jdk \
#zlib1g-dev \
unzip && \
apt-get clean && rm -rf /var/lib/apt/lists/* && \
mkdir -m 0755 /nix && groupadd -r nixbld && chown root /nix && \
......@@ -60,16 +33,21 @@ RUN apt-get update && \
SHELL ["/bin/bash", "-o", "pipefail", "-c"]
RUN cd /root/devops/coreNLP; ./build.sh && \
set -o pipefail && \
RUN set -o pipefail && \
locale-gen en_US.UTF-8 && \
bash <(curl -L https://releases.nixos.org/nix/nix-2.26.2/install) --no-daemon && \
locale-gen en_US.UTF-8 && chown root -R /nix && \
chown root -R /nix && \
. "$HOME/.nix-profile/etc/profile.d/nix.sh" && \
mkdir -p "/builds/gargantext/" && chmod 777 -R "/builds/gargantext" && \
echo "source $HOME/.nix-profile/etc/profile.d/nix.sh" >> "$HOME/.bashrc" && \
echo `which nix-env` && \
. $HOME/.bashrc && nix-env --version && \
cd /builds/gargantext && nix-shell --run "./bin/install-cabal2stack"
. $HOME/.bashrc && nix-env --version
# We want to cache nix artifacts in the Dockerfile to improve CI speed
COPY ./shell.nix /nix-ci-build/
COPY ./nix /nix-ci-build/
RUN set -o pipefail && \
pushd /nix-ci-build/ && nix-build shell.nix && popd
WORKDIR "/builds/gargantext/"
version: '3'
name: 'gargantext'
services:
caddy:
image: caddy:alpine
ports:
- 8108:8108
volumes:
- ./Caddyfile:/etc/caddy/Caddyfile:ro
- ../../purescript-gargantext:/srv/purescript-gargantext:ro
# caddy:
# image: caddy:alpine
# ports:
# - 8108:8108
# volumes:
# - ./Caddyfile:/etc/caddy/Caddyfile:ro
# - ../../purescript-gargantext:/srv/purescript-gargantext:ro
#postgres11:
# #image: 'postgres:latest'
......@@ -61,12 +62,6 @@ services:
# volumes:
# - pgadmin:/var/lib/pgadmin
corenlp:
#image: 'cgenie/corenlp-garg:latest'
image: 'cgenie/corenlp-garg:4.5.4'
ports:
- 9000:9000
# johnsnownlp:
# image: 'johnsnowlabs/nlp-server:latest'
# volumes:
......
#!/bin/bash
# Fetch a prebuilt CoreNLP bundle from dl.gargantext.org, unpack it and
# launch its server in the foreground.
# Fix: without `set -e` a failed download still proceeded to `tar` and
# `./startServer.sh`; the archive was also re-downloaded on every run.
set -euo pipefail

mkdir -pv coreNLP
pushd coreNLP

# Skip the (large) download when the archive is already present.
if [ ! -f coreNLP.tar.bz2 ]; then
  wget https://dl.gargantext.org/coreNLP.tar.bz2
fi
tar xvjf coreNLP.tar.bz2

pushd home/debian/CoreNLP
./startServer.sh
......@@ -124,8 +124,8 @@ smtp_host = "localhost"
[notifications]
central-exchange = { bind = "tcp://*:5560", connect = "tcp://localhost:5560" }
dispatcher = { bind = "tcp://*:5561", connect = "tcp://localhost:5561" }
central-exchange = { bind = "tcp://*:5560", connect = "tcp://127.0.0.1:5560" }
dispatcher = { bind = "tcp://*:5561", connect = "tcp://127.0.0.1:5561" }
[nlp]
......
......@@ -5,7 +5,7 @@ cabal-version: 3.4
-- see: https://github.com/sol/hpack
name: gargantext
version: 0.0.7.4.5
version: 0.0.7.4.7
synopsis: Search, map, share
description: Please see README.md
category: Data
......@@ -54,6 +54,8 @@ data-files:
test-data/stemming/lancaster.txt
test-data/test_config.ini
test-data/test_config.toml
test-data/issue-381/Termes_A_Ajouter_T4SC_Intellixir.tsv
test-data/issue-381/Termes_A_Ajouter_T4SC_Intellixir12.csv
.clippy.dhall
-- common options
......@@ -309,6 +311,7 @@ library
Gargantext.Orphans.Accelerate
Gargantext.Orphans.OpenAPI
Gargantext.System.Logging
Gargantext.System.Logging.Loggers
Gargantext.System.Logging.Types
Gargantext.Utils.Dict
Gargantext.Utils.Jobs.Error
......@@ -803,6 +806,7 @@ test-suite garg-test-tasty
other-modules:
CLI.Phylo.Common
Paths_gargantext
Test.API.Private.List
Test.API.Private.Move
Test.API.Private.Remote
Test.API.Private.Share
......@@ -877,6 +881,7 @@ test-suite garg-test-hspec
Test.API.GraphQL
Test.API.Notifications
Test.API.Private
Test.API.Private.List
Test.API.Private.Move
Test.API.Private.Remote
Test.API.Private.Share
......
......@@ -42,6 +42,9 @@ cradle:
- path: "./bin/gargantext-cli/CLI/Phylo/Profile.hs"
component: "gargantext:exe:gargantext"
- path: "./bin/gargantext-cli/CLI/Server.hs"
component: "gargantext:exe:gargantext"
- path: "./bin/gargantext-cli/CLI/Server/Routes.hs"
component: "gargantext:exe:gargantext"
......@@ -51,13 +54,10 @@ cradle:
- path: "./bin/gargantext-cli/CLI/Upgrade.hs"
component: "gargantext:exe:gargantext"
- path: "./bin/gargantext-cli/Paths_gargantext.hs"
component: "gargantext:exe:gargantext"
- path: "./bin/gargantext-server/Main.hs"
- path: "./bin/gargantext-cli/CLI/Worker.hs"
component: "gargantext:exe:gargantext"
- path: "./bin/gargantext-server/Paths_gargantext.hs"
- path: "./bin/gargantext-cli/Paths_gargantext.hs"
component: "gargantext:exe:gargantext"
- path: "./test"
......@@ -68,9 +68,3 @@ cradle:
- path: "./test"
component: "gargantext:test:garg-test-hspec"
- path: "./bench/Main.hs"
component: "gargantext:bench:garg-bench"
- path: "./bench/Paths_gargantext.hs"
component: "gargantext:bench:garg-bench"
# Build the cabal2stack executable (converts a cabal.project build plan
# into a stack.yaml) from a pinned upstream revision.
{ lib,
ghc,
haskellPackages,
fetchFromGitHub }:
let
# Pinned source checkout; the hash makes the fetch reproducible.
src = fetchFromGitHub {
repo = "cabal2stack";
owner = "iconnect";
rev = "e4960683f518ab4c964e7646706fe2a7e1bf751a";
hash = "sha256-KE9VUXFy9QfRmu/+DFcgxV/E6oPBAR7hRaFrSt93eeY=";
};
in
with haskellPackages;
mkDerivation {
inherit src;
pname = "cabal2stack";
version = "0";
isExecutable = true;
executableHaskellDepends = [ base
aeson
cabal-plan
HsYAML
HsYAML-aeson
optics-core
optics-extra
optparse-applicative
transformers ];
# Local patch relaxing the pinned revision's dependency upper bounds
# (see patches/cabal2stack.patch) so it builds with this nixpkgs' GHC.
patches = [
./patches/cabal2stack.patch
];
license = lib.licenses.bsd3;
mainProgram = "cabal2stack";
}
# Package Stanford CoreNLP: unpack the upstream zip and install a
# wrapper script (startCoreNLPServer.sh) that launches the HTTP server.
# https://nixos.wiki/wiki/Java
{ fetchzip,
makeWrapper,
stdenv,
writeShellScript,
jre,
version ? "4.5.9",
hash ? "sha256-DOGBkGJfvR1PoXz2CNoo58HXwGLxvPKMChRqlrFtQLQ=",
}:
stdenv.mkDerivation (finalAttrs:
let
# Launcher script: accepts `-p PORT` (default 9000) and forwards any
# remaining arguments to the CoreNLP server.
startServer = writeShellScript "startCoreNLPServer.sh" ''
set -x
PORT=9000
while getopts ':p:h' opt; do
case $opt in
(p) PORT=$OPTARG;;
(h) echo "$(basename $0) [-p 9000]"
exit 0
;;
esac
done
shift "$((OPTIND - 1))"
${jre}/bin/java -mx4g -cp "$CORENLP_PATH/*" edu.stanford.nlp.pipeline.StanfordCoreNLPServer -port $PORT -timeout 15000 "$@"
'';
# Map a CoreNLP release version to the Hugging Face repo commit that
# contains it; see https://huggingface.co/stanfordnlp/CoreNLP/commits/main
versionCommits = {
"4.5.8" = "34264e88b7add9e0045f4727bc7d1872385f06aa";
"4.5.9" = "06f79ee8b1ec475d7630b1871bfd75a57c77ffa4";
};
commit = versionCommits."${finalAttrs.version}";
in
{
name = "corenlp";
inherit version;
src = fetchzip {
inherit hash;
# url = "http://nlp.stanford.edu/software/stanford-corenlp-${finalAttrs.version}.zip";
# Hugging Face is more stable than the stanford.edu download host.
url = "https://huggingface.co/stanfordnlp/CoreNLP/resolve/${commit}/stanford-corenlp-latest.zip";
};
buildInputs = [
jre
];
nativeBuildInputs = [
makeWrapper
];
# Nothing to compile: just unpack the zip and install it.
phases = [ "unpackPhase" "installPhase" ];
installPhase = ''
runHook preInstall
mkdir -p $out/bin
mkdir -p $out/share/corenlp
cp -r . $out/share/corenlp
makeWrapper ${startServer} $out/bin/startCoreNLPServer.sh \
--set CORENLP_PATH "$out/share/corenlp"
runHook postInstall
'';
}
)
# Graphviz pinned to a post-10.x development commit, with a patch that
# enlarges the lexer buffer (YY_BUF_SIZE) — see gargantext#290.
{ fetchFromGitLab,
fetchpatch,
graphviz }:
let
# Override nixpkgs' graphviz source with the pinned upstream commit.
graphviz_dev = graphviz.overrideAttrs (finalAttrs: previousAttrs: {
version = "11.0.0~dev";
src = fetchFromGitLab {
owner = "graphviz";
repo = "graphviz";
rev = "f3ec849249ef9cb824feb7f97449d7159e1dcb4e"; # head as of 2024-03-25, see gargantext#329
hash = "sha256-s86IqWz6zeKbcRqpV3cVQBVviHbhUSX1U8GVuJBfjC4=";
};
});
in
graphviz_dev.overrideAttrs
(finalAttrs: previousAttrs: {
# Increase the YY_BUF_SIZE, see https://gitlab.iscpif.fr/gargantext/haskell-gargantext/issues/290#note_9015
patches = [
(fetchpatch {
url = "https://gist.githubusercontent.com/adinapoli/e93ca7b1d714d27f4af537716b03e3bb/raw/b9cc297c3465878da2d18ee92a3f9b8273923493/graphviz-yy-buf-size.patch";
sha256 = "sha256-8Q3tf37iYaPV50P+Vf/n263ordECiu5eKwONCy3ynV8=";
})
];
})
# igraph pinned to a specific version (default 0.10.4), built against
# the system BLAS/LAPACK/ARPACK/GLPK/GMP/plfit instead of the bundled
# copies, with OpenMP support enabled.
{ fetchFromGitHub,
igraph,
arpack,
blas,
glpk,
gmp,
lapack,
libxml2,
nanomsg,
plfit,
llvmPackages,
version ? "0.10.4",
hash ? "sha256-LsTOxUktGZcp46Ec9QH3+9C+VADMYTZZCjKF1gp36xk=" }:
igraph.overrideAttrs (finalAttrs: previousAttrs: {
inherit version;
nativeBuildInputs = previousAttrs.nativeBuildInputs;
# Replace nixpkgs' source with the pinned upstream release tag.
src = fetchFromGitHub {
inherit hash;
owner = "igraph";
repo = "igraph";
rev = version;
};
# The GitHub tarball lacks the version file the build expects.
postPatch = ''
echo "${version}" > IGRAPH_VERSION
'';
outputs = [ "dev" "out" "doc" ];
buildInputs = [
arpack
blas
glpk
gmp
lapack
libxml2
nanomsg
plfit
llvmPackages.openmp
];
# Use system libraries rather than the vendored copies; install into
# explicit prefix/lib/share locations.
cmakeFlags = [
"-DIGRAPH_USE_INTERNAL_BLAS=OFF"
"-DIGRAPH_USE_INTERNAL_LAPACK=OFF"
"-DIGRAPH_USE_INTERNAL_ARPACK=OFF"
"-DIGRAPH_USE_INTERNAL_GLPK=OFF"
"-DIGRAPH_USE_INTERNAL_GMP=OFF"
"-DIGRAPH_USE_INTERNAL_PLFIT=OFF"
"-DIGRAPH_GLPK_SUPPORT=ON"
"-DIGRAPH_GRAPHML_SUPPORT=OFF"
"-DIGRAPH_OPENMP_SUPPORT=ON"
"-DIGRAPH_ENABLE_LTO=AUTO"
"-DIGRAPH_ENABLE_TLS=ON"
"-DBUILD_SHARED_LIBS=ON"
"-DCMAKE_INSTALL_PREFIX=${placeholder "out"}"
"-DCMAKE_INSTALL_LIBDIR=${placeholder "out"}/lib"
"-DCMAKE_INSTALL_DATADIR=${placeholder "out"}/share"
];
doCheck = false;
postInstall = ''
mkdir -p "$out/share"
cp -r doc "$out/share"
'';
# Move the headers up one level so downstream builds find them.
postFixup = previousAttrs.postFixup + ''
echo "Copying files where they belong .."
CUR_DIR=$PWD
cd "$dev/include/igraph" && cp *.h ../
cd $CUR_DIR
'';
})
diff --git i/cabal2stack.cabal w/cabal2stack.cabal
index 69767a2..92c4895 100644
--- i/cabal2stack.cabal
+++ w/cabal2stack.cabal
@@ -26,14 +26,14 @@ executable cabal2stack
-- boot dependencies
build-depends:
- , base >=4.12 && <4.18
+ , base >=4.12 && <5
, bytestring ^>=0.10.8.2 || ^>=0.11.3.0
, containers ^>=0.6.0.1
, directory ^>=1.3.3.0
, filepath ^>=1.4.2.1
, process ^>=1.6.5.0
, text >=1.2.3.0 && <2.1
- , transformers ^>=0.5.6.2
+ , transformers >=0.5.6.2 && < 0.7
-- other dependencies
build-depends:
@@ -43,4 +43,4 @@ executable cabal2stack
, HsYAML-aeson ^>=0.2.0.1
, optics-core ^>=0.4
, optics-extra ^>=0.4
- , optparse-applicative ^>=0.17.0.0
+ , optparse-applicative >=0.17.0.0 && < 0.20
......@@ -6,88 +6,15 @@ rec {
inherit pkgs;
ghc966 = pkgs.haskell.compiler.ghc966;
cabal_install = pkgs.haskell.lib.compose.justStaticExecutables pkgs.haskell.packages.ghc966.cabal-install;
graphviz_dev = pkgs.graphviz.overrideAttrs (finalAttrs: previousAttrs: {
version = "11.0.0~dev";
src = pkgs.fetchFromGitLab {
owner = "graphviz";
repo = "graphviz";
rev = "f3ec849249ef9cb824feb7f97449d7159e1dcb4e"; # head as of 2024-03-25, see gargantext#329
hash = "sha256-s86IqWz6zeKbcRqpV3cVQBVviHbhUSX1U8GVuJBfjC4=";
};
});
graphviz = graphviz_dev.overrideAttrs (finalAttrs: previousAttrs: {
# Increase the YY_BUF_SIZE, see https://gitlab.iscpif.fr/gargantext/haskell-gargantext/issues/290#note_9015
patches = [
(pkgs.fetchpatch {
url = "https://gist.githubusercontent.com/adinapoli/e93ca7b1d714d27f4af537716b03e3bb/raw/b9cc297c3465878da2d18ee92a3f9b8273923493/graphviz-yy-buf-size.patch";
sha256 = "sha256-8Q3tf37iYaPV50P+Vf/n263ordECiu5eKwONCy3ynV8=";
})
];
});
igraph_0_10_4 = pkgs.igraph.overrideAttrs (finalAttrs: previousAttrs: {
version = "0.10.4";
nativeBuildInputs = previousAttrs.nativeBuildInputs;
src = pkgs.fetchFromGitHub {
owner = "igraph";
repo = "igraph";
rev = "0.10.4";
hash = "sha256-LsTOxUktGZcp46Ec9QH3+9C+VADMYTZZCjKF1gp36xk=";
};
postPatch = ''
echo "0.10.4" > IGRAPH_VERSION
'';
outputs = [ "dev" "out" "doc" ];
buildInputs = [
pkgs.arpack
pkgs.blas
pkgs.glpk
pkgs.gmp
pkgs.lapack
pkgs.libxml2
pkgs.nanomsg
pkgs.plfit
pkgs.llvmPackages.openmp
];
cmakeFlags = [
"-DIGRAPH_USE_INTERNAL_BLAS=OFF"
"-DIGRAPH_USE_INTERNAL_LAPACK=OFF"
"-DIGRAPH_USE_INTERNAL_ARPACK=OFF"
"-DIGRAPH_USE_INTERNAL_GLPK=OFF"
"-DIGRAPH_USE_INTERNAL_GMP=OFF"
"-DIGRAPH_USE_INTERNAL_PLFIT=OFF"
"-DIGRAPH_GLPK_SUPPORT=ON"
"-DIGRAPH_GRAPHML_SUPPORT=OFF"
"-DIGRAPH_OPENMP_SUPPORT=ON"
"-DIGRAPH_ENABLE_LTO=AUTO"
"-DIGRAPH_ENABLE_TLS=ON"
"-DBUILD_SHARED_LIBS=ON"
"-DCMAKE_INSTALL_PREFIX=${placeholder "out"}"
"-DCMAKE_INSTALL_LIBDIR=${placeholder "out"}/lib"
"-DCMAKE_INSTALL_DATADIR=${placeholder "out"}/share"
];
doCheck = false;
postInstall = ''
mkdir -p "$out/share"
cp -r doc "$out/share"
'';
postFixup = previousAttrs.postFixup + ''
echo "Copying files where they belong .."
CUR_DIR=$PWD
cd "$dev/include/igraph" && cp *.h ../
cd $CUR_DIR
'';
graphviz = pkgs.callPackage ./graphviz.nix {};
igraph_0_10_4 = pkgs.callPackage ./igraph.nix {};
corenlp = pkgs.callPackage ./corenlp.nix { }; # 4.5.8
cabal2stack = pkgs.callPackage ./cabal2stack.nix { ghc = ghc966; };
nng_notls = pkgs.nng.overrideAttrs (old: {
cmakeFlags = (old.cmakeFlags or []) ++ [ "-DNNG_ENABLE_TLS=OFF" ];
});
hsBuildInputs = [
ghc966
cabal_install
......@@ -96,32 +23,36 @@ rec {
pkgs.haskellPackages.pretty-show
];
nonhsBuildInputs = with pkgs; [
#haskell-language-server
blas
bzip2
cabal2stack
corenlp
curl
czmq
docker-compose
expat
gfortran
git
gmp
graphviz
gsl
#haskell-language-server
hlint
libffi
icu
igraph_0_10_4
jre
lapack
xz
libffi
libpqxx
libsodium
nng_notls
nil # nix language server
pcre
pkg-config
postgresql
stdenv.cc.cc
xz
zlib
blas
gfortran
expat
icu
graphviz
gcc13
igraph_0_10_4
libpqxx
libsodium
nanomsg
zeromq
curl
] ++ ( lib.optionals stdenv.isDarwin [
......
......@@ -70,29 +70,30 @@ import System.Cron.Schedule qualified as Cron
-- | startGargantext takes as parameters port number and Toml file.
startGargantext :: Mode -> PortNumber -> SettingsFile -> IO ()
startGargantext mode port sf@(SettingsFile settingsFile) = withLoggerIO mode $ \logger -> do
startGargantext mode port sf@(SettingsFile settingsFile) = do
config <- readConfig sf <&> (gc_frontend_config . fc_appPort) .~ port
when (port /= config ^. gc_frontend_config . fc_appPort) $
panicTrace "TODO: conflicting settings of port"
withNotifications config $ \dispatcher -> do
env <- newEnv logger config dispatcher
let fc = env ^. env_config . gc_frontend_config
let proxyStatus = microServicesProxyStatus fc
runDbCheck env
startupInfo config port proxyStatus
app <- makeApp env
mid <- makeGargMiddleware (fc ^. fc_cors) mode
periodicActions <- schedulePeriodicActions env
let runServer = run port (mid app) `finally` stopGargantext periodicActions
case proxyStatus of
PXY_disabled
-> runServer -- the proxy is disabled, do not spawn the application
PXY_enabled proxyPort
-> do
proxyCache <- InMemory.newCache (Just oneHour)
let runProxy = run proxyPort (mid (microServicesProxyApp proxyCache env))
Async.race_ runServer runProxy
withLoggerIO (config ^. gc_logging) $ \logger -> do
when (port /= config ^. gc_frontend_config . fc_appPort) $
panicTrace "TODO: conflicting settings of port"
withNotifications config $ \dispatcher -> do
env <- newEnv logger config dispatcher
let fc = env ^. env_config . gc_frontend_config
let proxyStatus = microServicesProxyStatus fc
runDbCheck env
startupInfo config port proxyStatus
app <- makeApp env
mid <- makeGargMiddleware (fc ^. fc_cors) mode
periodicActions <- schedulePeriodicActions env
let runServer = run port (mid app) `finally` stopGargantext periodicActions
case proxyStatus of
PXY_disabled
-> runServer -- the proxy is disabled, do not spawn the application
PXY_enabled proxyPort
-> do
proxyCache <- InMemory.newCache (Just oneHour)
let runProxy = run proxyPort (mid (microServicesProxyApp proxyCache env))
Async.race_ runServer runProxy
where runDbCheck env = do
r <- runExceptT (runReaderT DB.dbCheck env) `catch`
......
......@@ -38,12 +38,11 @@ module Gargantext.API.Admin.EnvTypes (
import Control.Lens (to, view)
import Data.List ((\\))
import Data.Pool (Pool)
import Data.Text qualified as T
import Database.PostgreSQL.Simple (Connection)
import Gargantext.API.Admin.Orchestrator.Types (JobLog, noJobLog)
import Gargantext.API.Errors.Types (BackendInternalError)
import Gargantext.API.Prelude (GargM, IsGargServer)
import Gargantext.Core.Config (GargConfig(..), gc_mail_config, gc_nlp_config, HasJWTSettings(..), HasConfig(..), HasManager(..), gc_logging, lc_log_level)
import Gargantext.Core.Config
import Gargantext.Core.Mail.Types (HasMail, mailSettings)
import Gargantext.Core.NLP (HasNLPServer(..), nlpServerMap)
import Gargantext.Core.NodeStory
......@@ -58,6 +57,7 @@ import Gargantext.Utils.Jobs.Monad (MonadJobStatus(..))
import Network.HTTP.Client (Manager)
import Servant.Auth.Server (JWTSettings)
import System.Log.FastLogger qualified as FL
import Gargantext.System.Logging.Loggers
data Mode = Dev | Mock | Prod
......@@ -139,21 +139,13 @@ instance MonadLogger (GargM DevEnv BackendInternalError) where
instance HasLogger (GargM DevEnv BackendInternalError) where
data instance Logger (GargM DevEnv BackendInternalError) =
GargDevLogger {
dev_logger_mode :: Mode
, dev_logger_set :: FL.LoggerSet
}
type instance LogInitParams (GargM DevEnv BackendInternalError) = Mode
GargDevLogger { _GargDevLogger :: MonadicStdLogger FL.LogStr IO }
type instance LogInitParams (GargM DevEnv BackendInternalError) = LogConfig
type instance LogPayload (GargM DevEnv BackendInternalError) = FL.LogStr
initLogger = \mode -> do
dev_logger_set <- liftIO $ FL.newStderrLoggerSet FL.defaultBufSize
pure $ GargDevLogger mode dev_logger_set
destroyLogger = \GargDevLogger{..} -> liftIO $ FL.rmLoggerSet dev_logger_set
logMsg = \(GargDevLogger mode logger_set) lvl msg -> do
let pfx = "[" <> show lvl <> "] " :: Text
when (lvl `elem` (modeToLoggingLevels mode)) $
liftIO $ FL.pushLogStrLn logger_set $ FL.toLogStr pfx <> msg
logTxt lgr lvl msg = logMsg lgr lvl (FL.toLogStr $ T.unpack msg)
initLogger cfg = fmap GargDevLogger $ (liftIO $ monadicStdLogger cfg)
destroyLogger = liftIO . _msl_destroy . _GargDevLogger
logMsg (GargDevLogger ioLogger) lvl msg = liftIO $ _msl_log_msg ioLogger lvl msg
logTxt (GargDevLogger ioLogger) lvl msg = liftIO $ _msl_log_txt ioLogger lvl msg
data DevEnv = DevEnv
{ _dev_env_config :: !GargConfig
......@@ -225,25 +217,14 @@ instance HasNLPServer DevEnv where
instance IsGargServer Env BackendInternalError (GargM Env BackendInternalError)
instance HasLogger (GargM Env BackendInternalError) where
data instance Logger (GargM Env BackendInternalError) =
GargLogger {
logger_mode :: Mode
, logger_set :: FL.LoggerSet
}
type instance LogInitParams (GargM Env BackendInternalError) = Mode
newtype instance Logger (GargM Env BackendInternalError) =
GargLogger { _GargLogger :: MonadicStdLogger FL.LogStr IO }
type instance LogInitParams (GargM Env BackendInternalError) = LogConfig
type instance LogPayload (GargM Env BackendInternalError) = FL.LogStr
initLogger mode = do
logger_set <- liftIO $ FL.newStderrLoggerSet FL.defaultBufSize
pure $ GargLogger mode logger_set
destroyLogger (GargLogger{..}) = liftIO $ FL.rmLoggerSet logger_set
logMsg (GargLogger mode logger_set) lvl msg = do
cfg <- view hasConfig
let minLvl = cfg ^. gc_logging . lc_log_level
when (lvl >= minLvl) $ do
let pfx = "[" <> show lvl <> "] " :: Text
when (lvl `elem` (modeToLoggingLevels mode)) $
liftIO $ FL.pushLogStrLn logger_set $ FL.toLogStr pfx <> msg
logTxt lgr lvl msg = logMsg lgr lvl (FL.toLogStr $ T.unpack msg)
initLogger cfg = fmap GargLogger $ (liftIO $ monadicStdLogger cfg)
destroyLogger = liftIO . _msl_destroy . _GargLogger
logMsg (GargLogger ioLogger) lvl msg = liftIO $ _msl_log_msg ioLogger lvl msg
logTxt (GargLogger ioLogger) lvl msg = liftIO $ _msl_log_txt ioLogger lvl msg
instance MonadLogger (GargM Env BackendInternalError) where
getLogger = asks _env_logger
......@@ -266,10 +266,11 @@ nodeWriteChecks nid =
-- if:
-- * He/she is a super user
-- * He/she owns the target or the source
-- * The node has been shared with the user
moveChecks :: SourceId -> TargetId -> BoolExpr AccessCheck
moveChecks (SourceId sourceId) (TargetId targetId) =
BAnd (nodeUser sourceId `BOr` nodeSuper sourceId)
(nodeUser targetId `BOr` nodeUser targetId)
BAnd (nodeUser sourceId `BOr` nodeSuper sourceId `BOr` nodeShared sourceId)
(nodeUser targetId `BOr` nodeUser targetId `BOr` nodeShared targetId)
publishChecks :: NodeId -> BoolExpr AccessCheck
publishChecks nodeId =
......
......@@ -16,11 +16,11 @@ import Control.Lens (view)
import Control.Monad (fail)
import Database.PostgreSQL.Simple qualified as PGS
import Data.Pool (withResource)
import Gargantext.API.Admin.EnvTypes ( DevEnv(..), Mode(Dev) )
import Gargantext.API.Admin.EnvTypes ( DevEnv(..) )
import Gargantext.API.Admin.Settings ( newPool )
import Gargantext.API.Errors.Types ( BackendInternalError )
import Gargantext.API.Prelude ( GargM )
import Gargantext.Core.Config (_gc_database_config)
import Gargantext.Core.Config (_gc_database_config, gc_logging)
import Gargantext.Core.Config.Types (SettingsFile(..))
import Gargantext.Core.Config.Utils (readConfig)
import Gargantext.Core.NodeStory (fromDBNodeStoryEnv)
......@@ -32,14 +32,14 @@ import Servant ( ServerError )
-------------------------------------------------------------------
withDevEnv :: SettingsFile -> (DevEnv -> IO a) -> IO a
withDevEnv settingsFile k = withLoggerIO Dev $ \logger -> do
env <- newDevEnv logger
k env -- `finally` cleanEnv env
withDevEnv settingsFile k = do
cfg <- readConfig settingsFile
withLoggerIO (cfg ^. gc_logging) $ \logger -> do
env <- newDevEnv logger cfg
k env -- `finally` cleanEnv env
where
newDevEnv logger = do
cfg <- readConfig settingsFile
--nodeStory_env <- fromDBNodeStoryEnv (_gc_repofilepath cfg)
newDevEnv logger cfg = do
pool <- newPool (_gc_database_config cfg)
nodeStory_env <- fromDBNodeStoryEnv pool
manager <- newTlsManager
......
......@@ -22,8 +22,8 @@ import Data.ByteString.Lazy qualified as BSL
import Data.Csv qualified as Tsv
import Data.HashMap.Strict (HashMap)
import Data.HashMap.Strict qualified as HashMap
import Data.Map.Strict qualified as Map
import Data.Map.Strict (toList)
import Data.Map.Strict qualified as Map
import Data.Set qualified as Set
import Data.Text (concat, pack, splitOn)
import Data.Text.Encoding qualified as TE
......@@ -32,13 +32,13 @@ import Data.Vector (Vector)
import Database.PostgreSQL.Simple.LargeObjects qualified as PSQL
import Gargantext.API.Admin.EnvTypes (Env)
import Gargantext.API.Errors.Types (BackendInternalError(InternalServerError))
import Gargantext.API.Ngrams.List.Types (_wjf_data, _wtf_data)
import Gargantext.API.Ngrams.Prelude (getNgramsList)
import Gargantext.API.Ngrams (setListNgrams)
import Gargantext.API.Ngrams.List.Types (_wjf_data, _wtf_data, _wtf_name)
import Gargantext.API.Ngrams.Prelude (getNgramsList)
import Gargantext.API.Ngrams.Types
import Gargantext.API.Prelude (GargM, serverError, HasServerError)
import Gargantext.API.Routes.Named.List qualified as Named
import Gargantext.API.Worker (serveWorkerAPIm)
import Gargantext.API.Worker (serveWorkerAPIM)
import Gargantext.Core.NodeStory.Types ( HasNodeStory )
import Gargantext.Core.Text.Ngrams (Ngrams, NgramsType(NgramsTerms))
import Gargantext.Core.Types.Main (ListType(..))
......@@ -114,7 +114,7 @@ getTsv lId = do
------------------------------------------------------------------------
jsonPostAsync :: Named.JSONAPI (AsServerT (GargM Env BackendInternalError))
jsonPostAsync = Named.JSONAPI {
updateListJSONEp = \lId -> serveWorkerAPIm $ \p -> do
updateListJSONEp = \lId -> serveWorkerAPIM $ \p -> do
(PSQL.Oid oId) <- createLargeObject $ TE.encodeUtf8 $ _wjf_data p
pure $ Jobs.JSONPost { _jp_list_id = lId
, _jp_ngrams_oid = fromIntegral oId }
......@@ -164,14 +164,14 @@ tsvAPI = tsvPostAsync
tsvPostAsync :: Named.TSVAPI (AsServerT (GargM Env BackendInternalError))
tsvPostAsync =
Named.TSVAPI {
updateListTSVEp = \lId -> serveWorkerAPIm $ \p -> do
updateListTSVEp = \lId -> serveWorkerAPIM $ \p -> do
$(logLocM) DEBUG $ "Started to upload " <> (_wtf_name p)
case ngramsListFromTSVData (_wtf_data p) of
Left err -> throwError $ InternalServerError $ err500 { errReasonPhrase = err }
Right ngramsList -> do
(PSQL.Oid oId) <- createLargeObject $ BSL.toStrict $ Aeson.encode ngramsList
pure $ Jobs.JSONPost { _jp_list_id = lId
, _jp_ngrams_oid = fromIntegral oId }
-- , _jp_ngrams_list = ngramsList }
}
-- | Tries converting a text file into an 'NgramList', so that we can reuse the
......@@ -197,6 +197,9 @@ tsvToNgramsTableMap :: Tsv.Record -> Tsv.Parser (Maybe NgramsTableMap)
tsvToNgramsTableMap record = case Vec.toList record of
(map P.decodeUtf8 -> [status, label, forms])
-> pure $ Just $ conv status label forms
-- Garg #381: alias the forms to the empty text.
(map P.decodeUtf8 -> [status, label])
-> pure $ Just $ conv status label mempty
-- WARNING: This silently ignores errors (#433)
_ -> pure Nothing
......
......@@ -29,7 +29,7 @@ import Gargantext.API.Node.Types (NewWithForm(..), NewWithTempFile(..))
import Gargantext.API.Prelude (GargServer, GargM)
import Gargantext.API.Routes.Named.Annuaire qualified as Named
import Gargantext.API.Routes.Named.Corpus qualified as Named
import Gargantext.API.Worker (serveWorkerAPI, serveWorkerAPIm)
import Gargantext.API.Worker (serveWorkerAPI, serveWorkerAPIM)
import Gargantext.Core.Types.Individu (User(..))
import Gargantext.Core.Worker.Jobs.Types qualified as Jobs
import Gargantext.Database.Prelude (createLargeObject)
......@@ -78,7 +78,7 @@ addWithTempFileApi :: AuthenticatedUser
addWithTempFileApi authenticatedUser =
Named.AddWithTempFile {
addWithTempFileEp = \cId ->
serveWorkerAPIm $ \(NewWithForm { .. }) -> do
serveWorkerAPIM $ \(NewWithForm { .. }) -> do
let bs = case _wf_fileformat of
Plain -> cs _wf_data
ZIP -> case BSB64.decode $ TE.encodeUtf8 _wf_data of
......
......@@ -32,7 +32,19 @@ data WorkerAPI contentType input mode = WorkerAPI
-- | Serve a worker API endpoint from a /pure/ job builder.
--
-- Defined in terms of 'serveWorkerAPIM' by lifting the builder with 'pure'.
--
-- Fix: the merge left the stale old equation calling the removed
-- @serveWorkerAPIm@ next to the new one; only the 'serveWorkerAPIM'
-- equation is kept.
serveWorkerAPI :: IsGargServer env err m
               => (input -> Job)
               -> WorkerAPI contentType input (AsServerT m)
serveWorkerAPI f = serveWorkerAPIM (pure . f)
-- | Like 'serveWorkerAPI', but the 'Job' is built inside the monad @m@,
-- so the builder can perform effects first (e.g. create a large object).
-- The built job is logged, sent to the worker queue, and a 'JobInfo'
-- with the resulting message id is returned to the client.
-- NB: the log tag reads @serveWorkerAPI@ since 'serveWorkerAPI' delegates here.
serveWorkerAPIM :: IsGargServer env err m
                => (input -> m Job)
                -> WorkerAPI contentType input (AsServerT m)
serveWorkerAPIM mkJob = WorkerAPI { workerAPIPost }
  where
    workerAPIPost i = do
      job <- mkJob i
      logM DEBUG $ "[serveWorkerAPI] sending job " <> show job
      mId <- sendJob job
      pure $ JobInfo { _ji_message_id = mId
                     , _ji_mNode_id = getWorkerMNodeId job }
serveWorkerAPIEJob :: (MonadError err m, IsGargServer env err m)
=> (input -> Either err Job)
......@@ -47,15 +59,3 @@ serveWorkerAPIEJob f = WorkerAPI { workerAPIPost }
mId <- sendJob job
pure $ JobInfo { _ji_message_id = mId
, _ji_mNode_id = getWorkerMNodeId job }
serveWorkerAPIm :: IsGargServer env err m
=> (input -> m Job)
-> WorkerAPI contentType input (AsServerT m)
serveWorkerAPIm f = WorkerAPI { workerAPIPost }
where
workerAPIPost i = do
job <- f i
logM DEBUG $ "[serveWorkerAPIm] sending job " <> show job
mId <- sendJob job
pure $ JobInfo { _ji_message_id = mId
, _ji_mNode_id = getWorkerMNodeId job }
......@@ -16,37 +16,27 @@ module Gargantext.Core.Config.Utils (
import Gargantext.Core.Config.Types (SettingsFile(..))
import Gargantext.Prelude
-- import Network.URI (URI)
-- import Network.URI (parseURI)
import Toml
import Toml.Schema
import Gargantext.Core.Config
import System.Environment (lookupEnv)
import Gargantext.System.Logging.Types (parseLogLevel)
import qualified Data.Text as T
-- | Read and decode a TOML settings file into a 'GargConfig'.
--
-- Panics ('panicTrace') if the file cannot be parsed. After a successful
-- parse, the @GGTX_LOG_LEVEL@ environment variable — when set to a valid
-- level — overrides the configured log level; an invalid value is
-- reported on stdout and ignored.
--
-- Fix: the merge rendering left both the old polymorphic signature and
-- the old @Success _ r -> return r@ branch (plus deleted commented-out
-- codec helpers) interleaved with the new code; only the new monomorphic
-- version is kept. Also fixes the "Ovverride" typo.
readConfig :: SettingsFile -> IO GargConfig
readConfig (SettingsFile fp) = do
  c <- readFile fp
  case decode c of
    Failure err -> panicTrace ("Error reading TOML file: " <> show err)
    Success _ r -> do
      -- Override the log level based on GGTX_LOG_LEVEL (if set)
      mLvl <- lookupEnv "GGTX_LOG_LEVEL"
      case mLvl of
        Nothing -> pure r
        Just s ->
          case parseLogLevel (T.pack s) of
            Left err -> do
              putStrLn $ "unknown log level " <> s <> ": " <> T.unpack err <> " , ignoring GGTX_LOG_LEVEL"
              pure r
            Right lvl' -> pure $ r & gc_logging . lc_log_level .~ lvl'
......@@ -14,6 +14,8 @@ https://dev.sub.gargantext.org/#/share/Notes/187918
-}
{-# LANGUAGE TemplateHaskell #-}
module Gargantext.Core.Notifications.CentralExchange (
gServer
, notify
......@@ -29,8 +31,8 @@ import Gargantext.Core.Config (GargConfig, gc_notifications_config, gc_logging)
import Gargantext.Core.Config.Types (NotificationsConfig(..))
import Gargantext.Core.Notifications.CentralExchange.Types
import Gargantext.Prelude
import Gargantext.System.Logging (LogLevel(..), withLogger, logMsg)
import Nanomsg (Pull(..), Push(..), bind, connect, recv, send, withSocket)
import Gargantext.System.Logging (LogLevel(..), withLogger, logLoc, Logger)
import Nanomsg (Pull(..), Push(..), bind, connect, recv, send, withSocket, shutdown, Socket, Sender)
import System.Timeout (timeout)
{-
......@@ -46,31 +48,31 @@ with many users having updates.
-}
gServer :: GargConfig -> IO ()
gServer :: HasCallStack => GargConfig -> IO ()
gServer cfg = do
withSocket Pull $ \s -> do
withSocket Push $ \s_dispatcher -> do
withLogger log_cfg $ \ioLogger -> do
logMsg ioLogger DEBUG $ "[central_exchange] binding to " <> T.unpack _nc_central_exchange_bind
_ <- bind s $ T.unpack _nc_central_exchange_bind
withLogger log_cfg $ \ioLogger -> do
logMsg ioLogger DEBUG $ "[central_exchange] connecting to " <> T.unpack _nc_dispatcher_bind
_ <- connect s_dispatcher $ T.unpack _nc_dispatcher_connect
tChan <- TChan.newTChanIO
-- | We have 2 threads: one that listens for nanomsg messages
-- | and puts them on the 'tChan' and the second one that reads
-- | the 'tChan' and calls Dispatcher accordingly. This is to
-- | make reading nanomsg as fast as possible.
void $ Async.concurrently (worker s_dispatcher tChan) $ do
withLogger log_cfg $ \ioLogger -> do
forever $ do
-- putText "[central_exchange] receiving"
r <- recv s
logMsg ioLogger DEBUG $ "[central_exchange] received: " <> show r
-- C.putStrLn $ "[central_exchange] " <> r
atomically $ TChan.writeTChan tChan r
withLogger log_cfg $ \ioLogger -> do
withSocket Pull $ \s -> do
withSocket Push $ \s_dispatcher -> do
$(logLoc) ioLogger DEBUG $ "[central_exchange] binding to " <> _nc_central_exchange_bind
bindEndpoint <- bind s $ T.unpack _nc_central_exchange_bind
$(logLoc) ioLogger DEBUG $ "[central_exchange] bound to " <> show bindEndpoint
$(logLoc) ioLogger DEBUG $ "[central_exchange] connecting to " <> _nc_dispatcher_bind
dispatchEndpoint <- connect s_dispatcher $ T.unpack _nc_dispatcher_connect
$(logLoc) ioLogger DEBUG $ "[central_exchange] connected to " <> show dispatchEndpoint
tChan <- TChan.newTChanIO
-- | We have 2 threads: one that listens for nanomsg messages
-- | and puts them on the 'tChan' and the second one that reads
-- | the 'tChan' and calls Dispatcher accordingly. This is to
-- | make reading nanomsg as fast as possible.
void $ Async.concurrently (worker s_dispatcher tChan) $ do
forever $ do
$(logLoc) ioLogger DEBUG $ "[central_exchange] receiving"
r <- recv s
$(logLoc) ioLogger DEBUG $ "[central_exchange] received: " <> show r
-- C.putStrLn $ "[central_exchange] " <> r
atomically $ TChan.writeTChan tChan r
where
NotificationsConfig{..} = cfg ^. gc_notifications_config
log_cfg = cfg ^. gc_logging
......@@ -80,7 +82,7 @@ gServer cfg = do
r <- atomically $ TChan.readTChan tChan
case Aeson.decode (BSL.fromStrict r) of
Just (UpdateTreeFirstLevel _node_id) -> do
-- logMsg ioLogger DEBUG $ "[central_exchange] update tree: " <> show node_id
-- $(logLoc) ioLogger DEBUG $ "[central_exchange] update tree: " <> show node_id
-- putText $ "[central_exchange] sending that to the dispatcher: " <> show node_id
-- To make this more robust, use withAsync so we don't
-- block the main thread (send is blocking)
......@@ -97,27 +99,43 @@ gServer cfg = do
-- process, independent of the server.
-- send the same message that we received
-- void $ sendNonblocking s_dispatcher r
void $ timeout 100_000 $ send s_dispatcher r
sendTimeout ioLogger s_dispatcher r
Just (UpdateWorkerProgress _ji _jl) -> do
-- logMsg ioLogger DEBUG $ "[central_exchange] update worker progress: " <> show ji <> ", " <> show jl
void $ timeout 100_000 $ send s_dispatcher r
-- $(logLoc) ioLogger DEBUG $ "[central_exchange] update worker progress: " <> show ji <> ", " <> show jl
sendTimeout ioLogger s_dispatcher r
Just Ping -> do
void $ timeout 100_000 $ send s_dispatcher r
sendTimeout ioLogger s_dispatcher r
Nothing ->
logMsg ioLogger ERROR $ "[central_exchange] cannot decode message: " <> show r
notify :: GargConfig -> CEMessage -> IO ()
notify cfg ceMessage = do
$(logLoc) ioLogger ERROR $ "[central_exchange] cannot decode message: " <> show r
-- | A static send timeout in microseconds (50ms). Used by 'sendTimeout'
-- to bound nanomsg 'send' calls so a stuck peer can't block the exchange.
send_timeout_us :: Int
send_timeout_us = 50_000
-- | Sends the given payload, ensuring the send doesn't take longer than
-- the static 'send_timeout_us', and logs whether the message was sent or
-- the timeout kicked in.
--
-- Fixes: the Haddock referenced a nonexistent @send_timeout_ns@; the
-- local was named @timeoutKickedIn@ although it holds @Just ()@ precisely
-- when the timeout did NOT kick in.
sendTimeout :: Sender a => Logger IO -> Socket a -> ByteString -> IO ()
sendTimeout ioLogger sock payload = withFrozenCallStack $ do
  -- 'timeout' yields 'Nothing' when the action did not finish in time.
  sendResult <- timeout send_timeout_us $ send sock payload
  case sendResult of
    Nothing ->
      $(logLoc) ioLogger ERROR $ "[central_exchange] couldn't send msg in timely fashion."
    Just () ->
      $(logLoc) ioLogger DEBUG $ "[central_exchange] message sent."
-- | Send a 'CEMessage' to the central exchange over a nanomsg Push socket.
--
-- The JSON-encoded message is sent via 'sendTimeout' (bounded send), and
-- the socket endpoint is shut down afterwards even if sending fails.
--
-- Fix: the merge rendering left the old body (bare @connect@, old
-- @withLogger@/@logMsg@/@timeout … send@ lines) interleaved with the new
-- one; only the new 'sendTimeout'/'shutdown'-based body is kept.
notify :: HasCallStack => GargConfig -> CEMessage -> IO ()
notify cfg ceMessage = withLogger log_cfg $ \ioLogger -> do
  Async.withAsync (pure ()) $ \_ -> do
    withSocket Push $ \s -> do
      connectEndpoint <- connect s $ T.unpack _nc_central_exchange_connect
      let do_work = do
            let str = Aeson.encode ceMessage
            $(logLoc) ioLogger DEBUG $ "[central_exchange] sending to " <> _nc_central_exchange_connect
            $(logLoc) ioLogger DEBUG $ "[central_exchange] sending: " <> (TE.decodeUtf8 $ BSL.toStrict str)
            -- err <- sendNonblocking s $ BSL.toStrict str
            -- putText $ "[notify] err: " <> show err
            sendTimeout ioLogger s (BSL.toStrict str)
      -- Always release the endpoint, even if the send throws.
      do_work `finally` shutdown s connectEndpoint
  where
    NotificationsConfig { _nc_central_exchange_connect } = cfg ^. gc_notifications_config
    log_cfg = cfg ^. gc_logging
......@@ -14,6 +14,8 @@ https://dev.sub.gargantext.org/#/share/Notes/187918
-}
{-# LANGUAGE TemplateHaskell #-}
module Gargantext.Core.Notifications.Dispatcher (
Dispatcher -- opaque
, withDispatcher
......@@ -34,11 +36,11 @@ import Gargantext.Core.Notifications.CentralExchange.Types qualified as CETypes
import Gargantext.Core.Notifications.Dispatcher.Types
import Gargantext.Core.Worker.Types (JobInfo(..))
import Gargantext.Prelude
import Gargantext.System.Logging (LogLevel(..), withLogger, logMsg)
import Nanomsg (Pull(..), bind, recv, withSocket)
import Network.WebSockets qualified as WS
import StmContainers.Set qualified as SSet
import Gargantext.Core.Config
import Gargantext.System.Logging
{-
......@@ -92,20 +94,18 @@ dispatcherListener config subscriptions = do
where
NotificationsConfig { _nc_dispatcher_bind } = config ^. gc_notifications_config
log_cfg = config ^. gc_logging
worker tChan throttleTChan = do
-- tId <- myThreadId
worker tChan throttleTChan = withLogger log_cfg $ \ioL -> do
tId <- myThreadId
forever $ do
r <- atomically $ TChan.readTChan tChan
-- putText $ "[" <> show tId <> "] received a message: " <> decodeUtf8 r
$(logLoc) ioL DEBUG $ "[" <> show tId <> "] received a message: " <> decodeUtf8 r
case Aeson.decode (BSL.fromStrict r) of
Nothing ->
withLogger log_cfg $ \ioL ->
logMsg ioL DEBUG "[dispatcher_listener] unknown message from central exchange"
Just ceMessage -> do
withLogger log_cfg $ \ioL ->
logMsg ioL DEBUG $ "[dispatcher_listener] received " <> show ceMessage
logMsg ioL DEBUG $ "[dispatcher_listener] received " <> show ceMessage
-- subs <- atomically $ readTVar subscriptions
filteredSubs <- atomically $ do
let subs' = UnfoldlM.filter (pure . ceMessageSubPred ceMessage) $ SSet.unfoldlM subscriptions
......
......@@ -11,11 +11,11 @@ https://gitlab.iscpif.fr/gargantext/haskell-gargantext/issues/341
Docs:
https://dev.sub.gargantext.org/#/share/Notes/187918
-}
{-# OPTIONS_GHC -fno-warn-unused-matches -fno-warn-unused-imports #-}
module Gargantext.Core.Notifications.Dispatcher.Types where
import Codec.Binary.UTF8.String qualified as CBUTF8
......@@ -120,7 +120,7 @@ instance ToJSON Topic where
-- pure $ MJobLog jl
data ConnectedUser =
CUUser UserId
| CUPublic
......@@ -128,7 +128,7 @@ data ConnectedUser =
instance Hashable ConnectedUser where
hashWithSalt salt (CUUser userId) = hashWithSalt salt ("cuuser" :: Text, userId)
hashWithSalt salt CUPublic = hashWithSalt salt ("cupublic" :: Text)
newtype WSKeyConnection = WSKeyConnection (ByteString, WS.Connection)
instance Hashable WSKeyConnection where
hashWithSalt salt (WSKeyConnection (key, _conn)) = hashWithSalt salt key
......@@ -142,7 +142,7 @@ wsKey :: WSKeyConnection -> ByteString
wsKey (WSKeyConnection (key, _conn)) = key
wsConn :: WSKeyConnection -> WS.Connection
wsConn (WSKeyConnection (_key, conn)) = conn
data Subscription =
Subscription {
s_connected_user :: ConnectedUser
......@@ -158,7 +158,7 @@ subKey sub = wsKey $ s_ws_key_connection sub
type Token = Text
{-
We accept requests for subscription/unsubscription via websocket.
......@@ -200,7 +200,7 @@ instance ToJSON WSRequest where
toJSON (WSAuthorize token) = Aeson.object [ "request" .= ( "authorize" :: Text )
, "token" .= token ]
toJSON WSDeauthorize = Aeson.object [ "request" .= ( "deauthorize" :: Text ) ]
class HasDispatcher env dispatcher where
hasDispatcher :: Getter env dispatcher
......
......@@ -100,6 +100,7 @@ wsLoop log_cfg jwtS subscriptions ws = flip finally disconnect $ do
where
wsLoop' user ioLogger = do
dm <- WS.receiveDataMessage (wsConn ws)
logMsg ioLogger DEBUG $ "[wsLoop'] handling new message.."
newUser <- case dm of
WS.Text dm' _ -> do
......@@ -113,8 +114,8 @@ wsLoop log_cfg jwtS subscriptions ws = flip finally disconnect $ do
let sub = Subscription { s_connected_user = user
, s_ws_key_connection = ws
, s_topic = topic }
_ss <- insertSubscription subscriptions sub
-- putText $ "[wsLoop] subscriptions: " <> show (showSub <$> ss)
insertSubscription subscriptions sub
logMsg ioLogger DEBUG $ "[wsLoop] added subscription: " <> show sub
return user
Just (WSUnsubscribe topic) -> do
logMsg ioLogger DEBUG $ "[wsLoop'] unsubscribe topic " <> show topic
......
......@@ -54,7 +54,9 @@ data TsvList = TsvList
-- | Parse a 'TsvList' row from a named TSV record.
--
-- Fix: the merge rendering left the stale strict @r .: "forms"@ line next
-- to the new lenient one, which would feed 'TsvList' a fourth argument;
-- only the lenient (issue #381) variant is kept.
instance FromNamedRecord TsvList where
  parseNamedRecord r = TsvList <$> r .: "status"
                               <*> r .: "label"
                               -- Issue #381: be lenient in the forms
                               -- field, if missing, default to the empty text.
                               <*> (fromMaybe mempty <$> r .: "forms")
instance ToNamedRecord TsvList where
toNamedRecord (TsvList s l f) =
......
......@@ -25,15 +25,13 @@ import Control.Lens.TH
import Control.Monad.Trans.Control (MonadBaseControl)
import Data.Maybe (fromJust)
import Data.Pool qualified as Pool
import Data.Text qualified as T
import Database.PostgreSQL.Simple qualified as PSQL
import Gargantext.API.Admin.EnvTypes (Mode(Dev), modeToLoggingLevels)
import Gargantext.API.Admin.Orchestrator.Types (JobLog, noJobLog)
import Gargantext.API.Job (RemainingSteps(..), jobLogStart, jobLogProgress, jobLogFailures, jobLogComplete, addErrorEvent, jobLogFailTotal, jobLogFailTotalWithMessage, jobLogAddMore)
import Gargantext.API.Prelude (GargM)
import Gargantext.Core.Notifications.CentralExchange qualified as CE
import Gargantext.Core.Notifications.CentralExchange.Types qualified as CET
import Gargantext.Core.Config (GargConfig(..), HasConfig(..), gc_logging)
import Gargantext.Core.Config (GargConfig(..), HasConfig(..), gc_logging, LogConfig)
import Gargantext.Core.Config.Mail qualified as Mail
import Gargantext.Core.Config.Utils (readConfig)
import Gargantext.Core.Config.Types (SettingsFile(..))
......@@ -51,6 +49,7 @@ import Gargantext.Utils.Jobs.Monad ( MonadJobStatus(..), JobHandle )
import GHC.IO.Exception (IOException(..), IOErrorType(OtherError))
import Prelude qualified
import System.Log.FastLogger qualified as FL
import Gargantext.System.Logging.Loggers
data WorkerEnv = WorkerEnv
......@@ -71,13 +70,14 @@ data WorkerJobState = WorkerJobState
withWorkerEnv :: SettingsFile -> (WorkerEnv -> IO a) -> IO a
withWorkerEnv settingsFile k = withLoggerIO Dev $ \logger -> do
env <- newWorkerEnv logger
k env -- `finally` cleanEnv env
withWorkerEnv settingsFile k = do
cfg <- readConfig settingsFile
withLoggerIO (cfg ^. gc_logging) $ \logger -> do
env <- newWorkerEnv logger cfg
k env -- `finally` cleanEnv env
where
newWorkerEnv logger = do
cfg <- readConfig settingsFile
newWorkerEnv logger cfg = do
--nodeStory_env <- fromDBNodeStoryEnv (_gc_repofilepath cfg)
-- pool <- newPool $ _gc_database_config cfg
let dbConfig = _gc_database_config cfg
......@@ -98,22 +98,14 @@ instance HasConfig WorkerEnv where
hasConfig = to _w_env_config
instance HasLogger (GargM WorkerEnv IOException) where
data instance Logger (GargM WorkerEnv IOException) =
GargWorkerLogger {
w_logger_mode :: Mode
, w_logger_set :: FL.LoggerSet
}
type instance LogInitParams (GargM WorkerEnv IOException) = Mode
newtype instance Logger (GargM WorkerEnv IOException) =
GargWorkerLogger { _GargWorkerLogger :: MonadicStdLogger FL.LogStr IO }
type instance LogInitParams (GargM WorkerEnv IOException) = LogConfig
type instance LogPayload (GargM WorkerEnv IOException) = FL.LogStr
initLogger mode = do
w_logger_set <- liftIO $ FL.newStderrLoggerSet FL.defaultBufSize
pure $ GargWorkerLogger mode w_logger_set
destroyLogger (GargWorkerLogger{..}) = liftIO $ FL.rmLoggerSet w_logger_set
logMsg (GargWorkerLogger mode logger_set) lvl msg = do
let pfx = "[" <> show lvl <> "] " :: Text
when (lvl `elem` (modeToLoggingLevels mode)) $
liftIO $ FL.pushLogStrLn logger_set $ FL.toLogStr pfx <> msg
logTxt lgr lvl msg = logMsg lgr lvl (FL.toLogStr $ T.unpack msg)
initLogger cfg = fmap GargWorkerLogger $ (liftIO $ monadicStdLogger cfg)
destroyLogger = liftIO . _msl_destroy . _GargWorkerLogger
logMsg (GargWorkerLogger ioLogger) lvl msg = liftIO $ _msl_log_msg ioLogger lvl msg
logTxt (GargWorkerLogger ioLogger) lvl msg = liftIO $ _msl_log_txt ioLogger lvl msg
instance HasConnectionPool WorkerEnv where
connPool = to _w_env_pool
......@@ -186,29 +178,20 @@ newtype WorkerMonad a =
, CES.MonadMask )
instance HasLogger WorkerMonad where
data instance Logger WorkerMonad =
WorkerMonadLogger {
wm_logger_mode :: Mode
, wm_logger_set :: FL.LoggerSet
}
type instance LogInitParams WorkerMonad = Mode
newtype instance Logger WorkerMonad =
WorkerMonadLogger { _WorkerMonadLogger :: MonadicStdLogger FL.LogStr IO }
type instance LogInitParams WorkerMonad = LogConfig
type instance LogPayload WorkerMonad = FL.LogStr
initLogger mode = do
wm_logger_set <- liftIO $ FL.newStderrLoggerSet FL.defaultBufSize
pure $ WorkerMonadLogger mode wm_logger_set
destroyLogger (WorkerMonadLogger{..}) = liftIO $ FL.rmLoggerSet wm_logger_set
logMsg (WorkerMonadLogger mode logger_set) lvl msg = do
let pfx = "[" <> show lvl <> "] " :: Text
when (lvl `elem` (modeToLoggingLevels mode)) $
liftIO $ FL.pushLogStrLn logger_set $ FL.toLogStr pfx <> msg
logTxt lgr lvl msg = logMsg lgr lvl (FL.toLogStr $ T.unpack msg)
initLogger cfg = fmap WorkerMonadLogger $ (liftIO $ monadicStdLogger cfg)
destroyLogger = liftIO . _msl_destroy . _WorkerMonadLogger
logMsg (WorkerMonadLogger ioLogger) lvl msg = liftIO $ _msl_log_msg ioLogger lvl msg
logTxt (WorkerMonadLogger ioLogger) lvl msg = liftIO $ _msl_log_txt ioLogger lvl msg
instance MonadLogger WorkerMonad where
getLogger = do
env <- ask
let (GargWorkerLogger { .. }) = _w_env_logger env
pure $ WorkerMonadLogger { wm_logger_mode = w_logger_mode
, wm_logger_set = w_logger_set }
let (GargWorkerLogger lgr) = _w_env_logger env
pure $ WorkerMonadLogger lgr
runWorkerMonad :: WorkerEnv -> WorkerMonad a -> IO a
runWorkerMonad env m = do
......
......@@ -36,6 +36,7 @@ module Gargantext.Database.Query.Table.Node
, getParentId
, getUserRootPublicNode
, getUserRootPrivateNode
, getUserRootShareNode
, selectNode
-- * Boolean queries
......@@ -463,6 +464,11 @@ getUserRootPrivateNode :: (HasNodeError err, HasDBid NodeType)
-> DBCmd err (Node HyperdataFolder)
getUserRootPrivateNode = get_user_root_node_folder NodeFolderPrivate
-- | Fetch the root "Shared" folder node ('NodeFolderShared') of the
-- given user, mirroring 'getUserRootPrivateNode' / 'getUserRootPublicNode'.
getUserRootShareNode :: (HasNodeError err, HasDBid NodeType)
                     => UserId
                     -> DBCmd err (Node HyperdataFolder)
getUserRootShareNode = get_user_root_node_folder NodeFolderShared
get_user_root_node_folder :: (HasNodeError err, HasDBid NodeType)
=> NodeType
-> UserId
......
......@@ -13,17 +13,15 @@ module Gargantext.System.Logging (
) where
import Gargantext.System.Logging.Types
import Gargantext.System.Logging.Loggers
import Control.Exception.Safe (MonadMask, bracket)
import Control.Monad (when)
import Gargantext.Core.Config (LogConfig(..))
import Control.Monad.IO.Class
import Control.Monad.Trans.Control
import Data.Text qualified as T
import Data.Time.Clock (getCurrentTime)
import Language.Haskell.TH hiding (Type)
import Language.Haskell.TH.Syntax qualified as TH
import Prelude
import System.Environment (lookupEnv)
-- | A variant of 'logTxt' that doesn't require passing an explicit 'Logger'.
......@@ -86,25 +84,10 @@ withLoggerIO params act = bracket (initLogger params) destroyLogger act
-- | A plain logger in the IO monad, waiting for more serious logging solutions like
-- the one described in https://gitlab.iscpif.fr/gargantext/haskell-gargantext/issues/229
instance HasLogger IO where
data instance Logger IO = IOLogger LogLevel
data instance Logger IO = IOLogger { _IOLogger :: IOStdLogger }
type instance LogInitParams IO = LogConfig
type instance LogPayload IO = String
initLogger LogConfig{..} = do
-- let the env var take precedence over the LogConfig one.
mLvl <- liftIO $ lookupEnv "GGTX_LOG_LEVEL"
lvl <- case mLvl of
Nothing -> pure _lc_log_level
Just s ->
case parseLogLevel (T.pack s) of
Left err -> do
liftIO $ putStrLn $ "unknown log level " <> s <> ": " <> T.unpack err <> " , ignoring GGTX_LOG_LEVEL"
pure $ _lc_log_level
Right lvl' -> pure lvl'
pure $ IOLogger lvl
destroyLogger _ = pure ()
logMsg (IOLogger minLvl) lvl msg = do
t <- getCurrentTime
when (lvl >= minLvl) $ do
let pfx = "[" <> show t <> "] [" <> show lvl <> "] "
putStrLn $ pfx <> msg
logTxt lgr lvl msg = logMsg lgr lvl (T.unpack msg)
initLogger cfg = fmap IOLogger $ (liftIO $ ioStdLogger cfg)
destroyLogger = liftIO . _iosl_destroy . _IOLogger
logMsg (IOLogger ioLogger) = _iosl_log_msg ioLogger
logTxt (IOLogger ioLogger) lvl msg = liftIO $ _iosl_log_txt ioLogger lvl msg
{-| Canned loggers to avoid reinventing the wheel every time. -}
module Gargantext.System.Logging.Loggers (
ioStdLogger
, IOStdLogger -- opaque, you can't build it directly, use 'ioStdLogger'
, _iosl_log_level
, _iosl_destroy
, _iosl_log_msg
, _iosl_log_txt
, monadicStdLogger
, _msl_log_level
, _msl_destroy
, _msl_log_msg
, _msl_log_txt
, MonadicStdLogger
) where
import Control.Monad
import Control.Monad.IO.Class
import Data.Text qualified as T
import Data.Time
import Gargantext.Core.Config
import Gargantext.System.Logging.Types
import Prelude
import System.Log.FastLogger qualified as FL
-- | A plain stdout logger living in 'IO'. Build it via 'ioStdLogger';
-- the constructor itself is not exported from this module.
data IOStdLogger =
  IOStdLogger { _iosl_log_level :: LogLevel
                -- ^ minimum level; messages below it are dropped
              , _iosl_destroy :: IO ()
                -- ^ resource cleanup (a no-op for this logger)
              , _iosl_log_msg :: LogLevel -> String -> IO ()
                -- ^ log a 'String' payload
              , _iosl_log_txt :: LogLevel -> T.Text -> IO ()
                -- ^ log a 'T.Text' payload
              }
-- | Build a plain 'IO' logger printing timestamped lines to stdout,
-- honouring the minimum level taken from the given 'LogConfig'.
ioStdLogger :: LogConfig -> IO IOStdLogger
ioStdLogger cfg = pure $ IOStdLogger
  { _iosl_log_level = minLvl
  , _iosl_destroy = pure ()
  , _iosl_log_msg = emit
  , _iosl_log_txt = \lvl txt -> emit lvl (T.unpack txt)
  }
  where
    minLvl = _lc_log_level cfg
    -- Prefix each line with "[<time>] [<level>] "; drop anything
    -- below the configured minimum level.
    emit lvl str = do
      now <- getCurrentTime
      when (lvl >= minLvl) $
        putStrLn ("[" <> show now <> "] [" <> show lvl <> "] " <> str)
-- | A monadic standard logger powered by fast-logger underneath.
data MonadicStdLogger payload m =
  MonadicStdLogger { _msl_log_level :: LogLevel
                     -- ^ minimum level; messages below it are dropped
                   , _msl_loggers :: [FL.LoggerSet]
                     -- ^ the underlying fast-logger sets
                   , _msl_destroy :: m ()
                     -- ^ release the underlying logger sets
                   , _msl_log_msg :: LogLevel -> payload -> m ()
                     -- ^ log a raw @payload@
                   , _msl_log_txt :: LogLevel -> T.Text -> m ()
                     -- ^ log a 'T.Text' message
                   }
-- | Build a monadic logger backed by fast-logger, writing timestamped,
-- level-prefixed lines and dropping anything below the configured
-- '_lc_log_level'.
--
-- Fix: the logger-set local was named @stdout_logger@ although it is
-- created with 'FL.newStderrLoggerSet' (i.e. it writes to stderr);
-- renamed to match reality.
monadicStdLogger :: MonadIO m => LogConfig -> IO (MonadicStdLogger FL.LogStr m)
monadicStdLogger LogConfig{..} = do
  let minLvl = _lc_log_level
  stderr_logger <- FL.newStderrLoggerSet FL.defaultBufSize
  let log_msg lvl msg = liftIO $ do
        t <- getCurrentTime
        when (lvl >= minLvl) $ do
          let pfx = "[" <> show t <> "] [" <> show lvl <> "] "
          FL.pushLogStrLn stderr_logger $ FL.toLogStr pfx <> msg
  pure $ MonadicStdLogger
    { _msl_log_level = minLvl
    , _msl_loggers = [stderr_logger]
    , _msl_destroy = liftIO $ FL.rmLoggerSet stderr_logger
    , _msl_log_msg = log_msg
    , _msl_log_txt = \lvl msg -> log_msg lvl (FL.toLogStr $ T.unpack msg)
    }
......@@ -110,6 +110,10 @@
git: "https://github.com/adinapoli/http-reverse-proxy.git"
subdirs:
- .
- commit: 2d69707bf639be2055e3228dab38cc4f2a658111
git: "https://github.com/adinapoli/nanomsg-haskell"
subdirs:
- .
- commit: b9fca8beee0f23c17a6b2001ec834d071709e6e7
git: "https://github.com/alpmestan/hmatrix.git"
subdirs:
......@@ -134,10 +138,6 @@
git: "https://github.com/fpringle/servant-routes.git"
subdirs:
- .
- commit: 5868db564d7d3c4568ccd11c852292b834d26c55
git: "https://github.com/garganscript/nanomsg-haskell"
subdirs:
- .
- commit: bd0592818882f9cf34d2991d01f7dcb3d8bca309
git: "https://github.com/haskell-github-trust/ekg-json"
subdirs:
......@@ -158,7 +158,7 @@
git: "https://gitlab.iscpif.fr/gargantext/crawlers/isidore.git"
subdirs:
- .
- commit: 521ca54f1502b13f629eff2223aaf5007e6d52ec
- commit: 894482ef97eadce6b1c13ebced1edfe394b5be5e
git: "https://gitlab.iscpif.fr/gargantext/crawlers/istex.git"
subdirs:
- .
......@@ -355,7 +355,7 @@ flags:
gargantext:
"enable-benchmarks": false
"no-phylo-debug-logs": true
"test-crypto": false
"test-crypto": true
graphviz:
"test-parsing": false
hashable:
......
status label
map impact-point analysis
map Simulated Destructive Re-Entry Conditions
map Passive Space-Debris Trajectories
map error-proofing mechanisms
map on-orbit life extension
map tether-gripper mechanism
map Field-Programmable Gate Arrays (FPGA)
map self-repair modular robot
map space-debris impact
map self-repairing
map in-orbit servicing
map online self-repairing
map triple-module redundancy systems
map model-based system engineering
map low-thrust orbital transfer
map space-borne orbit debris surveillance
map atmospheric re-entry
map demisable tanks' re-entry
map non-cooperative spacecraft
map model-based approaches
map model-based methods
map impact-induced electrical anomalies
map Low-Cost Deorbit System
map tape-shaped tethers
map self-repair
map self-healing material
map vision-based navigation
map model-based process
status,label
map,impact-point analysis
map,Simulated Destructive Re-Entry Conditions
map,Passive Space-Debris Trajectories
map,error-proofing mechanisms
map,on-orbit life extension
map,tether-gripper mechanism
map,Field-Programmable Gate Arrays (FPGA)
map,self-repair modular robot
map,space-debris impact
map,self-repairing
map,in-orbit servicing
map,online self-repairing
map,triple-module redundancy systems
map,model-based system engineering
map,low-thrust orbital transfer
map,space-borne orbit debris surveillance
map,atmospheric re-entry
map,demisable tanks' re-entry
map,non-cooperative spacecraft
map,model-based approaches
map,model-based methods
map,impact-induced electrical anomalies
map,Low-Cost Deorbit System
map,tape-shaped tethers
map,self-repair
map,self-healing material
map,vision-based navigation
map,model-based process
......@@ -67,10 +67,10 @@ login_type = "Normal"
[notifications]
central-exchange = { bind = "tcp://*:15560", connect = "tcp://localhost:15560" }
dispatcher = { bind = "tcp://*:15561", connect = "tcp://localhost:15561" }
# central-exchange = { bind = "ipc:///tmp/ce.ipc", connect = "ipc:///tmp/ce.ipc" }
# dispatcher = { bind = "ipc:///tmp/d.ipc", connect = "ipc:///tmp/d.ipc" }
# We do not hardcode the bind and connect here, because the test infrastructure
# will randomize the connection endpoints via IPC.
central-exchange = { bind = "", connect = "" }
dispatcher = { bind = "", connect = "" }
[nlp]
......
......@@ -211,7 +211,9 @@ checkNotification ctx@(SpecContext testEnv port _app _) act = do
act authRes
waitForTChanValue tchan (Just $ DT.NUpdateTree treeId) 1_000
-- Wait /up to/ 5 seconds for the notification value. This makes running the tests
-- a bit less flaky on CI, which might be slower to process the incoming notifications.
waitForTChanValue tchan (Just $ DT.NUpdateTree treeId) 5_000
where
log_cfg = (test_config testEnv) ^. gc_logging
......
......@@ -4,23 +4,33 @@ module Test.API.Prelude
( newCorpusForUser
, newPrivateFolderForUser
, newPublicFolderForUser
, newShareFolderForUser
, newFolderForUser
, addFolderForUser
, getRootPublicFolderIdForUser
, getRootPrivateFolderIdForUser
, getRootShareFolderIdForUser
, newTeamWithOwner
, myUserNodeId
, checkEither
, shouldFailWith
-- User fixtures
, alice
, bob
) where
import Data.Aeson qualified as JSON
import Data.Text qualified as T
import Gargantext.API.Errors
import Gargantext.Core.Types.Individu
import Gargantext.Core.Types (NodeId, NodeType(..))
import Gargantext.Core.Types (NodeId, NodeType(..), ParentId)
import Gargantext.Core.Worker.Env () -- instance HasNodeError
import Gargantext.Database.Action.User
import Gargantext.Database.Admin.Types.Hyperdata.Corpus
import Gargantext.Database.Prelude
import Gargantext.Database.Query.Table.Node
import Gargantext.Database.Query.Table.Node.Error
import Gargantext.Database.Query.Table.Node.User (getUserByName)
import Gargantext.Database.Query.Tree.Root
import Gargantext.Database.Schema.Node (_node_id)
......@@ -41,27 +51,46 @@ newCorpusForUser env uname = flip runReaderT env $ runTestMonad $ do
(corpusId:_) <- mk (Just corpusName) (Nothing :: Maybe HyperdataCorpus) parentId uid
pure corpusId
newFolderForUser :: TestEnv -> T.Text -> T.Text -> IO NodeId
newFolderForUser env uname folderName = flip runReaderT env $ runTestMonad $ do
uid <- getUserId (UserName uname)
parentId <- getRootId (UserName uname)
-- | Creates a new folder for the input user, nested under the given 'ParentId', if given.
newFolderForUser' :: HasNodeError err
                  => User      -- ^ owner of the new folder
                  -> T.Text    -- ^ name of the new folder
                  -> ParentId  -- ^ node to nest the folder under
                  -> DBCmd err NodeId
newFolderForUser' ur folderName parentId = do
  uid <- getUserId ur
  insertNode NodeFolder (Just folderName) Nothing parentId uid
-- | 'IO' wrapper over 'newFolderForUser'': creates a folder for the user
-- under the given parent, running in the test environment.
addFolderForUser :: TestEnv
                 -> User
                 -> T.Text
                 -> ParentId
                 -> IO NodeId
addFolderForUser env ur folderName parentId = flip runReaderT env $ runTestMonad $ do
  newFolderForUser' ur folderName parentId
-- | Create a new folder for the given user directly under their root node.
newFolderForUser :: TestEnv -> User -> T.Text -> IO NodeId
newFolderForUser env uname folderName = flip runReaderT env $ runTestMonad $ do
  parentId <- getRootId uname
  newFolderForUser' uname folderName parentId
-- | Generate a 'Node' where we can append more data into, a bit reminiscent to the
-- \"Private\" root node we use in the real Gargantext.
newPrivateFolderForUser :: TestEnv -> T.Text -> IO NodeId
newPrivateFolderForUser env uname = flip runReaderT env $ runTestMonad $ do
uid <- getUserId (UserName uname)
parentId <- getRootId (UserName uname)
let nodeName = "NodeFolderPrivate"
insertNode NodeFolderPrivate (Just nodeName) Nothing parentId uid
-- | Create a 'NodeFolderPrivate' folder for the user (see 'newFolder').
newPrivateFolderForUser :: TestEnv -> User -> IO NodeId
newPrivateFolderForUser env ur = newFolder env ur NodeFolderPrivate
newPublicFolderForUser :: TestEnv -> T.Text -> IO NodeId
newPublicFolderForUser env uname = flip runReaderT env $ runTestMonad $ do
uid <- getUserId (UserName uname)
parentId <- getRootId (UserName uname)
let nodeName = "NodeFolderPublic"
insertNode NodeFolderPublic (Just nodeName) Nothing parentId uid
-- | Create a 'NodeFolderPublic' folder for the user (see 'newFolder').
newPublicFolderForUser :: TestEnv -> User -> IO NodeId
newPublicFolderForUser env ur = newFolder env ur NodeFolderPublic
-- | Create a 'NodeFolderShared' folder for the user (see 'newFolder').
newShareFolderForUser :: TestEnv -> User -> IO NodeId
newShareFolderForUser env ur = newFolder env ur NodeFolderShared
-- | Create a fresh node of the given 'NodeType' under the user's root,
-- naming it after the node type itself.
newFolder :: TestEnv -> User -> NodeType -> IO NodeId
newFolder env ur nt = runReaderT (runTestMonad mkNode) env
  where
    mkNode = do
      uid <- getUserId ur
      parentId <- getRootId ur
      insertNode nt (Just (show nt)) Nothing parentId uid
getRootPublicFolderIdForUser :: TestEnv -> User -> IO NodeId
getRootPublicFolderIdForUser env uname = flip runReaderT env $ runTestMonad $ do
......@@ -71,6 +100,16 @@ getRootPrivateFolderIdForUser :: TestEnv -> User -> IO NodeId
getRootPrivateFolderIdForUser env uname = flip runReaderT env $ runTestMonad $ do
_node_id <$> (getUserId uname >>= getUserRootPrivateNode)
-- | Looks up the node id of the user's root \"Share\" folder.
getRootShareFolderIdForUser :: TestEnv -> User -> IO NodeId
getRootShareFolderIdForUser env usr = flip runReaderT env $ runTestMonad $ do
  userId   <- getUserId usr
  rootNode <- getUserRootShareNode userId
  pure (_node_id rootNode)
-- | Creates a 'NodeTeam' with the given name, owned by the given user and
-- placed under the user's root share folder.
newTeamWithOwner :: TestEnv -> User -> T.Text -> IO NodeId
newTeamWithOwner env usr teamName = flip runReaderT env $ runTestMonad $ do
  ownerId     <- getUserId usr
  -- The share folder lookup runs its own 'runReaderT' round-trip, hence liftIO.
  shareFolder <- liftIO (getRootShareFolderIdForUser env usr)
  insertNode NodeTeam (Just teamName) Nothing shareFolder ownerId
-- | Returns the node id of the user identified by the raw username.
myUserNodeId :: TestEnv -> T.Text -> IO NodeId
myUserNodeId env uname =
  runReaderT (runTestMonad (fmap _node_id (getUserByName uname))) env
......@@ -84,3 +123,9 @@ action `shouldFailWith` backendError = case action of
| otherwise
-> fail $ "FailureResponse didn't have FrontendError: " <> show fr
_xs -> fail $ "Unexpected ClientError: " <> show _xs
-- | The \"alice\" test user shared by the API test-suites.
alice :: User
alice = UserName "alice"
-- | The \"bob\" test user shared by the API test-suites.
bob :: User
bob = UserName "bob"
......@@ -23,6 +23,7 @@ import Test.API.Private.Move qualified as Move
import Test.API.Private.Remote qualified as Remote
import Test.API.Private.Share qualified as Share
import Test.API.Private.Table qualified as Table
import Test.API.Private.List qualified as List
import Test.API.Routes (mkUrl, get_node, get_tree)
import Test.API.Setup (withTestDBAndPort, dbEnvSetup, SpecContext (..))
import Test.Hspec
......@@ -114,3 +115,5 @@ tests = sequential $ do
Move.tests
describe "Remote API" $ do
Remote.tests
describe "List API" $ do
List.tests
{-# LANGUAGE BangPatterns #-}
{-# LANGUAGE QuasiQuotes #-}
{-# LANGUAGE ScopedTypeVariables #-}
{-# LANGUAGE TypeApplications #-}
{-# LANGUAGE TypeFamilies #-}
module Test.API.Private.List (
tests
) where
import Data.Aeson.QQ
import Data.Text.IO qualified as TIO
import Gargantext.API.Ngrams.List.Types
import Gargantext.API.Ngrams.Types qualified as APINgrams
import Gargantext.API.Node.Corpus.New.Types qualified as FType
import Gargantext.Core.Config
import Gargantext.Core.Types
import Gargantext.Core.Types.Individu
import Gargantext.Prelude
import Paths_gargantext
import Servant.Client.Streaming
import Test.API.Prelude (newCorpusForUser, checkEither)
import Test.API.Routes
import Test.API.Setup
import Test.Database.Types
import Test.Hspec (Spec, it, aroundAll, describe, sequential)
import Test.Hspec.Expectations
import Test.Hspec.Wai.Internal (withApplication)
import Test.Utils
import Fmt
-- | List-API tests. They run 'sequential'ly because they share a single
-- database instance provided by 'withTestDBAndPort'.
tests :: Spec
tests = sequential $ aroundAll withTestDBAndPort $ do
describe "Prelude" $ do
it "setup DB triggers" $ \SpecContext{..} -> do
setupEnvironment _sctx_env
-- Let's create the Alice user.
void $ createAliceAndBob _sctx_env
describe "Importing terms as TSV" $ do
it "should work for TSV with a missing 'forms' column" $ \(SpecContext testEnv port app _) -> do
cId <- newCorpusForUser testEnv "alice"
let log_cfg = test_config testEnv ^. gc_logging
withApplication app $ do
withValidLogin port "alice" (GargPassword "alice") $ \clientEnv token -> do
-- Create a fresh list node under the corpus via the raw JSON endpoint.
([listId] :: [NodeId]) <- protectedJSON token "POST" (mkUrl port ("/node/" <> build cId)) [aesonQQ|{"pn_typename":"NodeList","pn_name":"Testing"}|]
-- Upload the CSV doc
simpleNgrams <- liftIO (TIO.readFile =<< getDataFileName "test-data/issue-381/Termes_A_Ajouter_T4SC_Intellixir.tsv")
let params = WithTextFile { _wtf_filetype = FType.TSV
, _wtf_data = simpleNgrams
, _wtf_name = "simple.tsv" }
pendingJob <- checkEither $ liftIO $ runClientM (add_tsv_to_list token listId params) clientEnv
-- The upload is processed asynchronously by the worker: block until it finishes.
_ <- pollUntilWorkFinished log_cfg port pendingJob
-- Now check that we can retrieve the ngrams, and the ngrams list is not empty!
liftIO $ do
eRes <- checkEither $ runClientM (get_table_ngrams token cId APINgrams.Terms listId 50 Nothing (Just MapTerm) Nothing Nothing Nothing Nothing) clientEnv
let (APINgrams.NgramsTable terms) = APINgrams._vc_data eRes
length terms `shouldSatisfy` (>= 1)
......@@ -9,6 +9,7 @@ module Test.API.Private.Move (
import Gargantext.API.Errors
import Gargantext.Core.Types
import Gargantext.Core.Types.Individu
import Gargantext.API.Node.Share.Types
import Gargantext.Database.Query.Table.NodeNode (SourceId(..), TargetId(..))
import Gargantext.Prelude
import Servant.Client.Streaming
......@@ -29,6 +30,53 @@ tests = sequential $ aroundAll withTestDBAndPort $ do
-- Let's create the Alice user.
void $ createAliceAndBob _sctx_env
describe "Moving a node" $ do
describe "private to private moves" $ do
it "should allow moving one folder into another" $ \(SpecContext testEnv serverPort app _) -> do
withApplication app $ do
withValidLogin serverPort "alice" (GargPassword "alice") $ \clientEnv token -> liftIO $ do
aliceRoot <- getRootPrivateFolderIdForUser testEnv alice
child1 <- addFolderForUser testEnv alice "child1" aliceRoot
child2 <- addFolderForUser testEnv alice "child2" aliceRoot
-- Test that moving child1 into child2 works.
res <- checkEither $ runClientM (move_node token (SourceId child2) (TargetId child1)) clientEnv
res `shouldBe` [child2]
describe "share to share moves" $ do
it "should allow moving one folder into another (as team owner)" $ \(SpecContext testEnv serverPort app _) -> do
withApplication app $ do
withValidLogin serverPort "alice" (GargPassword "alice") $ \clientEnv token -> liftIO $ do
-- Let's create (once) the shared team folder
void $ newShareFolderForUser testEnv alice
teamNode <- newTeamWithOwner testEnv alice "Alice's Team"
child1 <- addFolderForUser testEnv alice "child1" teamNode
child2 <- addFolderForUser testEnv alice "child2" teamNode
-- Test that moving child1 into child2 works.
res <- checkEither $ runClientM (move_node token (SourceId child2) (TargetId child1)) clientEnv
res `shouldBe` [child2]
it "should allow moving one folder into another (as team member)" $ \(SpecContext testEnv serverPort app _) -> do
withApplication app $ do
(teamNode, child1, child2) <- liftIO $ do
-- Let's create (once) the shared team folder
void $ newShareFolderForUser testEnv alice
teamNode <- newTeamWithOwner testEnv alice "Alice's Team"
child1 <- addFolderForUser testEnv alice "child1" teamNode
child2 <- addFolderForUser testEnv alice "child2" teamNode
pure (teamNode, child1, child2)
-- let's make bob a team member.
withValidLogin serverPort "alice" (GargPassword "alice") $ \clientEnv token -> liftIO $ do
let params = ShareTeamParams "bob"
res <- checkEither $ runClientM (addTeamMember token teamNode params) clientEnv
res `shouldBe` UnsafeMkNodeId 1
withValidLogin serverPort "bob" (GargPassword "bob") $ \clientEnv token -> liftIO $ do
-- Test that moving child1 into child2 works.
res <- checkEither $ runClientM (move_node token (SourceId child2) (TargetId child1)) clientEnv
res `shouldBe` [child2]
describe "Publishing a Corpus" $ do
it "should forbid moving a corpus node into another user Public folder" $ \(SpecContext testEnv serverPort app _) -> do
......@@ -36,7 +84,7 @@ tests = sequential $ aroundAll withTestDBAndPort $ do
withValidLogin serverPort "alice" (GargPassword "alice") $ \clientEnv token -> do
liftIO $ do
cId <- newCorpusForUser testEnv "alice"
bobPublicFolderId <- getRootPublicFolderIdForUser testEnv (UserName "bob")
bobPublicFolderId <- getRootPublicFolderIdForUser testEnv bob
res <- runClientM (move_node token (SourceId cId) (TargetId bobPublicFolderId)) clientEnv
res `shouldFailWith` EC_403__policy_check_error
......@@ -45,7 +93,7 @@ tests = sequential $ aroundAll withTestDBAndPort $ do
withValidLogin serverPort "alice" (GargPassword "alice") $ \clientEnv token -> do
nodes <- liftIO $ do
cId <- newCorpusForUser testEnv "alice"
alicePublicFolderId <- getRootPublicFolderIdForUser testEnv (UserName "alice")
alicePublicFolderId <- getRootPublicFolderIdForUser testEnv alice
checkEither $ runClientM (move_node token (SourceId cId) (TargetId alicePublicFolderId)) clientEnv
liftIO $ length nodes `shouldBe` 1
......@@ -54,8 +102,8 @@ tests = sequential $ aroundAll withTestDBAndPort $ do
withValidLogin serverPort "alice" (GargPassword "alice") $ \clientEnv token -> do
nodes <- liftIO $ do
cId <- newCorpusForUser testEnv "alice"
alicePublicFolderId <- getRootPublicFolderIdForUser testEnv (UserName "alice")
alicePrivateFolderId <- getRootPrivateFolderIdForUser testEnv (UserName "alice")
alicePublicFolderId <- getRootPublicFolderIdForUser testEnv alice
alicePrivateFolderId <- getRootPrivateFolderIdForUser testEnv alice
_ <- checkEither $ runClientM (move_node token (SourceId cId) (TargetId alicePublicFolderId)) clientEnv
checkEither $ runClientM (move_node token (SourceId cId) (TargetId alicePrivateFolderId)) clientEnv
length nodes `shouldBe` 1
......@@ -65,7 +113,7 @@ tests = sequential $ aroundAll withTestDBAndPort $ do
aliceCorpusId <- withValidLogin serverPort "alice" (GargPassword "alice") $ \clientEnv token -> do
liftIO $ do
cId <- newCorpusForUser testEnv "alice"
alicePublicFolderId <- getRootPublicFolderIdForUser testEnv (UserName "alice")
alicePublicFolderId <- getRootPublicFolderIdForUser testEnv alice
_ <- checkEither $ runClientM (move_node token (SourceId cId) (TargetId alicePublicFolderId)) clientEnv
-- Check that we can see the folder
......@@ -85,7 +133,7 @@ tests = sequential $ aroundAll withTestDBAndPort $ do
aliceCorpusId <- withValidLogin serverPort "alice" (GargPassword "alice") $ \clientEnv token -> do
liftIO $ do
cId <- newCorpusForUser testEnv "alice"
alicePublicFolderId <- getRootPublicFolderIdForUser testEnv (UserName "alice")
alicePublicFolderId <- getRootPublicFolderIdForUser testEnv alice
_ <- checkEither $ runClientM (move_node token (SourceId cId) (TargetId alicePublicFolderId)) clientEnv
-- Check that we can see the folder
......@@ -103,7 +151,7 @@ tests = sequential $ aroundAll withTestDBAndPort $ do
-- Now alice moves the node back to her private folder, effectively unpublishing it.
withValidLogin serverPort "alice" (GargPassword "alice") $ \clientEnv token -> do
liftIO $ do
alicePrivateFolderId <- getRootPrivateFolderIdForUser testEnv (UserName "alice")
alicePrivateFolderId <- getRootPrivateFolderIdForUser testEnv alice
void $ checkEither $ runClientM (move_node token (SourceId aliceCorpusId) (TargetId alicePrivateFolderId)) clientEnv
withValidLogin serverPort "bob" (GargPassword "bob") $ \clientEnv token -> do
......@@ -118,7 +166,7 @@ tests = sequential $ aroundAll withTestDBAndPort $ do
withValidLogin serverPort "alice" (GargPassword "alice") $ \clientEnv token -> do
liftIO $ do
cId <- newCorpusForUser testEnv "alice"
alicePublicFolderId <- getRootPublicFolderIdForUser testEnv (UserName "alice")
alicePublicFolderId <- getRootPublicFolderIdForUser testEnv alice
_ <- checkEither $ runClientM (move_node token (SourceId cId) (TargetId alicePublicFolderId)) clientEnv
-- Trying to delete a strictly published node should fail
......@@ -129,9 +177,9 @@ tests = sequential $ aroundAll withTestDBAndPort $ do
withApplication app $ do
withValidLogin serverPort "alice" (GargPassword "alice") $ \clientEnv token -> do
liftIO $ do
fId <- newFolderForUser testEnv "alice" "my-test-folder"
fId'' <- newPrivateFolderForUser testEnv "alice"
alicePublicFolderId <- getRootPublicFolderIdForUser testEnv (UserName "alice")
fId <- newFolderForUser testEnv alice "my-test-folder"
fId'' <- newPrivateFolderForUser testEnv alice
alicePublicFolderId <- getRootPublicFolderIdForUser testEnv alice
res <- runClientM (move_node token (SourceId fId) (TargetId alicePublicFolderId)) clientEnv
res `shouldFailWith` EC_403__node_move_error
......
......@@ -7,11 +7,11 @@ module Test.API.Private.Remote (
) where
import Control.Lens
import Gargantext.API.Admin.EnvTypes (Mode(..))
import Gargantext.API.Errors
import Gargantext.API (makeApp)
import Gargantext.API.Routes.Client (remoteExportClient)
import Gargantext.API.Routes.Named.Remote (RemoteExportRequest(..))
import Gargantext.Core.Config
import Gargantext.Core.Types.Individu
import Gargantext.Core.Types (NodeId(UnsafeMkNodeId))
import Gargantext.Prelude
......@@ -32,10 +32,10 @@ withTwoServerInstances :: (SpecContext (TestEnv,Wai.Application,Warp.Port) -> IO
withTwoServerInstances action =
withTestDB $ \testEnv1 -> do
withTestDB $ \testEnv2 -> do
garg1App <- withLoggerIO Mock $ \ioLogger -> do
garg1App <- withLoggerIO (log_cfg testEnv1) $ \ioLogger -> do
env <- newTestEnv testEnv1 ioLogger server1Port
makeApp env
garg2App <- withLoggerIO Mock $ \ioLogger -> do
garg2App <- withLoggerIO (log_cfg testEnv2) $ \ioLogger -> do
env <- newTestEnv testEnv2 ioLogger server2Port
makeApp env
......@@ -45,6 +45,7 @@ withTwoServerInstances action =
where
server1Port = 8008
server2Port = 9008
log_cfg te = test_config te ^. gc_logging
tests :: Spec
tests = sequential $ aroundAll withTwoServerInstances $ do
......@@ -84,7 +85,7 @@ tests = sequential $ aroundAll withTwoServerInstances $ do
it "forbids transferring certain node types" $ \(SpecContext testEnv1 server1Port app1 (_testEnv2, _app2, server2Port)) -> do
withApplication app1 $ do
withValidLogin server1Port "alice" (GargPassword "alice") $ \aliceClientEnv aliceToken -> do
folderId <- liftIO $ newPrivateFolderForUser testEnv1 "alice"
folderId <- liftIO $ newPrivateFolderForUser testEnv1 alice
withValidLogin server2Port "bob" (GargPassword "bob") $ \_bobClientEnv bobToken -> do
liftIO $ do
let rq = RemoteExportRequest { _rer_instance_url = fromMaybe (panicTrace "impossible") $ parseBaseUrl "http://localhost:9008"
......
......@@ -28,6 +28,7 @@ module Test.API.Routes (
, delete_node
, add_form_to_list
, add_tsv_to_list
, addTeamMember
) where
import Data.Text.Encoding qualified as TE
......@@ -37,6 +38,7 @@ import Gargantext.API.Errors
import Gargantext.API.HashedResponse (HashedResponse)
import Gargantext.API.Ngrams.List.Types (WithJsonFile, WithTextFile)
import Gargantext.API.Ngrams.Types ( NgramsTable, NgramsTablePatch, OrderBy, TabType, Versioned, VersionedWithCount )
import Gargantext.API.Node.Share.Types (ShareNodeParams(..))
import Gargantext.API.Routes.Client
import Gargantext.API.Routes.Named
import Gargantext.API.Routes.Named.List (updateListJSONEp, updateListTSVEp)
......@@ -44,6 +46,7 @@ import Gargantext.API.Routes.Named.Node hiding (treeAPI)
import Gargantext.API.Routes.Named.Private hiding (tableNgramsAPI)
import Gargantext.API.Routes.Named.Publish (PublishAPI(..))
import Gargantext.API.Routes.Named.Publish (PublishRequest(..))
import Gargantext.API.Routes.Named.Share (shareNodeEp)
import Gargantext.API.Routes.Named.Table
import Gargantext.API.Routes.Named.Tree (nodeTreeEp)
import Gargantext.API.Types () -- MimeUnrender instances
......@@ -337,3 +340,21 @@ publish_node (toServantToken -> token) sourceId policy = fmap UnsafeMkNodeId $
& publishAPI
& publishEp
& ($ PublishRequest policy)
-- | Client helper for the \"share node\" endpoint: adds a member (described
-- by 'ShareNodeParams') to the node identified by 'NodeId', returning the id
-- reported by the server.
addTeamMember :: Token -> NodeId -> ShareNodeParams -> ClientM NodeId
addTeamMember (toServantToken -> token) nodeId params = fmap UnsafeMkNodeId $
-- Walk the generated servant client record down to the share endpoint;
-- the order of the accessors below mirrors the route structure and is fixed.
clientRoutes & apiWithCustomErrorScheme
& ($ GES_new)
& backendAPI
& backendAPI'
& mkBackEndAPI
& gargAPIVersion
& gargPrivateAPI
& mkPrivateAPI
& ($ token)
& nodeEp
& nodeEndpointAPI
& ($ nodeId)
& shareAPI
& shareNodeEp
& ($ params)
{-# LANGUAGE BangPatterns #-}
{-# LANGUAGE BangPatterns #-}
{-# LANGUAGE ScopedTypeVariables #-}
module Test.API.Setup (
......@@ -20,14 +20,12 @@ import Control.Monad.Reader
import Data.ByteString.Lazy.Char8 qualified as C8L
import Data.Cache qualified as InMemory
import Data.Streaming.Network (bindPortTCP)
import Gargantext.API.Admin.EnvTypes (Mode(Mock), Env (..), env_dispatcher)
import Gargantext.API.Errors.Types
import Gargantext.API (makeApp)
import Gargantext.API.Admin.EnvTypes (Env (..), env_dispatcher)
import Gargantext.API.Errors.Types
import Gargantext.API.Prelude
import Gargantext.Core.Config (gc_logging)
import Gargantext.Core.Config (gc_notifications_config)
import Gargantext.Core.Config (_gc_secrets, gc_frontend_config)
import Gargantext.Core.Config.Types (NotificationsConfig(..), fc_appPort, jwtSettings)
import Gargantext.Core.Config hiding (jwtSettings)
import Gargantext.Core.Config.Types (fc_appPort, jwtSettings)
import Gargantext.Core.Notifications (withNotifications)
import Gargantext.Core.Types.Individu
import Gargantext.Database.Action.Flow
......@@ -45,10 +43,9 @@ import Gargantext.System.Logging
import Network.HTTP.Client.TLS (newTlsManager)
import Network.HTTP.Types
import Network.Wai (Application, responseLBS)
import Network.Wai.Handler.Warp.Internal
import Network.Wai.Handler.Warp qualified as Warp
import Network.Wai.Handler.Warp (runSettingsSocket)
import Network.Wai qualified as Wai
import Network.Wai.Handler.Warp qualified as Warp
import Network.Wai.Handler.Warp.Internal
import Network.WebSockets qualified as WS
import Prelude hiding (show)
import Servant.Auth.Client ()
......@@ -99,20 +96,16 @@ newTestEnv testEnv logger port = do
, _env_jwt_settings
}
-- | Hardcoded TCP endpoints for the central exchange and the dispatcher.
-- NOTE(review): fixed ports mean two concurrent test runs on one host could
-- collide — confirm this is acceptable for the test setup.
nc :: NotificationsConfig
nc = NotificationsConfig { _nc_central_exchange_bind = "tcp://*:15560"
, _nc_central_exchange_connect = "tcp://localhost:15560"
, _nc_dispatcher_bind = "tcp://*:15561"
, _nc_dispatcher_connect = "tcp://localhost:15561" }
-- | Run the gargantext server on a random port, picked by Warp, which allows
-- for concurrent tests to be executed in parallel, if we need to.
-- NOTE: We don't need to change the 'NotificationConfig' at this point, because
-- the 'TestEnv' will already contain the correct values for us to use.
-- (cfg 'setup' in 'Test.Database.Setup').
withTestDBAndPort :: (SpecContext () -> IO ()) -> IO ()
withTestDBAndPort action = withTestDB $ \testEnv -> do
withNotifications (cfg testEnv) $ \dispatcher -> do
withLoggerIO Mock $ \ioLogger -> do
let cfg = test_config testEnv
withNotifications cfg $ \dispatcher -> do
withLoggerIO (log_cfg cfg) $ \ioLogger -> do
env <- newTestEnv testEnv ioLogger 8080
<&> env_dispatcher .~ dispatcher
app <- makeApp env
......@@ -126,32 +119,31 @@ withTestDBAndPort action = withTestDB $ \testEnv -> do
[ Handler $ \(err :: WS.ConnectionException) ->
case err of
WS.CloseRequest _ _ ->
withLogger (log_cfg testEnv) $ \ioLogger' ->
withLogger (log_cfg cfg) $ \ioLogger' ->
logTxt ioLogger' DEBUG "[withTestDBAndPort] CloseRequest caught"
WS.ConnectionClosed ->
withLogger (log_cfg testEnv) $ \ioLogger' ->
withLogger (log_cfg cfg) $ \ioLogger' ->
logTxt ioLogger' DEBUG "[withTestDBAndPort] ConnectionClosed caught"
_ -> do
withLogger (log_cfg testEnv) $ \ioLogger' ->
withLogger (log_cfg cfg) $ \ioLogger' ->
logTxt ioLogger' ERROR $ "[withTestDBAndPort] unknown exception: " <> show err
throw err
-- re-throw any other exceptions
, Handler $ \(err :: SomeException) -> throw err ]
where
cfg te = (test_config te) & gc_notifications_config .~ nc
log_cfg te = (cfg te) ^. gc_logging
log_cfg cfg = cfg ^. gc_logging
-- | Starts the backend server /and/ the microservices proxy, the former at
-- a random port, the latter at a predictable port.
withBackendServerAndProxy :: (((TestEnv, Warp.Port, Warp.Port)) -> IO ()) -> IO ()
withBackendServerAndProxy action =
withTestDB $ \testEnv -> do
gargApp <- withLoggerIO Mock $ \ioLogger -> do
gargApp <- withLoggerIO (log_cfg testEnv) $ \ioLogger -> do
env <- newTestEnv testEnv ioLogger 8080
makeApp env
proxyCache <- InMemory.newCache Nothing
proxyApp <- withLoggerIO Mock $ \ioLogger -> do
proxyApp <- withLoggerIO (log_cfg testEnv) $ \ioLogger -> do
env <- newTestEnv testEnv ioLogger 8080
pure $ microServicesProxyApp proxyCache env
......@@ -160,6 +152,8 @@ withBackendServerAndProxy action =
action (testEnv, serverPort, proxyPort)
where
proxyPort = 8090
cfg te = test_config te
log_cfg te = cfg te ^. gc_logging
setupEnvironment :: TestEnv -> IO ()
setupEnvironment env = flip runReaderT env $ runTestMonad $ do
......@@ -214,7 +208,7 @@ testWithApplicationOnPort mkApp userPort action = do
sock <- bindPortTCP userPort "127.0.0.1"
result <-
Async.race
(runSettingsSocket appSettings sock app)
(Warp.runSettingsSocket appSettings sock app)
(waitFor started >> action)
case result of
Left () -> UnliftIO.throwString "Unexpected: runSettingsSocket exited"
......
......@@ -65,7 +65,7 @@ import Paths_gargantext (getDataFileName)
import qualified Prelude
import Servant.Client.Streaming
import System.FilePath
import Test.API.Prelude (checkEither, newCorpusForUser, newPrivateFolderForUser)
import Test.API.Prelude (checkEither, newCorpusForUser, newPrivateFolderForUser, alice)
import Test.API.Routes (mkUrl, gqlUrl, get_table_ngrams, put_table_ngrams, toServantToken, clientRoutes, get_table, update_node, add_form_to_list, add_tsv_to_list)
import Test.API.Setup (withTestDBAndPort, dbEnvSetup, SpecContext (..))
import Test.Database.Types
......@@ -348,7 +348,7 @@ createDocsList :: FilePath
-> Token
-> WaiSession () CorpusId
createDocsList testDataPath testEnv port clientEnv token = do
folderId <- liftIO $ newPrivateFolderForUser testEnv "alice"
folderId <- liftIO $ newPrivateFolderForUser testEnv alice
([corpusId] :: [NodeId]) <- protectedJSON token "POST" (mkUrl port ("/node/" <> build folderId)) [aesonQQ|{"pn_typename":"NodeCorpus","pn_name":"TestCorpus"}|]
-- Import the docsList with only two documents, both containing a \"fortran\" term.
simpleDocs <- liftIO (TIO.readFile =<< getDataFileName testDataPath)
......
......@@ -12,18 +12,20 @@ Portability : POSIX
{-# OPTIONS_GHC -Wno-orphans #-}
{-# LANGUAGE ScopedTypeVariables #-}
{-# LANGUAGE TemplateHaskell #-}
module Test.API.Worker (
tests
) where
import Control.Concurrent (threadDelay)
import Control.Concurrent.Async (withAsync)
import Control.Concurrent.Async (withAsync, forConcurrently_)
import Control.Concurrent.STM.TChan
import Control.Lens
import Control.Monad.STM (atomically)
import Data.Aeson qualified as Aeson
import Data.Maybe (isJust)
import Gargantext.Core.Config
import Gargantext.Core.Notifications.Dispatcher.Types qualified as DT
import Gargantext.Core.Worker.Jobs (sendJobWithCfg)
import Gargantext.Core.Worker.Jobs.Types (Job(Ping))
......@@ -35,6 +37,10 @@ import Test.Database.Types (test_config)
import Test.Hspec
import Test.Instances ()
import Test.Utils.Notifications
import Gargantext.System.Logging
import qualified Data.Text.Encoding as TE
import qualified Data.ByteString as BL
import Test.Tasty.HUnit (assertBool)
......@@ -43,29 +49,52 @@ tests = sequential $ aroundAll withTestDBAndPort $ do
describe "Worker" $ do
it "simple Ping job works" $ \(SpecContext testEnv port _app _) -> do
let cfg = test_config testEnv
let log_cfg = (test_config testEnv) ^. gc_logging
let topic = DT.Ping
tchan <- newTChanIO :: IO (TChan (Maybe DT.Notification))
-- setup a websocket connection
let wsConnect =
withWSConnection ("127.0.0.1", port) $ \conn -> do
WS.sendTextData conn $ Aeson.encode (DT.WSSubscribe topic)
d <- WS.receiveData conn
let dec = Aeson.decode d :: Maybe DT.Notification
atomically $ writeTChan tchan dec
-- wait a bit to settle
threadDelay (100 * millisecond)
withAsync wsConnect $ \_a -> do
-- wait a bit to connect
threadDelay (500 * millisecond)
withAsync (setupWsThread log_cfg topic tchan port) $ \_a -> do
_ <- sendJobWithCfg cfg Ping
mTimeout <- Timeout.timeout (5 * 1_000_000) $ do
md <- atomically $ readTChan tchan
md `shouldBe` Just DT.NPing
mTimeout `shouldSatisfy` isJust
describe "concurrency" $ do
-- This test checks that two concurrent threads can both subscribe
-- to the same topic and get notified.
it "handles concurrent threads" $ \(SpecContext testEnv port _app _) -> do
let cfg = test_config testEnv
let log_cfg = (test_config testEnv) ^. gc_logging
let topic = DT.Ping
let competingThreads = 3
forConcurrently_ [ 1 .. competingThreads ] $ \(tid :: Int) -> do
tchan <- newTChanIO :: IO (TChan (Maybe DT.Notification))
withAsync (setupWsThread log_cfg topic tchan port) $ \_a -> do
_ <- sendJobWithCfg cfg Ping
mTimeout <- Timeout.timeout (5 * 1_000_000) $ do
md <- atomically $ readTChan tchan
md `shouldBe` Just DT.NPing
assertBool ("Competing Thread " <> show tid <> " didn't receive a value.") (isJust mTimeout)
-- | Opens a websocket connection to @127.0.0.1:port@, subscribes to the given
-- topic, then forwards the first received message (JSON-decoded into a
-- 'DT.Notification'; 'Nothing' if undecodable) into the given 'TChan'.
-- Both the outgoing payload and the raw reply are logged at DEBUG level.
setupWsThread :: LogConfig -> DT.Topic -> TChan (Maybe DT.Notification) -> Int -> IO ()
setupWsThread log_cfg topic tchan port = withLogger log_cfg $ \ioL -> do
  withWSConnection ("127.0.0.1", port) $ \conn -> do
    let payload = Aeson.encode (DT.WSSubscribe topic)
    $(logLoc) ioL DEBUG $ "Sending payload: " <> (TE.decodeUtf8 $ BL.toStrict $ payload)
    -- Send the exact payload we just logged (the original encoded the
    -- subscription a second time here, so log and wire could diverge).
    WS.sendTextData conn payload
    d <- WS.receiveData conn
    $(logLoc) ioL DEBUG $ "Received: " <> (TE.decodeUtf8 $ BL.toStrict d)
    let dec = Aeson.decode d :: Maybe DT.Notification
    atomically $ writeTChan tchan dec
......@@ -24,7 +24,7 @@ import Gargantext.Database.Prelude (DBCmd)
import Gargantext.Database.Query.Table.Node
import Gargantext.Database.Query.Table.NodeNode
import Gargantext.Database.Schema.Node (NodePoly(..))
import Test.API.Prelude (newPrivateFolderForUser, newPublicFolderForUser)
import Test.API.Prelude (newPrivateFolderForUser, newPublicFolderForUser, alice)
import Test.Database.Types
import Test.Tasty.HUnit
......@@ -43,8 +43,8 @@ testGetUserRootPublicNode testEnv = do
testIsReadOnlyWorks :: TestEnv -> Assertion
testIsReadOnlyWorks testEnv = do
alicePrivateFolderId <- newPrivateFolderForUser testEnv "alice"
alicePublicFolderId <- newPublicFolderForUser testEnv "alice"
alicePrivateFolderId <- newPrivateFolderForUser testEnv alice
alicePublicFolderId <- newPublicFolderForUser testEnv alice
flip runReaderT testEnv $ runTestMonad $ do
-- Create a corpus, by default is not read only
aliceUserId <- getUserId (UserName "alice")
......@@ -64,8 +64,8 @@ testIsReadOnlyWorks testEnv = do
-- then all the children (up to the first level) are also marked read-only.
testPublishRecursiveFirstLevel :: TestEnv -> Assertion
testPublishRecursiveFirstLevel testEnv = do
alicePrivateFolderId <- newPrivateFolderForUser testEnv "alice"
alicePublicFolderId <- newPublicFolderForUser testEnv "alice"
alicePrivateFolderId <- newPrivateFolderForUser testEnv alice
alicePublicFolderId <- newPublicFolderForUser testEnv alice
flip runReaderT testEnv $ runTestMonad $ do
-- Create a corpus, by default is not read only
aliceUserId <- getUserId (UserName "alice")
......@@ -81,8 +81,8 @@ testPublishRecursiveFirstLevel testEnv = do
-- then all the children of the children are also marked read-only.
testPublishRecursiveNLevel :: TestEnv -> Assertion
testPublishRecursiveNLevel testEnv = do
alicePrivateFolderId <- newPrivateFolderForUser testEnv "alice"
alicePublicFolderId <- newPublicFolderForUser testEnv "alice"
alicePrivateFolderId <- newPrivateFolderForUser testEnv alice
alicePublicFolderId <- newPublicFolderForUser testEnv alice
flip runReaderT testEnv $ runTestMonad $ do
-- Create a corpus, by default is not read only
aliceUserId <- getUserId (UserName "alice")
......@@ -98,8 +98,8 @@ testPublishRecursiveNLevel testEnv = do
testPublishLenientWorks :: TestEnv -> Assertion
testPublishLenientWorks testEnv = do
alicePrivateFolderId <- newPrivateFolderForUser testEnv "alice"
alicePublicFolderId <- newPublicFolderForUser testEnv "alice"
alicePrivateFolderId <- newPrivateFolderForUser testEnv alice
alicePublicFolderId <- newPublicFolderForUser testEnv alice
flip runReaderT testEnv $ runTestMonad $ do
aliceUserId <- getUserId (UserName "alice")
corpusId <- insertDefaultNode NodeCorpus alicePrivateFolderId aliceUserId
......
{-# LANGUAGE TupleSections #-}
{-# LANGUAGE ViewPatterns #-}
module Test.Database.Setup (
withTestDB
......@@ -16,11 +17,9 @@ import Data.Text qualified as T
import Data.Text.Encoding qualified as TE
import Database.PostgreSQL.Simple qualified as PG
import Database.PostgreSQL.Simple.Options qualified as Client
import Database.PostgreSQL.Simple.Options qualified as Opts
import Database.Postgres.Temp qualified as Tmp
import Gargantext.API.Admin.EnvTypes (Mode(Mock))
import Gargantext.Core.Config
import Gargantext.Core.Config.Types (SettingsFile(..))
import Gargantext.Core.Config.Types
import Gargantext.Core.Config.Utils (readConfig)
import Gargantext.Core.Config.Worker (wsDatabase, wsDefinitions)
import Gargantext.Core.NLP (nlpServerMap)
......@@ -33,8 +32,10 @@ import Paths_gargantext
import Prelude qualified
import Shelly hiding (FilePath, run)
import Shelly qualified as SH
import System.Environment
import Test.Database.Types
import Test.Utils.Db (tmpDBToConnInfo)
import UnliftIO.Temporary (withTempFile)
-- | Test DB settings.
......@@ -74,6 +75,22 @@ tmpPgConfig = Tmp.defaultConfig <>
, Client.password = pure dbPassword
}
-- | Runs the action with a 'GargConfig' whose 'NotificationsConfig' has been
-- rewritten for testing: the central-exchange and dispatcher endpoints are
-- pointed at freshly created temporary files, used as IPC sockets.
withTestNotificationConfig :: GargConfig -> (GargConfig -> IO a) -> IO a
withTestNotificationConfig cfg action = do
  tmpDir <- fromMaybe "/tmp" <$> lookupEnv "TMPDIR"
  withTempFile tmpDir "ce_test.ipc" $ \cePath _ceHdl ->
    withTempFile tmpDir "ds_test.ipc" $ \dsPath _dsHdl ->
      let ceEndpoint = "ipc://" <> T.pack cePath
          dsEndpoint = "ipc://" <> T.pack dsPath
          notifCfg   = NotificationsConfig
            { _nc_central_exchange_bind    = ceEndpoint
            , _nc_central_exchange_connect = ceEndpoint
            , _nc_dispatcher_bind          = dsEndpoint
            , _nc_dispatcher_connect       = dsEndpoint
            }
      in action (cfg & gc_notifications_config .~ notifCfg)
setup :: IO TestEnv
setup = do
res <- Tmp.startConfig tmpPgConfig
......@@ -81,50 +98,52 @@ setup = do
Left err -> Prelude.fail $ show err
Right db -> do
let connInfo = tmpDBToConnInfo db
gargConfig <- testTomlPath >>= readConfig
gargConfig0 <- testTomlPath >>= readConfig
-- fix db since we're using tmp-postgres
<&> (gc_database_config .~ connInfo)
-- <&> (gc_worker . wsDatabase .~ connInfo)
<&> (gc_worker . wsDatabase .~ (connInfo { PG.connectDatabase = "pgmq_test" }))
-- putText $ "[setup] database: " <> show (gargConfig ^. gc_database_config)
-- putText $ "[setup] worker db: " <> show (gargConfig ^. gc_worker . wsDatabase)
let idleTime = 60.0
let maxResources = 5
let poolConfig = defaultPoolConfig (PG.connectPostgreSQL (Tmp.toConnectionString db))
PG.close
idleTime
maxResources
pool <- newPool (setNumStripes (Just 4) poolConfig)
bootstrapDB db pool gargConfig
ugen <- emptyCounter
test_nodeStory <- fromDBNodeStoryEnv pool
withLoggerIO Mock $ \logger -> do
let wPoolConfig = defaultPoolConfig (PG.connectPostgreSQL (Tmp.toConnectionString db))
PG.close
idleTime
maxResources
wPool <- newPool (setNumStripes (Just 4) wPoolConfig)
wNodeStory <- fromDBNodeStoryEnv wPool
_w_env_job_state <- newTVarIO Nothing
withLoggerIO Mock $ \wioLogger -> do
let wEnv = WorkerEnv { _w_env_config = gargConfig
, _w_env_logger = wioLogger
, _w_env_pool = wPool
, _w_env_nodeStory = wNodeStory
, _w_env_mail = errorTrace "[wEnv] w_env_mail requested but not available"
, _w_env_nlp = nlpServerMap $ gargConfig ^. gc_nlp_config
, _w_env_job_state }
wState <- initWorkerState wEnv (fromJust $ head $ gargConfig ^. gc_worker . wsDefinitions)
test_worker_tid <- forkIO $ Worker.run wState
pure $ TestEnv { test_db = DBHandle pool db
, test_config = gargConfig
, test_nodeStory
, test_usernameGen = ugen
, test_logger = logger
, test_worker_tid
}
withTestNotificationConfig gargConfig0 $ \gargConfig -> do
let log_cfg = gargConfig ^. gc_logging
let idleTime = 60.0
let maxResources = 2
let poolConfig = defaultPoolConfig (PG.connectPostgreSQL (Tmp.toConnectionString db))
PG.close
idleTime
maxResources
pool <- newPool (setNumStripes (Just 2) poolConfig)
bootstrapDB db pool gargConfig
ugen <- emptyCounter
test_nodeStory <- fromDBNodeStoryEnv pool
withLoggerIO log_cfg $ \logger -> do
let wPoolConfig = defaultPoolConfig (PG.connectPostgreSQL (Tmp.toConnectionString db))
PG.close
idleTime
maxResources
wPool <- newPool (setNumStripes (Just 2) wPoolConfig)
wNodeStory <- fromDBNodeStoryEnv wPool
_w_env_job_state <- newTVarIO Nothing
withLoggerIO log_cfg $ \wioLogger -> do
let wEnv = WorkerEnv { _w_env_config = gargConfig
, _w_env_logger = wioLogger
, _w_env_pool = wPool
, _w_env_nodeStory = wNodeStory
, _w_env_mail = errorTrace "[wEnv] w_env_mail requested but not available"
, _w_env_nlp = nlpServerMap $ gargConfig ^. gc_nlp_config
, _w_env_job_state }
wState <- initWorkerState wEnv (fromJust $ head $ gargConfig ^. gc_worker . wsDefinitions)
test_worker_tid <- forkIO $ Worker.run wState
pure $ TestEnv { test_db = DBHandle pool db
, test_config = gargConfig
, test_nodeStory
, test_usernameGen = ugen
, test_logger = logger
, test_worker_tid
}
-- | Run an action against a freshly set-up test environment (temporary
-- database included), tearing everything down afterwards even if the
-- action throws.
withTestDB :: (TestEnv -> IO ()) -> IO ()
withTestDB action = bracket setup teardown action
......@@ -134,7 +153,7 @@ testEnvToPgConnectionInfo TestEnv{..} =
PG.ConnectInfo { PG.connectHost = "0.0.0.0"
, PG.connectPort = fromIntegral $ fromMaybe 5432
$ getLast
$ Opts.port
$ Client.port
$ Tmp.toConnectionOptions
$ _DBTmp test_db
, PG.connectUser = dbUser
......
......@@ -22,11 +22,9 @@ import Control.Monad.Trans.Control
import Data.IORef
import Data.Map qualified as Map
import Data.Pool
import Data.Text qualified as T
import Database.PostgreSQL.Simple qualified as PG
import Database.Postgres.Temp qualified as Tmp
import Gargantext hiding (to)
import Gargantext.API.Admin.EnvTypes
import Gargantext.API.Admin.Orchestrator.Types
import Gargantext.API.Errors.Types
import Gargantext.API.Prelude
......@@ -36,6 +34,7 @@ import Gargantext.Core.Mail.Types (HasMail(..))
import Gargantext.Core.NLP (HasNLPServer(..))
import Gargantext.Core.NodeStory
import Gargantext.System.Logging (HasLogger(..), Logger, MonadLogger(..))
import Gargantext.System.Logging.Loggers
import Gargantext.Utils.Jobs.Monad (MonadJobStatus(..))
import Network.URI (parseURI)
import Prelude qualified
......@@ -132,22 +131,11 @@ instance MonadLogger (GargM TestEnv BackendInternalError) where
getLogger = asks test_logger
-- | Test-suite logger for the Gargantext monad: a thin newtype wrapper
-- around the monadic stdout/stderr logger, configured from 'LogConfig'.
-- (This resolves leftover pre-merge duplicate member definitions to the
-- post-merge 'MonadicStdLogger'-based implementation.)
instance HasLogger (GargM TestEnv BackendInternalError) where
  newtype instance Logger (GargM TestEnv BackendInternalError) =
    GargTestLogger { _GargTestLogger :: MonadicStdLogger FL.LogStr IO }
  type instance LogInitParams (GargM TestEnv BackendInternalError) = LogConfig
  type instance LogPayload (GargM TestEnv BackendInternalError) = FL.LogStr
  -- Build the underlying std logger in IO and wrap it.
  initLogger cfg = GargTestLogger <$> liftIO (monadicStdLogger cfg)
  -- Tear down the wrapped logger's resources.
  destroyLogger = liftIO . _msl_destroy . _GargTestLogger
  -- Delegate both message forms to the wrapped logger.
  logMsg (GargTestLogger ioLogger) lvl msg = liftIO $ _msl_log_msg ioLogger lvl msg
  logTxt (GargTestLogger ioLogger) lvl msg = liftIO $ _msl_log_txt ioLogger lvl msg
......@@ -333,15 +333,17 @@ waitUntil pred' timeoutMs = do
-- | Wait up to @timeoutMs@ milliseconds for @tchan@ to yield the
-- @expected@ value.  Panics if a different value arrives first, or if
-- the timeout is exhausted before any value arrives.
waitForTChanValue :: (HasCallStack, Eq a, Show a) => TChan a -> a -> Int -> IO ()
waitForTChanValue tchan expected timeoutMs = do
  mTimeout <- Timeout.timeout total_wait $ do
    v <- atomically $ readTChan tchan
    unless (v == expected) $ panicTrace $ "[waitForTChanValue] v != expected (" <> show v <> " != " <> show expected <> ")"
  -- 'Timeout.timeout' returns Nothing iff the deadline was hit;
  -- no timeout should have occurred here.
  when (isNothing mTimeout) $
    -- Report the caller-supplied millisecond figure, not 'total_wait'
    -- (which is in microseconds and was previously mislabelled).
    panicTrace $ "[waitForTChanValue] timeout of " <> show timeoutMs <> " milliseconds exhausted when waiting for " <> show expected <> " on tchan"
  where
    -- 'Timeout.timeout' expects microseconds; callers pass milliseconds.
    total_wait :: Int
    total_wait = timeoutMs * 1_000
waitForTSem :: HasCallStack => TSem -> Int -> IO ()
waitForTSem tsem timeoutMs = do
......
......@@ -20,8 +20,8 @@ startCoreNLPServer :: IO ProcessHandle
startCoreNLPServer = do
putText "calling start core nlp"
devNull <- openFile "/dev/null" WriteMode
let p = proc "./startServer.sh" []
(_, _, _, hdl) <- (createProcess $ p { cwd = Just "devops/coreNLP/stanford-corenlp-current"
let p = proc "startCoreNLPServer.sh" []
(_, _, _, hdl) <- (createProcess $ p { cwd = Nothing
-- NOTE(adn) Issue #451, this one has to stay disabled, because if we
-- turn it on, despite the confusing documentation on the `process` library
-- it will cause the Haskell RTS to completely ignore the Ctrl^c and instead
......@@ -34,10 +34,7 @@ startCoreNLPServer = do
, std_err = UseHandle devNull
}) `catch` \e -> case e of
_ | True <- "does not exist" `isInfixOf` (T.pack . show @SomeException $ e)
-> fail $ "Cannot execute the 'startServer.sh' script. If this is the " <>
"first time you are running the tests, you have to run " <>
"cd devops/coreNLP && ./build.sh first. You have to run it only once, " <>
"and then you are good to go for the time being."
-> fail $ "Cannot execute the 'startCoreNLPServer.sh' script. Make sure you are in a nix environment."
| otherwise -> throwIO e
pure hdl
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment