Commit 74bc33a7 authored by Loïc Chapron's avatar Loïc Chapron

add script, TermOcc, fix error lang

parent fffc1c1b
#!/usr/bin/env python
# coding: utf-8
"""Convert a ZIP of Istex JSON documents into a GarganText TSV corpus.

Usage: python3 Istex2ggtx.py input.zip output.tsv
"""
import json
import pandas as pd
import numpy as np
import glob
import sys
import datetime
import zipfile

input_file = sys.argv[1]
output_file = sys.argv[2]


def _clean(text):
    """Strip tab characters (the TSV separator) and non-UTF-8 artifacts."""
    return text.encode(encoding='UTF-8', errors='ignore').decode("utf-8").replace("\t", " ")


output = []
with zipfile.ZipFile(input_file, 'r') as zip_ref:
    for file in zip_ref.namelist():
        # Keep only real documents: *.json files that are not the manifest.
        # (endswith avoids the IndexError the old split('.')[1] raised on
        # names without a dot.)
        if not file.endswith('.json') or file.split('.')[0] == 'manifest':
            continue
        try:
            article = json.load(zip_ref.open(file))
            temp = {}
            temp["title"] = _clean(article.get("title", ""))
            temp["abstract"] = _clean(article.get("abstract", ""))
            authors = ", ".join(a["name"] for a in article.get("author", []))
            temp["code"] = article.get("_id")
            # Commas become ';' because ',' is significant downstream.
            temp["authors"] = _clean(authors).replace(",", ";")
            temp["source"] = _clean(article["host"]["title"])
            temp["publication_year"] = article.get("publicationDate", datetime.date.today().year)
            temp["publication_month"] = 1
            temp["publication_day"] = 1
            output.append(temp)
        except Exception as e:
            # Best-effort: log the bad file and keep converting the rest.
            print(file, e)

output = pd.DataFrame(output)
# Titles are compared lowercase with commas removed to catch near-duplicates.
duplicated = output['title'].str.lower().replace(",", "", regex=True).duplicated()
if duplicated.any():
    print("\nQuelques fichiers n'ont pas été introduits dans le TSV car ils pourraient apparaitre plusieurs fois:")
    # Report every duplicated row (the old loop stopped at size - 1 and
    # silently skipped the last document).
    for i in range(output["title"].size):
        if duplicated[i]:
            print("\t" + output["code"][i] + " " + output["title"][i])
# Reassign: DataFrame.drop returns a new frame (the old call discarded it,
# so the internal 'code' id leaked into the TSV).
output = output.drop(['code'], axis=1)
output = output[~duplicated]

output.to_csv(output_file, sep='\t', index=False)
print("")
# In[ ]:
# IstexToGargantext
## About The project
IstexToGargantext converts a ZIP file from Istex into a TSV file for GarganText.
## Usage
```shell
python3 Istex2ggtx.py file.zip
```
## Date
This script was last updated on 2023/07/24.
It may be outdated in the future.
# ZoteroToGargantext
## About The project
ZoteroToGarganText isn't usable right now; it needs modifications to transform TXT and PDF files from Zotero into a TSV file for GarganText.
## Usage
```shell
python3 ZoteroToGarganText.py
```
from pyzotero import zotero
from datetime import date
def getDataFromWebPage(item):
    """Build one TSV row from a Zotero web-page-like item.

    Field order matches the header written by makeTSV:
    title, source, year, month, day, abstract, authors, weight.
    Returns the row as a tab-separated, newline-terminated string.
    """
    data = item['data']
    # Title
    title = data['title']
    # Authors: ';'-joined last names (',' would clash with CSV tooling).
    # NOTE(review): assumes every creator entry has 'lastName' — Zotero
    # items with a single 'name' field would raise KeyError; confirm.
    if 'creators' in data.keys():
        authors = ';'.join(author['lastName'] for author in data['creators'])
    else:
        authors = ''
    # Source
    source = data['url']
    # Abstract (optional field)
    abstract = data.get('abstractNote', '')
    # Date: Zotero dates may be 'YYYY', 'YYYY-MM' or 'YYYY-MM-DDTHH:MM:SS'.
    # The old code indexed pdate[2] unconditionally and crashed on partial
    # dates; strip a trailing time component and pad missing parts with '1'.
    if 'date' in data.keys() and data['date'] != '':
        pdate = data['date'].split('-')
        pdate[-1] = pdate[-1].split('T')[0]
        while len(pdate) < 3:
            pdate.append('1')
        pdate = '\t'.join(pdate)
    else:
        pdate = str(date.today().year) + '\t1\t1'
    # Sanitize free-text fields: drop tabs, quotes and newlines that would
    # break the TSV structure.
    abstract = abstract.encode(encoding='UTF-8', errors='ignore').decode(
        "utf-8").replace('\t', '').replace('"', '').replace('\n', '')
    title = title.encode(encoding='UTF-8', errors='ignore').decode(
        "utf-8").replace('\t', '').replace('"', '').replace('\n', '')
    source = source.encode(encoding='UTF-8', errors='ignore').decode(
        "utf-8").replace('\t', '').replace('"', '').replace('\n', '')
    # Output
    return str(title) + "\t" + source + "\t" + str(pdate) + "\t" + abstract + "\t" + authors + "\t" + str(1) + "\n"
def makeTSV(items):
    """Write output.tsv for the given Zotero items and return its content.

    Only web-page-like item types are converted; attachments are merely
    logged (download support is not implemented yet). Returning the text
    lets callers reuse it (the caller already assigns the result).
    """
    txt = "title\tsource\tpublication_year\tpublication_month\tpublication_day\tabstract\tauthors\tweight\n"
    for item in items:
        item_type = item['data']['itemType']
        if item_type in ['webpage', 'encyclopediaArticle', 'blogPost']:
            txt += getDataFromWebPage(item)
        elif item_type == 'attachment':
            # Attachment download not implemented yet; keep a trace.
            #with open('tmp/' + item['data']['title'], 'wb') as f:
            #    f.write(zot.file(item['data']['key']))
            print(item)
        else:
            print("??")
            #print(item['data']['itemType'])
    with open('output.tsv', 'w') as f:
        f.write(txt)
    return txt
print("Id:")
id = input()
zot = zotero.Zotero(id, 'user')
print("Items (i)/ Collection (c)")
t = input()
if t == 'i':
print('Search :')
search = input()
zot.add_parameters(q=search)
items = zot.top()
else:
docs = zot.collections()
tmp = {}
print('Collection :')
for doc in docs:
tmp[doc['data']['name']] = doc['data']['key']
print(doc['data']['name'])
print("choose collection")
col = input()
items = []
for elem in col.split(' '):
items += zot.collection_items(tmp[elem])
txt = makeTSV(items)
import requests as req
import json
import sys
from datetime import date

# python3 IsidoreAPIToGarganText search nb_replies language
# ex : python3 IsidoreAPIToGarganText "brain muscle" 100 fra
"""Query the Isidore API and dump the replies as a GarganText TSV corpus."""

try:
    search = sys.argv[1]
    replies = sys.argv[2]
    language = sys.argv[3]
except IndexError:
    print("! args error\n")
    sys.exit(0)

# sys.argv values are strings: compare numerically (the old `replies > 1000`
# raised TypeError on Python 3).
if int(replies) > 1000:
    print("The number of replies must be less than 1000")
    sys.exit(0)


def pick_translation(field, language):
    """Resolve a multilingual Isidore field (str, dict or list) to a string.

    Prefers the entry whose '@xml:lang' matches the two-letter prefix of
    `language` (an iso639-3 code); falls back to the first entry otherwise.
    """
    if type(field) == str:
        return field
    if type(field) == list:
        chosen = ''
        for lang in field:
            if type(lang) != str and lang['@xml:lang'] == language[:2]:
                chosen = lang['$']
        if chosen != '':
            return chosen
        if type(field[0]) == str:
            return field[0]
        return field[0]['$']
    return field['$']


def clean(text):
    """Strip TSV-breaking characters (tabs, quotes, newlines) from a field."""
    return (text.encode(encoding='UTF-8', errors='ignore').decode("utf-8")
            .replace('\t', '').replace('"', '').replace('\n', ''))


url = ('https://api.isidore.science/resource/search?q=' + search +
       '&output=json&replies=' + replies +
       '&language=http://lexvo.org/id/iso639-3/' + language)
resp = req.get(url)
jsontxt = json.loads(resp.content)
docs = jsontxt["response"]["replies"]["content"]["reply"]

# Output file — the with-statement guarantees the handle is closed (the old
# code opened output.csv and never closed it).
header = "title\tsource\tpublication_year\tpublication_month\tpublication_day\tabstract\tauthors\tweight\n"
with open("output.csv", "w") as output:
    output.write(header)
    for doc in docs:
        # Title (may be multilingual)
        title = pick_translation(doc["isidore"]["title"], language)
        # Source
        source = doc["isidore"]["source_info"]["sourceName"]["$"]
        # Authors: ';'-joined '@origin' values
        if doc['isidore']['enrichedCreators'] != []:
            list_author = doc["isidore"]["enrichedCreators"]["creator"]
            if type(list_author) == list:
                authors = ';'.join(a["@origin"] for a in list_author)
            else:
                authors = list_author["@origin"]
        else:
            authors = ''
        # Abstract (optional, may be multilingual)
        if 'abstract' in doc['isidore'].keys() and doc["isidore"]["abstract"] != []:
            abstract = pick_translation(doc["isidore"]["abstract"], language)
        else:
            abstract = ''
        # Publication date as year\tmonth\tday; today's year as fallback.
        try:
            pdate = '\t'.join(doc["isidore"]["date"]["normalizedDate"].split('-'))
        except Exception:
            pdate = str(date.today().year) + '\t01\t01'
        row = (clean(title) + "\t" + clean(source) + "\t" + pdate + "\t" +
               clean(abstract) + "\t" + authors + "\t" + str(1) + "\n")
        output.write(row)
# IsidoreAPIToGargantext
## About The project
IsidoreAPIToGargantext calls the Isidore API to perform a search using the given parameters and creates a TSV file usable in GarganText.
## Usage
```shell
python3 IsidoreAPIToGargantext.py search replies lang
```
search is what you want to search for in Isidore
replies is the number of replies taken from the answer of Isidore
lang is the language (see note)
Output a TSV legacy corpus named output.tsv
## Date
This script was last updated on 2023/07/24.
It may be outdated in the future.
## Note
language | lang | work?
| :--- |:--- |:---
French | fra | fine
English | eng | fine
German | deu | fine
Spanish | spa | fine
Italian | ita | fine
Portuguese | por | fine
Dutch | nld | low answer
Russian | rus | low answer
Chinese | lzh | should work but doesn't actually
\ No newline at end of file
...@@ -79,3 +79,7 @@ is_section = true ...@@ -79,3 +79,7 @@ is_section = true
path = "pages/Merge_Term_GarganText.py" path = "pages/Merge_Term_GarganText.py"
name = "Merge GarganText Terms" name = "Merge GarganText Terms"
[[pages]]
path = "pages/GEXF_To_TermOcc.py"
name = "GEXF To Term"
locale,key,value
fr,title,"# Term / Occurrence"
en,title,"# Json To TSV"
fr,text,"Transforme un fichier GEXF venant du graphe de GarganText en un fichier TSV de terme et d'occurrence."
en,text,"Transform a GEXF file of a graph from GarganText to a TSV file of term and occurrence."
fr,file,"Choisir un fichier"
en,file,"Choose a file"
fr,new_file,"Téléchargez votre fichier TSV :"
en,new_file,"Download your TSV file:"
fr,error,"Erreur : le fichier n'est pas valide"
en,error,"Error : the file isn't valid"
\ No newline at end of file
...@@ -2,18 +2,18 @@ locale,key,value ...@@ -2,18 +2,18 @@ locale,key,value
fr,title,"# Istex Vers GarganText" fr,title,"# Istex Vers GarganText"
en,title,"# Istex To GarganText" en,title,"# Istex To GarganText"
fr,text,"Importe un ZIP de documents provenant d'Istex et le transforme en fichier TSV." fr,text,"Importe un ZIP de documents au format Lodex après une [recherche réalisée dans Istex](https://dl.istex.fr/) et le transforme en fichier TSV."
en,text,"Import a ZIP file coming from Istex and convert it into a TSV file." en,text,"Import a ZIP file coming from Istex and convert it into a TSV file."
fr,file,"Choisir un fichier" fr,file,"Choisir un fichier"
en,file,"Choose a file" en,file,"Choose a file"
fr,dup1,"Certains fichiers (" fr,dup1,"Certains fichiers ("
fr,dup2,") ont été retirés pour divers raisons (fichier au mauvais format, fichier identique...)" fr,dup2,") ont été retirés pour différentes raisons (notamment fichiers identiques au regard des critères considérés par GarganText, format inexploitable...)"
en,dup1,"Some file (" en,dup1,"Some file ("
en,dup2,") have been removed for various reasons (file with wrong format, file already present...)" en,dup2,") have been removed for various reasons (especially indentic file, unusable format...)"
fr,new_file,"Télécharge ton fichier TSV :" fr,new_file,"Téléchargez votre fichier TSV :"
en,new_file,"Download your TSV file:" en,new_file,"Download your TSV file:"
fr,error,"Erreur : le fichier n'est pas valide" fr,error,"Erreur : le fichier n'est pas valide"
......
import streamlit as st
import networkx as nx
import src.basic as tmp

# Shared page bootstrap: sets up layout and locale strings for this page.
tmp.base('GEXFToTermOcc')
def create_file(file):
    """Convert an uploaded GarganText GEXF graph into a term/occurrence TSV.

    GarganText exports GEXF 1.3, which networkx cannot parse; the version
    and namespace strings are rewritten to 1.2draft in the upload buffer
    before reading. Returns the TSV text ('mapTerm\\tocc' header, one line
    per node label/size pair, sorted by size descending).
    """
    patched = file.getvalue().decode('utf-8') \
        .replace('version="1.3"', 'version="1.2"') \
        .replace('xmlns="http://www.gexf.net/1.3"', 'xmlns="http://www.gexf.net/1.2draft"') \
        .replace('xmlns:viz="http://gexf.net/1.3/viz"', 'xmlns:viz="http://www.gexf.net/1.2draft/viz"') \
        .replace('xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"', 'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"') \
        .replace('xsi:schemaLocation="http://gexf.net/1.3 http://gexf.net/1.3/gexf.xsd"', 'xsi:schemaLocation="http://www.gexf.net/1.2draft http://www.gexf.net/1.2draft/gexf.xsd"') \
        .encode()
    # Rewrite the buffer in place, then truncate: without truncate(), stale
    # trailing bytes survive whenever the patched XML is shorter than the
    # original and corrupt the parse.
    file.seek(0, 0)
    file.write(patched)
    file.truncate()
    file.seek(0, 0)
    graph = nx.read_gexf(file, version='1.2draft')
    # Node label = term, viz size = occurrence count.
    lst = [(data['label'], data['viz']['size']) for _, data in graph.nodes(True)]
    lst.sort(key=lambda x: x[1], reverse=True)
    res = 'mapTerm\tocc\n'
    for label, size in lst:
        res += label + '\t' + str(int(size)) + '\n'
    return res
# Page body: upload a GEXF file, convert it, and offer the TSV for download.
st.write(st.session_state.general_text_dict['text'])
file = st.file_uploader(st.session_state.general_text_dict['file'], type=["gexf"], key='file')
if file:
    try:
        st.write(st.session_state.general_text_dict['new_file'])
        st.download_button('Download TSV', create_file(file), 'output.csv')
    except Exception:
        # Any conversion failure surfaces as the localized error message.
        st.write(st.session_state.general_text_dict['error'])
...@@ -171,10 +171,10 @@ if st.session_state.stage_isidore > 0: ...@@ -171,10 +171,10 @@ if st.session_state.stage_isidore > 0:
form2.write(st.session_state.general_text_dict['perform1'] + str( form2.write(st.session_state.general_text_dict['perform1'] + str(
limitItems) + st.session_state.general_text_dict['perform2']) limitItems) + st.session_state.general_text_dict['perform2'])
st.session_state.nb_wanted = form2.slider( st.session_state.nb_wanted = form2.slider(
st.session_state.general_text_dict['nb_taken'], 1, limitItems) st.session_state.general_text_dict['nb_taken'], 10, limitItems, 10, 10)
else: else:
st.session_state.nb_wanted = form2.slider( st.session_state.nb_wanted = form2.slider(
st.session_state.general_text_dict['nb_taken'], 1, int(st.session_state.nb_doc)) st.session_state.general_text_dict['nb_taken'], 10, int(st.session_state.nb_doc), 10, 10)
form2.form_submit_button( form2.form_submit_button(
st.session_state.general_text_dict['submit'], on_click=set_stage, args=(2,)) st.session_state.general_text_dict['submit'], on_click=set_stage, args=(2,))
...@@ -190,5 +190,7 @@ if st.session_state.stage_isidore > 1: ...@@ -190,5 +190,7 @@ if st.session_state.stage_isidore > 1:
print(st.session_state.nb_wanted) print(st.session_state.nb_wanted)
st.session_state.output = create_output( st.session_state.output = create_output(
st.session_state.search, lang[st.session_state.language], st.session_state.nb_wanted) st.session_state.search, lang[st.session_state.language], st.session_state.nb_wanted)
st.download_button('Download TSV', st.session_state.output, 'output.csv')
fileName = "HALOutput_" + str(datetime.now().strftime("%Y-%m-%d_%H:%M:%S")) + '.csv'
st.download_button('Download TSV', st.session_state.output, fileName)
...@@ -7,6 +7,7 @@ import streamlit as st ...@@ -7,6 +7,7 @@ import streamlit as st
import requests as req import requests as req
import json import json
import time import time
from datetime import datetime
from json import JSONDecodeError from json import JSONDecodeError
import src.basic as tmp import src.basic as tmp
...@@ -18,7 +19,7 @@ numberReplies = 500 # Dont' exceed 1 000 ...@@ -18,7 +19,7 @@ numberReplies = 500 # Dont' exceed 1 000
limitItems = 5000 # Can't be superior of 10 times numberReplies limitItems = 5000 # Can't be superior of 10 times numberReplies
retryTime = 2 retryTime = 2
## Connect to Isidore API to get the numbers of docs from the research
def loadApiIsidoreNumberFile(search, language): def loadApiIsidoreNumberFile(search, language):
while (True): while (True):
url = 'https://api.isidore.science/resource/search?q=' + search + \ url = 'https://api.isidore.science/resource/search?q=' + search + \
...@@ -38,7 +39,7 @@ def loadApiIsidoreNumberFile(search, language): ...@@ -38,7 +39,7 @@ def loadApiIsidoreNumberFile(search, language):
return docs return docs
## Connect to Isidore API to get the documents from the pages
def loadApiIsidorePage(search, language, page): def loadApiIsidorePage(search, language, page):
url = 'https://api.isidore.science/resource/search?q=' + search + '&output=json&replies=' + \ url = 'https://api.isidore.science/resource/search?q=' + search + '&output=json&replies=' + \
str(numberReplies) + '&page=' + str(page) + \ str(numberReplies) + '&page=' + str(page) + \
...@@ -57,6 +58,7 @@ def loadApiIsidorePage(search, language, page): ...@@ -57,6 +58,7 @@ def loadApiIsidorePage(search, language, page):
def create_output(search, language, nb_doc): def create_output(search, language, nb_doc):
output = "title\tsource\tpublication_year\tpublication_month\tpublication_day\tabstract\tauthors\tweight\n" output = "title\tsource\tpublication_year\tpublication_month\tpublication_day\tabstract\tauthors\tweight\n"
nb = 0 nb = 0
## nb is used to return ther number of file with
for i in range(1, nb_doc//numberReplies + 1): for i in range(1, nb_doc//numberReplies + 1):
while (True): while (True):
txt = loadApiIsidorePage(search, language, i) txt = loadApiIsidorePage(search, language, i)
...@@ -64,11 +66,18 @@ def create_output(search, language, nb_doc): ...@@ -64,11 +66,18 @@ def create_output(search, language, nb_doc):
break break
time.sleep(retryTime) time.sleep(retryTime)
print('Retry') print('Retry')
tmp, nb_tmp = createFile(txt, nb_doc % numberReplies, language) tmp, nb_tmp = createFile(txt, numberReplies, language)
output += tmp output += tmp
nb += nb_tmp nb += nb_tmp
## If their is still some document do find (for exampe with 1160 documents, their is still 160 documents to find after the first part)
if nb_doc % numberReplies != 0: if nb_doc % numberReplies != 0:
txt = loadApiIsidorePage(search, language, nb_doc//numberReplies + 1) while (True):
txt = loadApiIsidorePage(search, language, nb_doc//numberReplies + 1)
if txt != 0:
break
time.sleep(retryTime)
print('Retry')
tmp, nb_tmp = createFile(txt, nb_doc % numberReplies, language) tmp, nb_tmp = createFile(txt, nb_doc % numberReplies, language)
output += tmp output += tmp
nb += nb_tmp nb += nb_tmp
...@@ -139,12 +148,15 @@ def createFile(docs, limit, language): ...@@ -139,12 +148,15 @@ def createFile(docs, limit, language):
else: else:
abstract = tmp abstract = tmp
else: else:
abstract = abstract['$'] if '$' in abstract.keys():
abstract = abstract['$']
else:
abstract = ''
if 'types' in doc['isidore'].keys(): if 'types' in doc['isidore'].keys():
if type(doc['isidore']['types']['type'] == str) and doc['isidore']['types']['type'] in ['Books', 'text']: if type(doc['isidore']['types']['type']) == str and doc['isidore']['types']['type'] in ['Books', 'text']:
nb += 1 nb += 1
elif type(doc['isidore']['types']['type'] == dict) and doc['isidore']['types']['type'][1] in ['Books', 'text']: elif type(doc['isidore']['types']['type']) == dict and doc['isidore']['types']['type']['$'] in ['Books', 'text']:
nb += 1 nb += 1
else: else:
print(title) print(title)
...@@ -239,7 +251,7 @@ form.form_submit_button( ...@@ -239,7 +251,7 @@ form.form_submit_button(
# API and Slider # API and Slider
if st.session_state.stage_isidore > 0: if st.session_state.stage_isidore > 0:
# Only call first time and after # Only call first time and after an update in the first form
if 'search' not in st.session_state or 'language' not in st.session_state or search != st.session_state.search or language != st.session_state.language: if 'search' not in st.session_state or 'language' not in st.session_state or search != st.session_state.search or language != st.session_state.language:
with st.spinner(st.session_state.general_text_dict['load_api']): with st.spinner(st.session_state.general_text_dict['load_api']):
nb_doc = int(loadApiIsidoreNumberFile(search, lang[language])) nb_doc = int(loadApiIsidoreNumberFile(search, lang[language]))
...@@ -259,10 +271,10 @@ if st.session_state.stage_isidore > 0: ...@@ -259,10 +271,10 @@ if st.session_state.stage_isidore > 0:
form2.write(st.session_state.general_text_dict['perform1'] + str( form2.write(st.session_state.general_text_dict['perform1'] + str(
limitItems) + st.session_state.general_text_dict['perform2']) limitItems) + st.session_state.general_text_dict['perform2'])
st.session_state.nb_wanted = form2.slider( st.session_state.nb_wanted = form2.slider(
st.session_state.general_text_dict['nb_taken'], 1, limitItems) st.session_state.general_text_dict['nb_taken'], 10, limitItems, 10, 10)
else: else:
st.session_state.nb_wanted = form2.slider( st.session_state.nb_wanted = form2.slider(
st.session_state.general_text_dict['nb_taken'], 1, int(st.session_state.nb_doc)) st.session_state.general_text_dict['nb_taken'], 10, int(st.session_state.nb_doc), 10, 10)
form2.form_submit_button( form2.form_submit_button(
st.session_state.general_text_dict['submit'], on_click=set_stage, args=(2,)) st.session_state.general_text_dict['submit'], on_click=set_stage, args=(2,))
...@@ -280,4 +292,5 @@ if st.session_state.stage_isidore > 1: ...@@ -280,4 +292,5 @@ if st.session_state.stage_isidore > 1:
st.write(st.session_state.general_text_dict['doc_abstract1'] + str( st.write(st.session_state.general_text_dict['doc_abstract1'] + str(
st.session_state.nb_bad_file) + st.session_state.general_text_dict['doc_abstract2']) st.session_state.nb_bad_file) + st.session_state.general_text_dict['doc_abstract2'])
st.download_button('Download TSV', st.session_state.output, 'output.csv') fileName = "isidoreOutput_" + str(datetime.now().strftime("%Y-%m-%d_%H:%M:%S")) + '.csv'
st.download_button('Download TSV', st.session_state.output, fileName)
...@@ -5,7 +5,7 @@ Loïc Chapron ...@@ -5,7 +5,7 @@ Loïc Chapron
import json import json
import pandas as pd import pandas as pd
import datetime from datetime import datetime
import zipfile import zipfile
import streamlit as st import streamlit as st
import src.basic as tmp import src.basic as tmp
...@@ -60,8 +60,6 @@ def read_zip(zip_file): ...@@ -60,8 +60,6 @@ def read_zip(zip_file):
temp["publication_year"] = article["publicationDate"][0] temp["publication_year"] = article["publicationDate"][0]
except: except:
temp["publication_year"] = datetime.date.today().year temp["publication_year"] = datetime.date.today().year
temp["publication_year"] = article.get(
"publicationDate", datetime.date.today().year)[0]
temp["publication_month"] = 1 temp["publication_month"] = 1
temp["publication_day"] = 1 temp["publication_day"] = 1
...@@ -91,13 +89,13 @@ file = st.file_uploader( ...@@ -91,13 +89,13 @@ file = st.file_uploader(
if file: if file:
try: try:
name = file.name.split('.')[0] + '.csv' fileName = "istexOutput_" + str(datetime.now().strftime("%Y-%m-%d_%H:%M:%S")) + '.csv'
res, nb_dup = read_zip(file) res, nb_dup = read_zip(file)
if nb_dup: if nb_dup:
st.write(st.session_state.general_text_dict['dup1'] + str( st.write(st.session_state.general_text_dict['dup1'] + str(
nb_dup) + st.session_state.general_text_dict['dup2']) nb_dup) + st.session_state.general_text_dict['dup2'])
st.write(st.session_state.general_text_dict['new_file']) st.write(st.session_state.general_text_dict['new_file'])
st.download_button(name, res, name) st.download_button('Download TSV', res, fileName)
except Exception as e: except Exception as e:
st.write(st.session_state.general_text_dict['error']) st.write(st.session_state.general_text_dict['error'])
print(e) print(e)
......
...@@ -6,7 +6,7 @@ Loïc Chapron ...@@ -6,7 +6,7 @@ Loïc Chapron
import streamlit as st import streamlit as st
import requests as req import requests as req
import json import json
from datetime import date from datetime import date, datetime
import src.basic as tmp import src.basic as tmp
...@@ -308,7 +308,8 @@ if st.session_state.stage == 2 and st.session_state.format == 'collections': ...@@ -308,7 +308,8 @@ if st.session_state.stage == 2 and st.session_state.format == 'collections':
output = createTSVfromCollections() output = createTSVfromCollections()
st.write(st.session_state.general_text_dict['fileTSV1'] + str( st.write(st.session_state.general_text_dict['fileTSV1'] + str(
len(output.split('\n'))-2) + st.session_state.general_text_dict['fileTSV2']) len(output.split('\n'))-2) + st.session_state.general_text_dict['fileTSV2'])
st.download_button('Download TSV', output, 'output.csv') fileName = "zoteroOutput_" + str(datetime.now().strftime("%Y-%m-%d_%H:%M:%S")) + '.csv'
st.download_button('Download TSV', output, fileName)
if st.session_state.stage > 0: if st.session_state.stage > 0:
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment