humanities / gargantext / Commits

Commit b9062d6c, authored Jul 10, 2017 by sim
Parent: 663a31db

[FIX] Bug #103: use timezone aware datetimes while parsing docs
Showing 7 changed files with 88 additions and 76 deletions (+88 -76):

    gargantext/constants.py                            +4   -4
    gargantext/util/__init__.py                        +1   -0
    gargantext/util/dates.py                          +33   -6
    gargantext/util/parsers/MULTIVAC.py               +12  -15
    gargantext/util/parsers/_Parser.py                +29  -40
    gargantext/util/toolchain/hyperdata_indexing.py    +1   -2
    gargantext/util/toolchain/ngram_coocs.py           +8   -9
gargantext/constants.py (+4 -4)

@@ -36,7 +36,7 @@ import os
 import re
 import importlib
 from gargantext.util.lists import *
-from gargantext.util.tools import datetime, convert_to_date
+from gargantext.util import datetime, convert_to_datetime
 from .settings import BASE_DIR

 # types & models (nodes, lists, hyperdata, resource) ---------------------------------------------
@@ -108,9 +108,9 @@ INDEXED_HYPERDATA = {
     'publication_date':
         { 'id'              : 2
-        , 'type'            : datetime.datetime
-        , 'convert_to_db'   : convert_to_date
-        , 'convert_from_db' : datetime.datetime.fromtimestamp
+        , 'type'            : datetime
+        , 'convert_to_db'   : convert_to_datetime
+        , 'convert_from_db' : convert_to_datetime
         },
     'title':
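For illustration (not part of the commit), a minimal sketch of why both DB converters can now be the same helper: convert_to_datetime normalizes strings, timestamps and datetimes alike to an aware UTC datetime, and leaves already-aware values unchanged. The values below are hypothetical, and running this assumes a configured Django settings module.

    from gargantext.constants import INDEXED_HYPERDATA

    field = INDEXED_HYPERDATA['publication_date']
    stored   = field['convert_to_db']("2014-10-23 09:57:42")  # naive string -> aware UTC datetime
    restored = field['convert_from_db'](stored)               # already aware -> returned unchanged
    assert restored == stored and restored.tzinfo is not None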
gargantext/util/__init__.py (new file @ 100644, +1 -0)

+from .dates import datetime, convert_to_datetime, MINYEAR
gargantext/util/dates.py (+33 -6)

 import os
 from gargantext.settings import MEDIA_ROOT
-import datetime
-import dateutil
+from datetime import MINYEAR
+from django.utils.dateparse import parse_datetime
+from django.utils.timezone import datetime as _datetime, utc as UTC, now as utcnow
+
+__all__ = ['convert_to_datetime', 'datetime', 'MINYEAR']
+
+
+class datetime(_datetime):
+    @staticmethod
+    def now():
+        return utcnow()
+
+    @staticmethod
+    def utcfromtimestamp(ts):
+        return _datetime.utcfromtimestamp(ts).replace(tzinfo=UTC)
+
+    @staticmethod
+    def parse(s):
+        dt = parse_datetime(s)
+        return dt.astimezone(UTC) if dt.tzinfo else dt.replace(tzinfo=UTC)
+
+
-def convert_to_date(date):
-    if isinstance(date, (int, float)):
-        return datetime.datetime.timestamp(date)
+def convert_to_datetime(dt):
+    if isinstance(dt, (int, float)):
+        return datetime.utcfromtimestamp(dt)
+    elif isinstance(dt, str):
+        return datetime.parse(dt)
+    elif isinstance(dt, _datetime):
+        args = (dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second)
+        return datetime(*args, tzinfo=dt.tzinfo or UTC).astimezone(UTC)
     else:
-        return dateutil.parser.parse(date)
+        raise ValueError("Can't convert to datetime: %r" % dt)
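A short usage sketch (not part of the commit) covering the three accepted input types; every branch returns a UTC-aware value, and anything else now fails loudly instead of being guessed at. Running it assumes a configured Django settings module, since the module imports django.utils.timezone.

    from gargantext.util.dates import datetime, convert_to_datetime

    print(convert_to_datetime(0))                      # epoch timestamp -> 1970-01-01 00:00:00+00:00
    print(convert_to_datetime("2014-10-23 09:57:42"))  # naive string    -> 2014-10-23 09:57:42+00:00
    print(convert_to_datetime(datetime.now()))         # aware datetime  -> unchanged, still UTC
    try:
        convert_to_datetime([])                        # unsupported type
    except ValueError as error:
        print(error)                                   # Can't convert to datetime: []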
gargantext/util/parsers/MULTIVAC.py (+12 -15)

@@ -18,30 +18,30 @@ class MultivacParser(Parser):
         '''
         contents = filebuf.read().decode("UTF-8")
         data     = json.loads(contents)
         filebuf.close()

         json_docs      = data
         hyperdata_list = []

         hyperdata_path = { "id"       : "id"
                          , "title"    : "title"
                          , "abstract" : "abstract"
                          , "type"     : "type"
                          }

         for json_doc in json_docs:
             hyperdata = {}
             doc = json_doc["_source"]

             for key, path in hyperdata_path.items():
                 hyperdata[key] = doc.get(path, "")

             hyperdata["source"] = doc.get("serial", {})\
                                      .get("journaltitle", "REPEC Database")

             try:
                 hyperdata["url"] = doc.get("file", {})\
                                       .get("url", "")
@@ -51,15 +51,15 @@ class MultivacParser(Parser):
             hyperdata["authors"] = ", ".join(
                 [p.get("person", {})
                   .get("name", "")
                  for p in doc.get("hasauthor", [])
                 ]
             )

             year = doc.get("serial", {})\
                       .get("issuedate", None)

             if year == "Invalide date":
                 year = doc.get("issuedate", None)
@@ -73,10 +73,7 @@ class MultivacParser(Parser):
                 date = datetime.now()

             hyperdata["publication_date"] = date
-            hyperdata["publication_year"]  = str(date.year)
-            hyperdata["publication_month"] = str(date.month)
-            hyperdata["publication_day"]   = str(date.day)

             hyperdata_list.append(hyperdata)

         return hyperdata_list
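The three deleted assignments are not lost: once publication_date is a timezone-aware datetime, the base Parser (next file) regenerates every split field generically. A hypothetical sketch, not part of the commit, of the replacement mechanism:

    # How _Parser now derives the split fields from the aware date.
    from gargantext.util import datetime

    date = datetime.now()                  # timezone-aware UTC "now"
    hyperdata = {"publication_date": date}
    for part in ('year', 'month', 'day'):  # _Parser also adds hour/minute/second
        hyperdata["publication_" + part] = getattr(date, part)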
gargantext/util/parsers/_Parser.py (+29 -40)

-import datetime
 import dateutil.parser
 import zipfile
 import re
 import dateparser as date_parser

 from gargantext.util.languages import languages
+from gargantext.util import datetime, convert_to_datetime, MINYEAR

-DEFAULT_DATE = datetime.datetime(datetime.MINYEAR, 1, 1)
+DEFAULT_DATE = datetime(MINYEAR, 1, 1)


 class Parser:
@@ -34,29 +34,29 @@ class Parser:
     def format_hyperdata_dates(self, hyperdata):
         """Format the dates found in the hyperdata.

         Examples:
-            {"publication_date": "2014-10-23 09:57:42"}
-            -> {"publication_date": "2014-10-23 09:57:42", "publication_year": "2014", ...}
+            {"publication_date": "2014-10-23 09:57:42+00:00"}
+            -> {"publication_date": "2014-10-23 09:57:42+00:00", "publication_year": "2014", ...}
             {"publication_year": "2014"}
-            -> {"publication_date": "2014-01-01 00:00:00", "publication_year": "2014", ...}
+            -> {"publication_date": "2014-01-01 00:00:00+00:00", "publication_year": "2014", ...}
         """

         # First, check the split dates...
         # This part mainly deal with Zotero data but can be usefull for others
         # parts
-        date_string = hyperdata.get('publication_date_to_parse', None)
+        date_string = hyperdata.get('publication_date_to_parse')
         if date_string is not None:
             date_string = re.sub(r'\/\/+(\w*|\d*)', '', date_string)
             try:
-                hyperdata['publication' + "_date"] = dateutil.parser.parse(
+                hyperdata['publication_date'] = dateutil.parser.parse(
                     date_string,
                     default=DEFAULT_DATE
-                ).strftime("%Y-%m-%d %H:%M:%S")
+                )
             except Exception as error:
                 print(error, 'Date not parsed for:', date_string)
-                hyperdata['publication_date'] = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+                hyperdata['publication_date'] = datetime.now()

-        elif hyperdata.get('publication_year', None) is not None:
+        elif hyperdata.get('publication_year') is not None:
             prefixes = [key[:-5] for key in hyperdata.keys() if key[-5:] == "_year"]
             # eg prefixes : ['publication']
@@ -64,56 +64,45 @@ class Parser:
             for prefix in prefixes:
                 date_string = hyperdata[prefix + "_year"]
-                key = prefix + "_month"
-                if key not in hyperdata:
-                    break
-                date_string += " " + hyperdata.get(key, "01")
-                key = prefix + "_day"
-                if key in hyperdata:
-                    date_string += " " + hyperdata.get(key, "01")
-                key = prefix + "_hour"
-                if key in hyperdata:
-                    date_string += " " + hyperdata.get(key, "01")
-                key = prefix + "_minute"
-                if key in hyperdata:
-                    date_string += ":" + hyperdata.get(key, "01")
-                key = prefix + "_second"
-                if key in hyperdata:
-                    date_string += ":" + hyperdata.get(key, "01")
+                # FIXME: except for year is it necessary to test that key exists
+                for part in ('month', 'day', 'hour', 'minute', 'second'):
+                    # when we have a default value in .get(key, "01") ??
+                    key = prefix + '_' + part
+                    if key in hyperdata:
+                        sep = ":" if key in ('minute', 'second') else " "
+                        date_string += sep + hyperdata.get(key, '01')

                 try:
-                    hyperdata[prefix + "_date"] = dateutil.parser.parse(date_string).strftime("%Y-%m-%d %H:%M:%S")
+                    hyperdata[prefix + "_date"] = dateutil.parser.parse(date_string)
                 except Exception as error:
                     try:
                         print("_Parser: error in full date parse", error, date_string)
                         # Date format: 1994 NOV-DEC
-                        hyperdata[prefix + "_date"] = date_parser.parse(str(date_string)[:8]).strftime("%Y-%m-%d %H:%M:%S")
+                        hyperdata[prefix + "_date"] = date_parser.parse(str(date_string)[:8])
                     except Exception as error:
                         try:
                             print("_Parser: error in short date parse", error)
                             # FIXME Date format: 1994 SPR
                             # By default, we take the year only
-                            hyperdata[prefix + "_date"] = date_parser.parse(str(date_string)[:4]).strftime("%Y-%m-%d %H:%M:%S")
+                            hyperdata[prefix + "_date"] = date_parser.parse(str(date_string)[:4])
                         except Exception as error:
                             print("_Parser:", error)
         else:
             print("WARNING: Date unknown at _Parser level, using now()")
-            hyperdata['publication_date'] = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+            hyperdata['publication_date'] = datetime.now()

         # ...then parse all the "date" fields, to parse it into separate elements
         prefixes = [key[:-5] for key in hyperdata.keys() if key[-5:] == "_date"]
         for prefix in prefixes:
-            date = dateutil.parser.parse(hyperdata[prefix + "_date"])
-            #print(date)
-            hyperdata[prefix + "_year"]   = date.strftime("%Y")
-            hyperdata[prefix + "_month"]  = date.strftime("%m")
-            hyperdata[prefix + "_day"]    = date.strftime("%d")
-            hyperdata[prefix + "_hour"]   = date.strftime("%H")
-            hyperdata[prefix + "_minute"] = date.strftime("%M")
-            hyperdata[prefix + "_second"] = date.strftime("%S")
+            name = prefix + "_date"
+            date = hyperdata[name]
+            hyperdata[name] = str(convert_to_datetime(date))
+            for part in ('year', 'month', 'day', 'hour', 'minute', 'second'):
+                hyperdata[prefix + '_' + part] = getattr(date, part)

         # print("line 116", hyperdata['publication_date'])
         # finally, return the transformed result!
         return hyperdata
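A worked example (hypothetical input, not part of the commit) of the new split-date loop: Zotero-style year/month/day fields collapse into one string that dateutil can parse, with DEFAULT_DATE filling the missing parts. Note that the committed condition tests key (which carries the prefix) rather than part against ('minute', 'second'), so the separator is always a space here:

    hyperdata = {'publication_year': '2014', 'publication_month': '10', 'publication_day': '23'}

    date_string = hyperdata['publication_year']
    for part in ('month', 'day', 'hour', 'minute', 'second'):
        key = 'publication_' + part
        if key in hyperdata:
            sep = ":" if key in ('minute', 'second') else " "
            date_string += sep + hyperdata.get(key, '01')

    print(date_string)  # "2014 10 23" -> dateutil.parser.parse gives 2014-10-23 00:00:00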
gargantext/util/toolchain/hyperdata_indexing.py (+1 -2)

@@ -43,8 +43,7 @@ def _nodes_hyperdata_generator(corpus):
                         key['id'],
                         None,
                         None,
-                        value.strftime("%Y-%m-%d %H:%M:%S"),
+                        str(value),         # FIXME check timestamp +%Z
                         None,
                         None,
                     )
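The strftime-to-str swap matters precisely because the indexed values are now timezone-aware; a standalone, stdlib-only sketch of the difference (which is what the FIXME above is checking):

    from datetime import datetime, timezone

    value = datetime(2014, 10, 23, 9, 57, 42, tzinfo=timezone.utc)
    print(value.strftime("%Y-%m-%d %H:%M:%S"))  # 2014-10-23 09:57:42        (offset dropped)
    print(str(value))                           # 2014-10-23 09:57:42+00:00  (offset kept)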
gargantext/util/toolchain/ngram_coocs.py (+8 -9)

@@ -9,7 +9,6 @@ from gargantext.util.db import get_engine
 from gargantext.util.db_cache import cache
 from gargantext.constants import DEFAULT_COOC_THRESHOLD, NODETYPES
 from gargantext.constants import INDEXED_HYPERDATA
-from gargantext.util.tools import datetime, convert_to_date

 def compute_coocs( corpus,
                    overwrite_id = None,
@@ -95,7 +94,7 @@ def compute_coocs( corpus,
     # 2b) stating the filters
     cooc_filter_sql = """
         WHERE
             n.typename = {nodetype_id}
             AND n.parent_id = {corpus_id}
         GROUP BY 1,2
@@ -105,7 +104,7 @@ def compute_coocs( corpus,
         """.format( nodetype_id = NODETYPES.index('DOCUMENT')
                   , corpus_id   = corpus.id
                   )

     # 3) taking the cooccurrences of ngram x2
     ngram_filter_A_sql += """
         -- STEP 1: X axis of the matrix
@@ -162,25 +161,25 @@ def compute_coocs( corpus,
     # 4) prepare the synonyms
     if groupings_id:
         ngram_filter_A_sql += """
             LEFT JOIN nodes_ngrams_ngrams
                  AS grA ON wlA.ngram_id = grA.ngram1_id
                  AND grA.node_id = {groupings_id}
                  -- \--> adding (joining) ngrams that are grouped
             LEFT JOIN nodes_ngrams
                  AS wlAA ON grA.ngram2_id = wlAA.ngram_id
                  AND wlAA.node_id = wlA.node_id
                  -- \--> adding (joining) ngrams that are not grouped
             --LEFT JOIN ngrams AS wlAA ON grA.ngram2_id = wlAA.id
                  -- \--> for joining all synonyms even if they are not in the main list (white list)
             """.format(groupings_id = groupings_id)

         ngram_filter_B_sql += """
             LEFT JOIN nodes_ngrams_ngrams
                  AS grB ON wlB.ngram_id = grB.ngram1_id
                  AND grB.node_id = {groupings_id}
                  -- \--> adding (joining) ngrams that are grouped
             LEFT JOIN nodes_ngrams
                  AS wlBB ON grB.ngram2_id = wlBB.ngram_id
                  AND wlBB.node_id = wlB.node_id
                  -- \--> adding (joining) ngrams that are not grouped