6 Commits

4 changed files with 84 additions and 51 deletions

12
nlp
View File

@ -71,6 +71,7 @@ class NLPPipeline(WorkflowRunner):
''' '''
nlp_tasks = [] nlp_tasks = []
n_cores = max(1, int(self.getNCores() / len(self.jobs))) n_cores = max(1, int(self.getNCores() / len(self.jobs)))
mem_mb = min(n_cores * 2048, int(self.getMemMb() / len(self.jobs)))
for i, job in enumerate(self.jobs): for i, job in enumerate(self.jobs):
output_file = os.path.join(job.output_dir, '{}.nopaque-stand-off.json'.format(job.name)) # noqa output_file = os.path.join(job.output_dir, '{}.nopaque-stand-off.json'.format(job.name)) # noqa
cmd = 'spacy-nlp' cmd = 'spacy-nlp'
@ -81,7 +82,7 @@ class NLPPipeline(WorkflowRunner):
deps = 'setup_output_directory_-_{}'.format(i) deps = 'setup_output_directory_-_{}'.format(i)
lbl = 'nlp_-_{}'.format(i) lbl = 'nlp_-_{}'.format(i)
task = self.addTask(command=cmd, dependencies=deps, label=lbl, task = self.addTask(command=cmd, dependencies=deps, label=lbl,
nCores=n_cores) memMb=mem_mb, nCores=n_cores)
nlp_tasks.append(task) nlp_tasks.append(task)
''' '''
@ -128,9 +129,8 @@ def collect_jobs(input_dir, output_dir):
jobs = [] jobs = []
for file in os.listdir(input_dir): for file in os.listdir(input_dir):
if os.path.isdir(os.path.join(input_dir, file)): if os.path.isdir(os.path.join(input_dir, file)):
jobs += collect_jobs(os.path.join(input_dir, file), continue
os.path.join(output_dir, file)) if file.lower().endswith('.txt'):
elif file.lower().endswith('.txt'):
job = NLPPipelineJob(os.path.join(input_dir, file), job = NLPPipelineJob(os.path.join(input_dir, file),
os.path.join(output_dir, file)) os.path.join(output_dir, file))
jobs.append(job) jobs.append(job)
@ -148,9 +148,11 @@ def parse_args():
required=True) required=True)
parser.add_argument('-l', '--language', parser.add_argument('-l', '--language',
choices=SPACY_MODELS.keys(), choices=SPACY_MODELS.keys(),
help='Language of the input (2-character ISO 639-1 language codes)', # noqa
required=True) required=True)
parser.add_argument('--check-encoding', parser.add_argument('--check-encoding',
action='store_true') action='store_true',
help='Check encoding of the input file, UTF-8 is used instead') # noqa
parser.add_argument('--log-dir', parser.add_argument('--log-dir',
help='Logging directory') help='Logging directory')
parser.add_argument('--mem-mb', parser.add_argument('--mem-mb',

View File

@ -16,35 +16,40 @@ spacy_models = {spacy.info(pipeline)['lang']: pipeline
# Parse the given arguments # Parse the given arguments
parser = ArgumentParser(description='Create annotations for a given txt file') parser = ArgumentParser(description='Create annotations for a given txt file')
parser.add_argument('input', metavar='Path to txt input file') parser.add_argument('input', help='Path to txt input file')
parser.add_argument('output', metavar='Path to JSON output file') parser.add_argument('output', help='Path to JSON output file')
parser.add_argument('-l', '--language', parser.add_argument('-l', '--language',
choices=spacy_models.keys(), choices=spacy_models.keys(),
help='Language of the input (2-character ISO 639-1 language codes)', # noqa
required=True) required=True)
parser.add_argument('-c', '--check-encoding', action='store_true') parser.add_argument('-c', '--check-encoding',
action='store_true',
help='Check encoding of the input file, UTF-8 is used instead') # noqa
args = parser.parse_args() args = parser.parse_args()
with open(args.input, "rb") as text_file:
# If requested: Check the encoding of the text contents from the input file
# Else: Use utf-8
with open(args.input, "rb") as input_file:
if args.check_encoding: if args.check_encoding:
encoding = chardet.detect(input_file.read())['encoding'] encoding = chardet.detect(text_file.read())['encoding']
else: else:
encoding = 'utf-8' encoding = 'utf-8'
text_file.seek(0)
text_md5 = hashlib.md5() text_md5 = hashlib.md5()
for chunk in iter(lambda: input_file.read(128 * text_md5.block_size), b''): for chunk in iter(lambda: text_file.read(128 * text_md5.block_size), b''):
text_md5.update(chunk) text_md5.update(chunk)
# Load the text contents from the input file # Load the text contents from the input file
with open(args.input, encoding=encoding) as input_file: with open(args.input, encoding=encoding) as text_file:
text = input_file.read() # spaCy NLP is limited to strings with a maximum of 1 million characters at
# spaCys NLP is limited to strings with maximum 1 million characters at
# once. So we split it into suitable chunks. # once. So we split it into suitable chunks.
text_chunks = textwrap.wrap(text, 1000000, break_long_words=False) text_chunks = textwrap.wrap(
# the text variable potentially occupies a lot of system memory and is no text_file.read(),
# longer needed... 1000000,
del text break_long_words=False,
break_on_hyphens=False,
drop_whitespace=False,
expand_tabs=False,
replace_whitespace=False
)
model = spacy_models[args.language] model = spacy_models[args.language]
nlp = spacy.load(model) nlp = spacy.load(model)
@ -59,6 +64,7 @@ meta = {
} }
}, },
'file': { 'file': {
'encoding': encoding,
'md5': text_md5.hexdigest(), 'md5': text_md5.hexdigest(),
'name': os.path.basename(args.input) 'name': os.path.basename(args.input)
} }
@ -127,7 +133,8 @@ tags = {
annotations = [] annotations = []
chunk_offset = 0 chunk_offset = 0
for text_chunk in text_chunks: while text_chunks:
text_chunk = text_chunks.pop(0)
doc = nlp(text_chunk) doc = nlp(text_chunk)
for token in doc: for token in doc:
if token.is_space: if token.is_space:
@ -142,12 +149,12 @@ for text_chunk in text_chunks:
for ent_candidate in token.sent.ents: for ent_candidate in token.sent.ents:
if ent_candidate.start_char == token.idx: if ent_candidate.start_char == token.idx:
ent = ent_candidate ent = ent_candidate
annotation = {'start': ent.start_char + chunk_offset,
'end': ent.end_char + chunk_offset,
'tag': 'ent',
'properties': {'type': token.ent_type_}}
annotations.append(annotation)
break break
annotation = {'start': ent.start_char + chunk_offset,
'end': ent.end_char + chunk_offset,
'tag': 'ent',
'properties': {'type': token.ent_type_}}
annotations.append(annotation)
annotation = {'start': token.idx + chunk_offset, annotation = {'start': token.idx + chunk_offset,
'end': token.idx + len(token.text) + chunk_offset, 'end': token.idx + len(token.text) + chunk_offset,
'tag': 'token', 'tag': 'token',
@ -157,7 +164,8 @@ for text_chunk in text_chunks:
if token.ent_type_: if token.ent_type_:
annotation['properties']['ner'] = token.ent_type_ annotation['properties']['ner'] = token.ent_type_
annotations.append(annotation) annotations.append(annotation)
chunk_offset = len(text_chunk) chunk_offset += len(text_chunk)
text_chunk = None
with open(args.output, 'w') as output_file: with open(args.output, 'w') as output_file:
json.dump({'meta': meta, 'tags': tags, 'annotations': annotations}, json.dump({'meta': meta, 'tags': tags, 'annotations': annotations},

View File

@ -3,19 +3,13 @@
from argparse import ArgumentParser from argparse import ArgumentParser
from xml.sax.saxutils import escape from xml.sax.saxutils import escape
import hashlib
import json import json
# Parse the given arguments
parser = ArgumentParser(description='Create annotations for a given txt file')
parser.add_argument('input', metavar='Path to txt input file')
parser.add_argument('annotations', metavar='Path to JSON annotation file')
parser.add_argument('output', metavar='Path to vrt output file')
args = parser.parse_args()
with open(args.input) as text_file, \ # Two global resources - Not very elegant but it works for now
open(args.annotations) as data_file: stand_off_data = None
text = text_file.read() text = None
stand_off_data = json.load(data_file)
def meta_to_string(): def meta_to_string():
@ -26,7 +20,8 @@ def meta_to_string():
stand_off_data['meta']['generator']['arguments']['check_encoding'], stand_off_data['meta']['generator']['arguments']['check_encoding'],
stand_off_data['meta']['generator']['arguments']['language'] stand_off_data['meta']['generator']['arguments']['language']
) )
string += '<file name="{}" md5="{}"/>\n'.format( string += '<file encoding="{}" name="{}" md5="{}"/>\n'.format(
stand_off_data['meta']['file']['encoding'],
stand_off_data['meta']['file']['name'], stand_off_data['meta']['file']['name'],
stand_off_data['meta']['file']['md5'] stand_off_data['meta']['file']['md5']
) )
@ -93,15 +88,43 @@ def annotations_to_string(end=float('inf')):
return string return string
vrt = '' def main():
vrt += '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>\n' global stand_off_data
vrt += '<corpus>\n' global text
vrt += '<text>\n'
vrt += meta_to_string()
vrt += tags_to_string()
vrt += annotations_to_string()
vrt += '</text>\n'
vrt += '</corpus>'
with open(args.output, 'w') as vrt_file: # Parse the given arguments
vrt_file.write(vrt) parser = ArgumentParser(description='Create a vrt from JSON and txt')
parser.add_argument('text', help='Path to txt file')
parser.add_argument('stand_off_data', help='Path to JSON file')
parser.add_argument('output', help='Path to vrt output file')
args = parser.parse_args()
with open(args.stand_off_data) as stand_of_data_file:
stand_off_data = json.load(stand_of_data_file)
with open(args.text, "rb") as text_file:
text_md5 = hashlib.md5()
for chunk in iter(lambda: text_file.read(128 * text_md5.block_size), b''): # noqa
text_md5.update(chunk)
if text_md5.hexdigest() != stand_off_data['meta']['file']['md5']:
raise Exception('md5 not equal')
with open(args.text, encoding=stand_off_data['meta']['file']['encoding']) as text_file: # noqa
text = text_file.read()
vrt = ''
vrt += '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>\n'
vrt += '<corpus>\n'
vrt += '<text>\n'
vrt += meta_to_string()
vrt += tags_to_string()
vrt += annotations_to_string()
vrt += '</text>\n'
vrt += '</corpus>'
with open(args.output, 'w') as vrt_file:
vrt_file.write(vrt)
if __name__ == '__main__':
main()

View File

@ -6,7 +6,7 @@ import os
import subprocess import subprocess
import sys import sys
CONTAINER_IMAGE = 'gitlab.ub.uni-bielefeld.de:4567/sfb1288inf/nlp:1.0.0' CONTAINER_IMAGE = 'gitlab.ub.uni-bielefeld.de:4567/sfb1288inf/nlp:1.0.0b'
CONTAINER_INPUT_DIR = '/input' CONTAINER_INPUT_DIR = '/input'
CONTAINER_OUTPUT_DIR = '/output' CONTAINER_OUTPUT_DIR = '/output'
CONTAINER_LOG_DIR = '/logs' CONTAINER_LOG_DIR = '/logs'