Fix problems caused by wrong textwrap.wrap usage

Patrick Jentsch 2021-04-30 09:44:35 +02:00
parent f7b7da2b1f
commit bd5d8ddedb
2 changed files with 63 additions and 33 deletions

View File

@@ -27,24 +27,28 @@ args = parser.parse_args()
 # If requested: Check the encoding of the text contents from the input file
 # Else: Use utf-8
-with open(args.input, "rb") as input_file:
+with open(args.input, "rb") as text_file:
     if args.check_encoding:
-        encoding = chardet.detect(input_file.read())['encoding']
+        encoding = chardet.detect(text_file.read())['encoding']
     else:
         encoding = 'utf-8'
     text_md5 = hashlib.md5()
-    for chunk in iter(lambda: input_file.read(128 * text_md5.block_size), b''):
+    for chunk in iter(lambda: text_file.read(128 * text_md5.block_size), b''):
         text_md5.update(chunk)

 # Load the text contents from the input file
-with open(args.input, encoding=encoding) as input_file:
-    text = input_file.read()
-
-# spaCys NLP is limited to strings with maximum 1 million characters at
-# once. So we split it into suitable chunks.
-text_chunks = textwrap.wrap(text, 1000000, break_long_words=False)
-
-# the text variable potentially occupies a lot of system memory and is no
-# longer needed...
-del text
+with open(args.input, encoding=encoding) as text_file:
+    # spaCy NLP is limited to strings with maximum 1 million characters at
+    # once. So we split it into suitable chunks.
+    text_chunks = textwrap.wrap(
+        text_file.read(),
+        1000000,
+        break_long_words=False,
+        break_on_hyphens=False,
+        drop_whitespace=False,
+        expand_tabs=False,
+        replace_whitespace=False
+    )

 model = spacy_models[args.language]
 nlp = spacy.load(model)
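Why the extra keyword arguments matter: by default, textwrap.wrap munges its input (tabs are expanded, newlines are replaced, runs of whitespace are collapsed, and whitespace at chunk boundaries is dropped), so the chunks no longer concatenate back to the original text and every character offset computed from chunk_offset drifts. A minimal sketch of the difference (the sample text and width are illustrative, not from the commit):

import textwrap

text = 'Lorem\tipsum  dolor sit amet,\nconsectetur adipiscing elit.'

# Default settings munge the input: the tab is expanded, the newline is
# replaced, boundary whitespace is dropped - offsets into the original
# text are lost.
assert ''.join(textwrap.wrap(text, 20)) != text

# With all munging disabled, the chunks concatenate back to the input,
# so chunk_offset plus a token's position within its chunk stays a valid
# offset into the full text.
chunks = textwrap.wrap(
    text,
    20,
    break_long_words=False,
    break_on_hyphens=False,
    drop_whitespace=False,
    expand_tabs=False,
    replace_whitespace=False
)
assert ''.join(chunks) == text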
@@ -59,6 +63,7 @@ meta = {
         }
     },
     'file': {
+        'encoding': encoding,
         'md5': text_md5.hexdigest(),
         'name': os.path.basename(args.input)
     }
@@ -127,7 +132,8 @@ tags = {
 annotations = []

 chunk_offset = 0
-for text_chunk in text_chunks:
+while text_chunks:
+    text_chunk = text_chunks.pop(0)
     doc = nlp(text_chunk)
     for token in doc:
         if token.is_space:
@@ -158,6 +164,7 @@ for text_chunk in text_chunks:
             annotation['properties']['ner'] = token.ent_type_
         annotations.append(annotation)
     chunk_offset += len(text_chunk)
+    text_chunk = None

 with open(args.output, 'w') as output_file:
     json.dump({'meta': meta, 'tags': tags, 'annotations': annotations},
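The switch from a for loop to while/pop, together with the final text_chunk = None, is a memory optimization: each processed chunk loses its last references and can be garbage-collected before the next one is parsed, instead of the whole list of up-to-1-million-character strings staying alive for the lifetime of the loop. A standalone sketch of the pattern (the chunk contents and the length-summing stand-in for nlp() are illustrative):

chunks = ['a' * 1000000, 'b' * 1000000]

total = 0
while chunks:
    chunk = chunks.pop(0)  # the list gives up its reference to the chunk
    total += len(chunk)    # stand-in for the real nlp(chunk) processing
    chunk = None           # drop the local reference; the string can be freed
assert total == 2000000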

View File

@@ -3,19 +3,13 @@
 from argparse import ArgumentParser
 from xml.sax.saxutils import escape
+import hashlib
 import json

-# Parse the given arguments
-parser = ArgumentParser(description='Create annotations for a given txt file')
-parser.add_argument('input', metavar='Path to txt input file')
-parser.add_argument('annotations', metavar='Path to JSON annotation file')
-parser.add_argument('output', metavar='Path to vrt output file')
-args = parser.parse_args()
-
-with open(args.input) as text_file, \
-        open(args.annotations) as data_file:
-    text = text_file.read()
-    stand_off_data = json.load(data_file)
+# Two global ressources - Not very elegant but it works for now
+stand_off_data = None
+text = None


 def meta_to_string():
@@ -26,7 +20,8 @@ def meta_to_string():
         stand_off_data['meta']['generator']['arguments']['check_encoding'],
         stand_off_data['meta']['generator']['arguments']['language']
     )
-    string += '<file name="{}" md5="{}"/>\n'.format(
+    string += '<file encoding="{}" name="{}" md5="{}"/>\n'.format(
+        stand_off_data['meta']['file']['encoding'],
         stand_off_data['meta']['file']['name'],
         stand_off_data['meta']['file']['md5']
    )
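For reference, the new format string emits a file element like the following (the values are illustrative; the digest shown is the well-known MD5 of the empty string):

line = '<file encoding="{}" name="{}" md5="{}"/>\n'.format(
    'utf-8', 'input.txt', 'd41d8cd98f00b204e9800998ecf8427e'
)
# -> '<file encoding="utf-8" name="input.txt"
#     md5="d41d8cd98f00b204e9800998ecf8427e"/>\n'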
@@ -93,15 +88,43 @@ def annotations_to_string(end=float('inf')):
     return string


-vrt = ''
-vrt += '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>\n'
-vrt += '<corpus>\n'
-vrt += '<text>\n'
-vrt += meta_to_string()
-vrt += tags_to_string()
-vrt += annotations_to_string()
-vrt += '</text>\n'
-vrt += '</corpus>'
-
-with open(args.output, 'w') as vrt_file:
-    vrt_file.write(vrt)
+def main():
+    global stand_off_data
+    global text
+
+    # Parse the given arguments
+    parser = ArgumentParser(description='Create a vrt from JSON and txt')
+    parser.add_argument('text', metavar='Path to txt file')
+    parser.add_argument('stand_off_data', metavar='Path to JSON file')
+    parser.add_argument('output', metavar='Path to vrt output file')
+    args = parser.parse_args()
+
+    with open(args.stand_off_data) as stand_of_data_file:
+        stand_off_data = json.load(stand_of_data_file)
+
+    with open(args.text, "rb") as text_file:
+        text_md5 = hashlib.md5()
+        for chunk in iter(lambda: text_file.read(128 * text_md5.block_size), b''):  # noqa
+            text_md5.update(chunk)
+    if text_md5.hexdigest() != stand_off_data['meta']['file']['md5']:
+        raise Exception('md5 not equal')
+
+    with open(args.text, encoding=stand_off_data['meta']['file']['encoding']) as text_file:  # noqa
+        text = text_file.read()
+
+    vrt = ''
+    vrt += '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>\n'
+    vrt += '<corpus>\n'
+    vrt += '<text>\n'
+    vrt += meta_to_string()
+    vrt += tags_to_string()
+    vrt += annotations_to_string()
+    vrt += '</text>\n'
+    vrt += '</corpus>'
+
+    with open(args.output, 'w') as vrt_file:
+        vrt_file.write(vrt)
+
+
+if __name__ == '__main__':
+    main()
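The new MD5 cross-check guards against pairing a text file with stand-off annotations that were generated from a different, or since-modified, text. A self-contained sketch of the same check (verify_md5 and its parameters are illustrative, not part of the commit):

import hashlib
import json

def verify_md5(text_path, stand_off_data_path):
    # Recompute the MD5 of the text file in chunks, as main() above does,
    # and compare it with the digest recorded in the stand-off JSON.
    md5 = hashlib.md5()
    with open(text_path, 'rb') as f:
        for chunk in iter(lambda: f.read(128 * md5.block_size), b''):
            md5.update(chunk)
    with open(stand_off_data_path) as f:
        recorded = json.load(f)['meta']['file']['md5']
    return md5.hexdigest() == recorded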