Compare commits

..

No commits in common. "f7b7da2b1f0e5f9d46054f37194718c338a26495" and "cd976692d6aa9cf8e073e5dfbc7d4e115d291424" have entirely different histories.

2 changed files with 7 additions and 8 deletions

3
nlp
View File

@@ -71,7 +71,6 @@ class NLPPipeline(WorkflowRunner):
''' '''
nlp_tasks = [] nlp_tasks = []
n_cores = max(1, int(self.getNCores() / len(self.jobs))) n_cores = max(1, int(self.getNCores() / len(self.jobs)))
mem_mb = min(n_cores * 2048, int(self.getMemMb() / len(self.jobs)))
for i, job in enumerate(self.jobs): for i, job in enumerate(self.jobs):
output_file = os.path.join(job.output_dir, '{}.nopaque-stand-off.json'.format(job.name)) # noqa output_file = os.path.join(job.output_dir, '{}.nopaque-stand-off.json'.format(job.name)) # noqa
cmd = 'spacy-nlp' cmd = 'spacy-nlp'
@@ -82,7 +81,7 @@ class NLPPipeline(WorkflowRunner):
deps = 'setup_output_directory_-_{}'.format(i) deps = 'setup_output_directory_-_{}'.format(i)
lbl = 'nlp_-_{}'.format(i) lbl = 'nlp_-_{}'.format(i)
task = self.addTask(command=cmd, dependencies=deps, label=lbl, task = self.addTask(command=cmd, dependencies=deps, label=lbl,
memMb=mem_mb, nCores=n_cores) nCores=n_cores)
nlp_tasks.append(task) nlp_tasks.append(task)
''' '''

View File

@@ -142,12 +142,12 @@ for text_chunk in text_chunks:
for ent_candidate in token.sent.ents: for ent_candidate in token.sent.ents:
if ent_candidate.start_char == token.idx: if ent_candidate.start_char == token.idx:
ent = ent_candidate ent = ent_candidate
annotation = {'start': ent.start_char + chunk_offset,
'end': ent.end_char + chunk_offset,
'tag': 'ent',
'properties': {'type': token.ent_type_}}
annotations.append(annotation)
break break
annotation = {'start': ent.start_char + chunk_offset,
'end': ent.end_char + chunk_offset,
'tag': 'ent',
'properties': {'type': token.ent_type_}}
annotations.append(annotation)
annotation = {'start': token.idx + chunk_offset, annotation = {'start': token.idx + chunk_offset,
'end': token.idx + len(token.text) + chunk_offset, 'end': token.idx + len(token.text) + chunk_offset,
'tag': 'token', 'tag': 'token',
@@ -157,7 +157,7 @@ for text_chunk in text_chunks:
if token.ent_type_: if token.ent_type_:
annotation['properties']['ner'] = token.ent_type_ annotation['properties']['ner'] = token.ent_type_
annotations.append(annotation) annotations.append(annotation)
chunk_offset += len(text_chunk) chunk_offset = len(text_chunk)
with open(args.output, 'w') as output_file: with open(args.output, 'w') as output_file:
json.dump({'meta': meta, 'tags': tags, 'annotations': annotations}, json.dump({'meta': meta, 'tags': tags, 'annotations': annotations},