52 Commits

Author SHA1 Message Date
cd976692d6 Don't process files in subdirectories 2021-04-12 13:24:31 +02:00
4e7669d009 Return the returncode 2021-04-12 09:26:21 +02:00
8105edfd1b Add missing argument to wrapper script 2021-04-12 09:20:28 +02:00
72409bd12d Fix race condition 2021-03-26 14:48:38 +01:00
54f336e620 Fix permissions 2021-03-26 10:09:45 +01:00
3b570e5df1 more pipeline help tweaks 2021-03-26 10:02:14 +01:00
dc62755d12 Update README and pipeline help 2021-03-26 10:01:51 +01:00
aa1bfa259d Use JSON files for stand-off annotations. 2021-03-26 09:46:17 +01:00
d620c29f27 Fix version 1.0.0 2021-02-25 11:26:11 +01:00
2ced38504c Use "buster" instead of "10" in FROM 2020-10-08 23:17:58 +02:00
f02c0953bf Use new Dockerfile structure 2020-10-08 23:08:49 +02:00
5329446277 Update CI script 2020-10-07 17:09:09 +02:00
15e373db58 fix gitlab ci 2020-09-23 16:53:16 +02:00
8afdfb13b2 Use smaller models 2020-09-23 15:46:43 +02:00
1ed42f68ad Remove clean stage from stages 2020-09-23 15:27:31 +02:00
42583fea46 Update to newer Version 2020-09-23 15:26:53 +02:00
5bd0feda5c fix pipeline 2020-06-23 15:19:39 +02:00
5980a995e5 Add missing newline 2020-06-10 14:23:43 +02:00
fe7ab93513 Update nlp software metadata represantation 2020-06-10 13:14:34 +02:00
91708308bc Add model version number 2020-05-20 15:35:45 +02:00
887e814020 Fix 2020-05-20 15:01:52 +02:00
3fc6ebff4c Add stand off varaiant and metadata 2020-05-20 14:55:52 +02:00
bef51b7d81 Keep uncompressed output files after zip jobs. 2020-05-13 09:07:31 +02:00
68e86338d4 Bump versions 2020-04-06 09:21:38 +02:00
30d127f3af Fix zip creation 2020-04-04 15:37:12 +02:00
e061a7426d Update NLP Pipeline 2020-04-03 17:35:05 +02:00
41910afb79 Add nlp to filename 2020-02-18 10:17:24 +01:00
5d2fee029e Some cosmetics 2020-02-17 14:58:18 +01:00
6e87e0decd Add filename argument for zip results 2020-02-17 11:57:55 +01:00
79043f3dd7 Fix last errors 2020-02-12 14:25:08 +01:00
1a3e4a0a02 Fix check_encoding functionality 2020-02-12 14:16:36 +01:00
504861ae07 Update Dockerfile 2020-02-12 13:48:30 +01:00
88d03d4360 Add function to check the encoding of input text files. 2020-02-12 13:46:43 +01:00
6769be049a Escape text and lemma 2020-02-04 13:12:31 +01:00
ec2cf1dcff Fix zip switch integration 2020-02-03 15:26:04 +01:00
e4ef4835e5 Add a switch for zip functionality 2020-02-03 15:02:26 +01:00
5f20f9be40 Remove id xml attribute from output file 2020-01-27 15:59:32 +01:00
b0a402b3ac Add zip creation 2020-01-20 15:09:38 +01:00
543a1ba29a Bump version 2020-01-07 11:24:11 +01:00
d5a2d38c17 fix 2019-11-04 15:18:52 +01:00
4af9d9c899 Update 2019-11-04 15:15:41 +01:00
de8160a5b6 Update .gitlab-ci.yml 2019-09-19 09:25:29 +02:00
d564ed0464 Update .gitlab-ci.yml 2019-09-19 09:24:04 +02:00
abf6c430c3 Update .gitlab-ci.yml 2019-09-16 15:52:23 +02:00
19426a4c78 Set charset again! 2019-09-12 11:42:42 +02:00
a32184db5c Codestyle changes. 2019-09-12 10:06:29 +02:00
a16b010bdc Install models via an alternative way. 2019-09-12 09:56:13 +02:00
af293d6141 Codestyle 2019-09-11 16:15:41 +02:00
43717de313 Use latest image tag for master. 2019-09-11 13:39:51 +02:00
48fb20ae6b Change the documentation style. 2019-09-11 13:34:01 +02:00
2f57b1a0af Use fix version numbers. 2019-09-11 13:20:07 +02:00
e68d5c39ee Update CI script and remove unused code. 2019-07-31 11:39:54 +02:00
8 changed files with 581 additions and 303 deletions

.gitlab-ci.yml

@ -1,42 +1,68 @@
image: docker:latest
image: docker:19.03.13
services:
- docker:dind
- docker:19.03.13-dind
stages:
- build
- push
before_script:
- docker login -u gitlab-ci-token -p $CI_JOB_TOKEN $CI_REGISTRY
variables:
DOCKER_TLS_CERTDIR: "/certs"
INTERMEDIATE_IMAGE_TAG: $CI_REGISTRY_IMAGE:$CI_COMMIT_REF_NAME-$CI_COMMIT_SHA
Build:
.reg_setup:
before_script:
- apk add --no-cache curl
- curl --fail --show-error --location "https://github.com/genuinetools/reg/releases/download/v$REG_VERSION/reg-linux-amd64" --output /usr/local/bin/reg
- echo "$REG_SHA256 /usr/local/bin/reg" | sha256sum -c -
- chmod a+x /usr/local/bin/reg
variables:
REG_SHA256: ade837fc5224acd8c34732bf54a94f579b47851cc6a7fd5899a98386b782e228
REG_VERSION: 0.16.1
build_image:
script:
- docker login -u gitlab-ci-token -p $CI_JOB_TOKEN $CI_REGISTRY
- docker build --pull -t $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA .
- docker push $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA
- docker build -t $INTERMEDIATE_IMAGE_TAG .
- docker push $INTERMEDIATE_IMAGE_TAG
stage: build
tags:
- docker
- docker
Push latest:
push_master:
extends:
- .reg_setup
only:
- master
script:
- docker pull $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA
- docker tag $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA $CI_REGISTRY_IMAGE:latest
- docker push $CI_REGISTRY_IMAGE:latest
- docker login -u gitlab-ci-token -p $CI_JOB_TOKEN $CI_REGISTRY
- docker pull $INTERMEDIATE_IMAGE_TAG
- /usr/local/bin/reg rm -d --auth-url $CI_REGISTRY -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $INTERMEDIATE_IMAGE_TAG
- docker tag $INTERMEDIATE_IMAGE_TAG $IMAGE_TAG
- docker push $IMAGE_TAG
stage: push
tags:
- docker
- docker
variables:
IMAGE_TAG: $CI_REGISTRY_IMAGE:latest
Push tag:
push_other:
extends:
- .reg_setup
except:
- master
only:
- branches
- tags
script:
- docker pull $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA
- docker tag $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA $CI_REGISTRY_IMAGE:$CI_COMMIT_REF_NAME
- docker push $CI_REGISTRY_IMAGE:$CI_COMMIT_REF_NAME
- docker login -u gitlab-ci-token -p $CI_JOB_TOKEN $CI_REGISTRY
- docker pull $INTERMEDIATE_IMAGE_TAG
- /usr/local/bin/reg rm -d --auth-url $CI_REGISTRY -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $INTERMEDIATE_IMAGE_TAG
- docker tag $INTERMEDIATE_IMAGE_TAG $IMAGE_TAG
- docker push $IMAGE_TAG
stage: push
tags:
- docker
- docker
variables:
IMAGE_TAG: $CI_REGISTRY_IMAGE:$CI_COMMIT_REF_NAME

Dockerfile

@ -1,43 +1,62 @@
FROM debian:9-slim
FROM debian:buster-slim
LABEL authors="Patrick Jentsch <p.jentsch@uni-bielefeld.de>, Stephan Porada <porada@posteo.de>"
LABEL maintainer="inf_sfb1288@lists.uni-bielefeld.de"
ENV DEBIAN_FRONTEND=noninteractive
ENV LANG=C.UTF-8
RUN apt-get update && \
apt-get install -y --no-install-recommends \
build-essential \
ca-certificates \
python2.7 \
python3.5 \
python3-dev \
python3-pip \
python3-setuptools \
wget
# Install pyFlow
ENV PYFLOW_VERSION 1.1.20
RUN wget -nv https://github.com/Illumina/pyflow/releases/download/v"$PYFLOW_VERSION"/pyflow-"$PYFLOW_VERSION".tar.gz && \
tar -xzf pyflow-"$PYFLOW_VERSION".tar.gz && \
cd pyflow-"$PYFLOW_VERSION" && \
python2.7 setup.py build install && \
cd .. && \
rm -r pyflow-"$PYFLOW_VERSION" pyflow-"$PYFLOW_VERSION".tar.gz
RUN apt-get update \
&& apt-get install --no-install-recommends --yes \
wget
# Install spaCy
RUN pip3 install wheel && pip3 install -U spacy && \
python3.5 -m spacy download de && \
python3.5 -m spacy download en && \
python3.5 -m spacy download es && \
python3.5 -m spacy download fr && \
python3.5 -m spacy download it && \
python3.5 -m spacy download pt
# Install the NLP pipeline and its dependencies #
## Install pyFlow ##
ENV PYFLOW_VERSION=1.1.20
RUN wget --no-check-certificate --quiet \
"https://github.com/Illumina/pyflow/releases/download/v${PYFLOW_VERSION}/pyflow-${PYFLOW_VERSION}.tar.gz" \
&& tar -xzf "pyflow-${PYFLOW_VERSION}.tar.gz" \
&& cd "pyflow-${PYFLOW_VERSION}" \
&& apt-get install --no-install-recommends --yes \
python2.7 \
&& python2.7 setup.py build install \
&& cd .. \
&& rm -r "pyflow-${PYFLOW_VERSION}" "pyflow-${PYFLOW_VERSION}.tar.gz"
COPY nlp /usr/local/bin
COPY spacy_nlp /usr/local/bin
RUN mkdir /input /output && \
chmod a+rw /input /output
## Install spaCy ##
ENV SPACY_VERSION=3.0.5
RUN apt-get install --no-install-recommends --yes \
python3.7 \
python3-pip \
&& pip3 install \
chardet \
setuptools \
wheel \
&& pip3 install --upgrade pip \
&& pip3 install "spacy==${SPACY_VERSION}"
# Only models that include the following components are compatible:
# lemmatizer, ner, parser, senter, tagger,
ENV SPACY_MODELS="de_core_news_md,en_core_web_md,it_core_news_md,nl_core_news_md,pl_core_news_md,zh_core_web_md"
ENV SPACY_MODELS_VERSION=3.0.0
RUN for spacy_model in $(echo ${SPACY_MODELS} | tr "," "\n"); do python3 -m spacy download "${spacy_model}-${SPACY_MODELS_VERSION}" --direct; done
## Further dependencies ##
RUN apt-get install --no-install-recommends --yes \
procps \
zip
## Install Pipeline ##
COPY nlp spacy-nlp vrt-creator /usr/local/bin/
RUN rm -r /var/lib/apt/lists/*
ENTRYPOINT ["nlp"]
CMD ["--help"]

README.md

@ -1,74 +1,48 @@
# Natural language processing
# NLP - Natural Language Processing
This repository provides all code that is needed to build a container image for natural language processing utilizing [spaCy](https://spacy.io).
This software implements a heavily parallelized pipeline for Natural Language Processing of text files. It is used for nopaque's NLP service, but you can also use it standalone; for that purpose a convenient wrapper script is provided.
## Build image
## Software used in this pipeline implementation
- Official Debian Docker image (buster-slim) and programs from its free repositories: https://hub.docker.com/_/debian
- pyFlow (1.1.20): https://github.com/Illumina/pyflow/releases/tag/v1.1.20
- spaCy (3.0.5): https://github.com/explosion/spaCy/releases/tag/v3.0.5
- spaCy medium sized models (3.0.0):
- https://github.com/explosion/spacy-models/releases/tag/de_core_news_md-3.0.0
- https://github.com/explosion/spacy-models/releases/tag/en_core_web_md-3.0.0
- https://github.com/explosion/spacy-models/releases/tag/it_core_news_md-3.0.0
- https://github.com/explosion/spacy-models/releases/tag/nl_core_news_md-3.0.0
- https://github.com/explosion/spacy-models/releases/tag/pl_core_news_md-3.0.0
- https://github.com/explosion/spacy-models/releases/tag/zh_core_web_md-3.0.0
1. Clone this repository and navigate into it:
```
git clone https://gitlab.ub.uni-bielefeld.de/sfb1288inf/nlp.git && cd nlp
## Use this image
1. Create input and output directories for the pipeline.
``` bash
mkdir -p /<my_data_location>/input /<my_data_location>/output
```
2. Build image:
```
docker build -t sfb1288inf/nlp:latest .
```
2. Place your text files inside `/<my_data_location>/input`. Files should all contain text of the same language.
Alternatively build from the GitLab repository without cloning:
1. Build image:
```
docker build -t sfb1288inf/nlp:latest https://gitlab.ub.uni-bielefeld.de/sfb1288inf/nlp.git
3. Start the pipeline process. Check the pipeline help (`nlp --help`) for more details.
```
# Option one: Use the wrapper script
## Install the wrapper script (only on first run). Get it from https://gitlab.ub.uni-bielefeld.de/sfb1288inf/nlp/-/raw/1.0.0/wrapper/nlp, make it executable and add it to your ${PATH}
cd /<my_data_location>
nlp -i input -l <language_code> -o output <optional_pipeline_arguments>
## Download prebuilt image
The GitLab registry provides a prebuilt image. It is automatically created, utilizing the conquaire build servers.
1. Download image:
```
docker pull gitlab.ub.uni-bielefeld.de:4567/sfb1288inf/nlp:latest
```
## Run
1. Create input and output directories for the NLP software:
```
mkdir -p /<mydatalocation>/files_for_nlp /<mydatalocation>/files_from_nlp
```
2. Place your text files inside the `/<mydatalocation>/files_for_nlp` directory. Files should all contain text of the same language.
3. Start the NLP process.
```
# Option two: Classic Docker style
docker run \
--rm \
-it \
-u $(id -u $USER):$(id -g $USER) \
-v /<mydatalocation>/files_for_nlp:/input \
-v /<mydatalocation>/files_from_nlp:/output \
sfb1288inf/nlp:latest \
-v /<my_data_location>/input:/input \
-v /<my_data_location>/output:/output \
gitlab.ub.uni-bielefeld.de:4567/sfb1288inf/nlp:1.0.0 \
-i /input \
-l <languagecode> \
-o /output
-l <language_code> \
-o /output \
<optional_pipeline_arguments>
```
The arguments below `sfb1288inf/nlp:latest` are described in the [NLP arguments](#nlp-arguments) part.
If you want to use the prebuilt image, replace `sfb1288inf/nlp:latest` with `gitlab.ub.uni-bielefeld.de:4567/sfb1288inf/nlp:latest`.
4. Check your results in the `/<mydatalocation>/files_from_nlp` directory.
### NLP arguments
`-i path`
* Sets the input directory using the specified path.
* required = True
`-o path`
* Sets the output directory using the specified path.
* required = True
`-l languagecode`
* Tells spaCy which language will be used.
* options = de (German), el (Greek), en (English), es (Spanish), fr (French), it (Italian), nl (Dutch), pt (Portuguese)
* required = True
4. Check your results in the `/<my_data_location>/output` directory.

nlp

@ -1,133 +1,193 @@
#!/usr/bin/env python2.7
# coding=utf-8
"""
nlp
"""A NLP pipeline for text file processing."""
Usage: For usage instructions run with option --help
Author: Patrick Jentsch <p.jentsch@uni-bielefeld.de>
"""
__author__ = 'Patrick Jentsch <p.jentsch@uni-bielefeld.de>, ' \
'Stephan Porada <porada@posteo.de>'
__version__ = '1.0.0'
import argparse
from argparse import ArgumentParser
from pyflow import WorkflowRunner
import multiprocessing
import os
import sys
from pyflow import WorkflowRunner
def parse_arguments():
parser = argparse.ArgumentParser(
description='Performs NLP of documents utilizing spaCy. The results are served as verticalized text files.'
)
parser.add_argument(
'-i',
dest='input_dir',
required=True
)
parser.add_argument(
'-l',
choices=['de', 'el', 'en', 'es', 'fr', 'it', 'nl', 'pt'],
dest='lang',
required=True
)
parser.add_argument(
'-o',
dest='output_dir',
required=True
)
parser.add_argument(
'--nCores',
default=min(4, multiprocessing.cpu_count()),
dest='n_cores',
help='total number of cores available',
required=False,
type=int
)
return parser.parse_args()
SPACY_MODELS = {'de': 'de_core_news_md',
'en': 'en_core_web_md',
'it': 'it_core_news_md',
'nl': 'nl_core_news_md',
'pl': 'pl_core_news_md',
'zh': 'zh_core_web_md'}
class NLPWorkflow(WorkflowRunner):
def __init__(self, args):
self.jobs = analyze_jobs(args.input_dir, args.output_dir)
self.lang = args.lang
self.n_cores = args.n_cores
class NLPPipelineJob:
"""An NLP pipeline job class
Each input file of the pipeline is represented as an NLP pipeline job,
which holds all necessary information for the pipeline to process it.
Arguments:
file -- Path to the file
output_dir -- Path to a directory where job results are stored
"""
def __init__(self, file, output_dir):
self.file = file
self.name = os.path.basename(file).rsplit('.', 1)[0]
self.output_dir = output_dir
class NLPPipeline(WorkflowRunner):
def __init__(self, input_dir, output_dir, check_encoding, lang, zip):
self.input_dir = input_dir
self.output_dir = output_dir
self.check_encoding = check_encoding
self.lang = lang
self.zip = zip
self.jobs = collect_jobs(self.input_dir, self.output_dir)
def workflow(self):
if len(self.jobs) == 0:
if not self.jobs:
return
'''
' ##################################################
' # Create output directories #
' # setup output directory #
' ##################################################
'''
create_output_directories_jobs = []
for index, job in enumerate(self.jobs):
cmd = 'mkdir -p "%s"' % (job['output_dir'])
create_output_directories_jobs.append(
self.addTask(
command=cmd,
label='create_output_directories_job_-_%i' % (index)
)
)
setup_output_directory_tasks = []
for i, job in enumerate(self.jobs):
cmd = 'mkdir -p "{}"'.format(job.output_dir)
lbl = 'setup_output_directory_-_{}'.format(i)
task = self.addTask(command=cmd, label=lbl)
setup_output_directory_tasks.append(task)
'''
' ##################################################
' # Natural language processing #
' # nlp #
' ##################################################
'''
nlp_jobs = []
nlp_job_n_cores = min(
self.n_cores,
max(1, int(self.n_cores / len(self.jobs)))
)
for index, job in enumerate(self.jobs):
cmd = 'spacy_nlp -l "%s" "%s" "%s"' % (
self.lang,
job['path'],
os.path.join(job['output_dir'], job['name'] + '.vrt')
)
nlp_jobs.append(
self.addTask(
command=cmd,
dependencies='create_output_directories_job_-_%i' % (index),
label='nlp_job_-_%i' % (index),
nCores=nlp_job_n_cores
)
)
nlp_tasks = []
n_cores = max(1, int(self.getNCores() / len(self.jobs)))
for i, job in enumerate(self.jobs):
output_file = os.path.join(job.output_dir, '{}.nopaque-stand-off.json'.format(job.name)) # noqa
cmd = 'spacy-nlp'
cmd += ' -l "{}"'.format(self.lang)
cmd += ' --check-encoding' if self.check_encoding else ''
cmd += ' "{}"'.format(job.file)
cmd += ' "{}"'.format(output_file)
deps = 'setup_output_directory_-_{}'.format(i)
lbl = 'nlp_-_{}'.format(i)
task = self.addTask(command=cmd, dependencies=deps, label=lbl,
nCores=n_cores)
nlp_tasks.append(task)
'''
' ##################################################
' # vrt creation #
' ##################################################
'''
vrt_creation_tasks = []
for i, job in enumerate(self.jobs):
output_file = os.path.join(job.output_dir, '{}.vrt'.format(job.name)) # noqa
nlp_file = os.path.join(job.output_dir, '{}.nopaque-stand-off.json'.format(job.name)) # noqa
cmd = 'vrt-creator'
cmd += ' "{}"'.format(job.file)
cmd += ' "{}"'.format(nlp_file)
cmd += ' "{}"'.format(output_file)
deps = 'nlp_-_{}'.format(i)
lbl = 'vrt_creation_-_{}'.format(i)
task = self.addTask(command=cmd, dependencies=deps, label=lbl)
vrt_creation_tasks.append(task)
'''
' ##################################################
' # zip creation #
' ##################################################
'''
zip_creation_tasks = []
if self.zip is not None:
cmd = 'cd "{}"'.format(self.output_dir)
cmd += ' && '
cmd += 'zip'
cmd += ' -r'
cmd += ' "{}.zip" .'.format(self.zip)
cmd += ' -x "pyflow.data*"'
cmd += ' -i "*.vrt" "*.json"'
cmd += ' && '
cmd += 'cd -'
deps = vrt_creation_tasks
lbl = 'zip_creation'
task = self.addTask(command=cmd, dependencies=deps, label=lbl)
zip_creation_tasks.append(task)
def analyze_jobs(input_dir, output_dir):
def collect_jobs(input_dir, output_dir):
jobs = []
for file in os.listdir(input_dir):
if os.path.isdir(os.path.join(input_dir, file)):
jobs += analyze_jobs(
os.path.join(input_dir, file),
os.path.join(output_dir, file),
)
elif file.endswith('.txt'):
jobs.append(
{
'filename': file,
'name': file.rsplit('.', 1)[0],
'output_dir': os.path.join(output_dir, file),
'path': os.path.join(input_dir, file)
}
)
continue
if file.lower().endswith('.txt'):
job = NLPPipelineJob(os.path.join(input_dir, file),
os.path.join(output_dir, file))
jobs.append(job)
return jobs
def parse_args():
parser = ArgumentParser(description='NLP pipeline for TXT file processing',
prog='NLP pipeline')
parser.add_argument('-i', '--input-dir',
help='Input directory',
required=True)
parser.add_argument('-o', '--output-dir',
help='Output directory',
required=True)
parser.add_argument('-l', '--language',
choices=SPACY_MODELS.keys(),
required=True)
parser.add_argument('--check-encoding',
action='store_true')
parser.add_argument('--log-dir',
help='Logging directory')
parser.add_argument('--mem-mb',
help='Amount of system memory to be used (Default: min(--n-cores * 2048, available system memory))', # noqa
type=int)
parser.add_argument('--n-cores',
default=min(4, multiprocessing.cpu_count()),
help='Number of CPU threads to be used (Default: min(4, number of CPUs))', # noqa
type=int)
parser.add_argument('--zip',
help='Create one zip file per filetype')
parser.add_argument('-v', '--version',
action='version',
help='Returns the current version of the NLP pipeline',
version='%(prog)s {}'.format(__version__))
args = parser.parse_args()
# Set some tricky default values and check for insufficient input
if args.log_dir is None:
args.log_dir = args.output_dir
if args.n_cores < 1:
raise Exception('--n-cores must be greater than or equal to 1')
if args.mem_mb is None:
max_mem_mb = int(os.popen('free -t -m').readlines()[-1].split()[1:][0])
args.mem_mb = min(args.n_cores * 2048, max_mem_mb)
if args.mem_mb < 2048:
raise Exception('--mem-mb must be greater than or equal to 2048')
if args.zip is not None and args.zip.lower().endswith('.zip'):
# Remove .zip file extension if provided
args.zip = args.zip[:-4]
args.zip = args.zip if args.zip else 'output'
return args
def main():
args = parse_arguments()
wflow = NLPWorkflow(args)
retval = wflow.run(dataDirRoot=args.output_dir, nCores=args.n_cores)
args = parse_args()
nlp_pipeline = NLPPipeline(args.input_dir, args.output_dir, args.check_encoding, args.language, args.zip) # noqa
retval = nlp_pipeline.run(dataDirRoot=args.log_dir, memMb=args.mem_mb, nCores=args.n_cores) # noqa
sys.exit(retval)
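To make the layout produced by collect_jobs and the tasks above concrete: every *.txt file found directly in the input directory (subdirectories are skipped) gets an output directory named after the file, which receives the stand-off JSON and the VRT. An illustrative sketch of the mapping, with hypothetical file names:

```python
# Illustrative only: mirrors the naming scheme of NLPPipelineJob and the
# nlp/vrt-creation tasks above; the file names are hypothetical examples.
import os

output_dir = '/output'
for file in ['a.txt', 'b.txt']:
    name = file.rsplit('.', 1)[0]
    job_output_dir = os.path.join(output_dir, file)
    print(os.path.join(job_output_dir, '{}.nopaque-stand-off.json'.format(name)))
    print(os.path.join(job_output_dir, '{}.vrt'.format(name)))
# /output/a.txt/a.nopaque-stand-off.json
# /output/a.txt/a.vrt
# /output/b.txt/b.nopaque-stand-off.json
# /output/b.txt/b.vrt
```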

spacy-nlp Executable file

@ -0,0 +1,164 @@
#!/usr/bin/env python3.7
# coding=utf-8
from argparse import ArgumentParser
import chardet
import hashlib
import json
import os
import spacy
import textwrap
spacy_models = {spacy.info(pipeline)['lang']: pipeline
for pipeline in spacy.info()['pipelines']}
# Parse the given arguments
parser = ArgumentParser(description='Create annotations for a given txt file')
parser.add_argument('input', metavar='Path to txt input file')
parser.add_argument('output', metavar='Path to JSON output file')
parser.add_argument('-l', '--language',
choices=spacy_models.keys(),
required=True)
parser.add_argument('-c', '--check-encoding', action='store_true')
args = parser.parse_args()
# If requested: Check the encoding of the text contents from the input file
# Else: Use utf-8
with open(args.input, "rb") as input_file:
if args.check_encoding:
encoding = chardet.detect(input_file.read())['encoding']
else:
encoding = 'utf-8'
# Rewind before hashing: if the encoding check ran, it consumed the file and
# the MD5 below would otherwise be computed over empty input.
input_file.seek(0)
text_md5 = hashlib.md5()
for chunk in iter(lambda: input_file.read(128 * text_md5.block_size), b''):
text_md5.update(chunk)
# Load the text contents from the input file
with open(args.input, encoding=encoding) as input_file:
text = input_file.read()
# spaCy's NLP is limited to strings of at most 1 million characters at
# once, so we split the text into suitable chunks.
text_chunks = textwrap.wrap(text, 1000000, break_long_words=False)
# the text variable potentially occupies a lot of system memory and is no
# longer needed...
del text
model = spacy_models[args.language]
nlp = spacy.load(model)
meta = {
'generator': {
'name': 'nopaque NLP service',
'version': '1.0.0',
'arguments': {
'check_encoding': args.check_encoding,
'language': args.language
}
},
'file': {
'md5': text_md5.hexdigest(),
'name': os.path.basename(args.input)
}
}
tags = {
'token': {
'description': '',
'properties': {
'lemma': {
'description': 'The base form of the word',
'flags': ['required'],
'tagset': None
},
'pos': {
'description': 'The detailed part-of-speech tag',
'flags': ['required'],
'tagset': {label: spacy.explain(label) for label in spacy.info(model)['labels']['tagger']} # noqa
},
'simple_pos': {
'description': 'The simple UPOS part-of-speech tag',
'flags': ['required'],
'tagset': {
'ADJ': 'adjective',
'ADP': 'adposition',
'ADV': 'adverb',
'AUX': 'auxiliary verb',
'CONJ': 'coordinating conjunction',
'DET': 'determiner',
'INTJ': 'interjection',
'NOUN': 'noun',
'NUM': 'numeral',
'PART': 'particle',
'PRON': 'pronoun',
'PROPN': 'proper noun',
'PUNCT': 'punctuation',
'SCONJ': 'subordinating conjunction',
'SYM': 'symbol',
'VERB': 'verb',
'X': 'other'
}
},
'ner': {
'description': 'Label indicating the type of the entity',
'tagset': {label: spacy.explain(label) for label in spacy.info(model)['labels']['ner']} # noqa
}
}
},
's': {
'description': 'Encodes the start and end of a sentence',
'properties': None
},
'ent': {
'description': 'Encodes the start and end of a named entity',
'properties': {
'type': {
'description': 'Label indicating the type of the entity',
'flags': ['required'],
'tagset': {label: spacy.explain(label) for label in spacy.info(model)['labels']['ner']} # noqa
}
}
}
}
annotations = []
chunk_offset = 0
for text_chunk in text_chunks:
doc = nlp(text_chunk)
for token in doc:
if token.is_space:
continue
if token.is_sent_start:
annotation = {'start': token.sent.start_char + chunk_offset,
'end': token.sent.end_char + chunk_offset,
'tag': 's'}
annotations.append(annotation)
# Check if the token is the start of an entity
if token.ent_iob == 3:
for ent_candidate in token.sent.ents:
if ent_candidate.start_char == token.idx:
ent = ent_candidate
break
annotation = {'start': ent.start_char + chunk_offset,
'end': ent.end_char + chunk_offset,
'tag': 'ent',
'properties': {'type': token.ent_type_}}
annotations.append(annotation)
annotation = {'start': token.idx + chunk_offset,
'end': token.idx + len(token.text) + chunk_offset,
'tag': 'token',
'properties': {'pos': token.tag_,
'lemma': token.lemma_,
'simple_pos': token.pos_}}
if token.ent_type_:
annotation['properties']['ner'] = token.ent_type_
annotations.append(annotation)
# Accumulate the offset so annotations from later chunks keep absolute positions
chunk_offset += len(text_chunk)
with open(args.output, 'w') as output_file:
json.dump({'meta': meta, 'tags': tags, 'annotations': annotations},
output_file, indent=4)
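The stand-off JSON written by spacy-nlp has three top-level keys (meta, tags and annotations); each annotation records start and end character offsets, a tag and optional properties. A minimal, hypothetical consumer sketch, where the file name is a placeholder:

```python
#!/usr/bin/env python3.7
# Hypothetical consumer of a spacy-nlp output file; the path is a placeholder.
import json

with open('example.nopaque-stand-off.json') as annotation_file:
    data = json.load(annotation_file)

print(data['meta']['file']['name'], data['meta']['file']['md5'])
for annotation in data['annotations']:
    if annotation['tag'] != 'token':
        continue
    properties = annotation.get('properties', {})
    # start and end are character offsets into the input text
    print(annotation['start'], annotation['end'],
          properties.get('lemma'), properties.get('pos'),
          properties.get('simple_pos'), properties.get('ner'))
```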

spacy_nlp (deleted)

@ -1,71 +0,0 @@
#!/usr/bin/env python3.5
# coding=utf-8
import argparse
import os
import spacy
import textwrap
parser = argparse.ArgumentParser(
description='Tag a text file with spaCy and save it as a verticalized text file.'
)
parser.add_argument(
'i',
metavar='txt-sourcefile',
)
parser.add_argument(
'-l',
choices=['de', 'el', 'en', 'es', 'fr', 'it', 'nl', 'pt'],
dest='lang',
required=True
)
parser.add_argument(
'o',
metavar='vrt-destfile',
)
args = parser.parse_args()
SPACY_MODELS = {
'de': 'de_core_news_sm', 'el': 'el_core_news_sm', 'en': 'en_core_web_sm',
'es': 'es_core_news_sm', 'fr': 'fr_core_news_sm', 'it': 'it_core_news_sm',
'nl': 'nl_core_news_sm', 'pt': 'pt_core_news_sm'
}
# Set the language model for spacy
nlp = spacy.load(SPACY_MODELS[args.lang])
# Read text from the input file and if necessary split it into parts with a
# length of less than 1 million characters.
with open(args.i) as input_file:
text = input_file.read()
texts = textwrap.wrap(text, 1000000, break_long_words=False)
text = None
# Create and open the output file
output_file = open(args.o, 'w+')
output_file.write(
'<?xml version="1.0" encoding="UTF-8"?>\n<corpus>\n<text id="%s">\n' % (
os.path.basename(args.i).rsplit(".", 1)[0]
)
)
for text in texts:
# Run spacy nlp over the text (partial string if above 1 million chars)
doc = nlp(text)
for sent in doc.sents:
output_file.write('<s>\n')
for token in sent:
# Skip whitespace tokens like "\n" or "\t"
if token.text.isspace():
continue
# Write all information in .vrt style to the output file
# text, lemma, simple_pos, pos, ner
output_file.write(
token.text + '\t' + token.lemma_ + '\t'
+ token.pos_ + '\t' + token.tag_ + '\t'
+ (token.ent_type_ if token.ent_type_ != '' else 'NULL') + '\n'
)
output_file.write('</s>\n')
output_file.write('</text>\n</corpus>')
output_file.close()

vrt-creator Executable file

@ -0,0 +1,107 @@
#!/usr/bin/env python3.7
# coding=utf-8
from argparse import ArgumentParser
from xml.sax.saxutils import escape
import json
# Parse the given arguments
parser = ArgumentParser(description='Create a vrt file from a txt file and its JSON annotations')
parser.add_argument('input', metavar='Path to txt input file')
parser.add_argument('annotations', metavar='Path to JSON annotation file')
parser.add_argument('output', metavar='Path to vrt output file')
args = parser.parse_args()
with open(args.input) as text_file, \
open(args.annotations) as data_file:
text = text_file.read()
stand_off_data = json.load(data_file)
def meta_to_string():
string = ''
string += '<generator software="{} ({})" arguments="check_encoding: {}; language: {}"/>\n'.format( # noqa
stand_off_data['meta']['generator']['name'],
stand_off_data['meta']['generator']['version'],
stand_off_data['meta']['generator']['arguments']['check_encoding'],
stand_off_data['meta']['generator']['arguments']['language']
)
string += '<file name="{}" md5="{}"/>\n'.format(
stand_off_data['meta']['file']['name'],
stand_off_data['meta']['file']['md5']
)
return string
def tags_to_string():
return ''
def annotations_to_string(end=float('inf')):
string = ''
while stand_off_data['annotations']:
if stand_off_data['annotations'][0]['start'] >= end:
break
annotation = stand_off_data['annotations'].pop(0)
#######################################################################
# Check for malformed annotations #
#######################################################################
if 'tag' not in annotation:
raise Exception('Annotation tag is missing')
if annotation['tag'] not in stand_off_data['tags']:
raise Exception('Unknown annotation tag: ' + annotation['tag'])
tag_model = stand_off_data['tags'][annotation['tag']]
if 'properties' in tag_model:
properties_model = tag_model['properties']
if properties_model is not None:
required_properties = [k for k, v in properties_model.items() if 'flags' in v and 'required' in v['flags']] # noqa
if required_properties and annotation['properties'] is None:
raise Exception('There are required properties but the "Properties" attribute is missing') # noqa
for property in required_properties:
if property not in annotation['properties']:
raise Exception('Required property is missing: ' + property) # noqa
#######################################################################
# Process tokens ~ cwb's positional attributes #
#######################################################################
if annotation['tag'] == 'token':
string += '{}\t{}\t{}\t{}\t{}\n'.format(
escape(text[annotation['start']:annotation['end']]),
escape(annotation['properties']['pos']),
escape(annotation['properties']['lemma']),
escape(annotation['properties']['simple_pos']),
escape(annotation['properties']['ner'] if 'ner' in annotation['properties'] else 'None') # noqa
)
#######################################################################
# Process other tags ~ cwb's structural attributes #
#######################################################################
else:
properties = ''
if 'properties' in annotation and annotation['properties'] is not None: # noqa
for property, value in annotation['properties'].items():
if not value:
continue
if properties_model and property in properties_model:
if 'flags' in properties_model[property] and 'multiple' in properties_model[property]['flags']: # noqa
properties += ' {}="|{}|"'.format(property, '|'.join(value)) # noqa
else:
properties += ' {}="{}"'.format(property, value)
string += '<' + annotation['tag'] + properties + '>\n'
string += annotations_to_string(end=min(annotation['end'], end))
string += '</' + annotation['tag'] + '>\n'
return string
vrt = ''
vrt += '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>\n'
vrt += '<corpus>\n'
vrt += '<text>\n'
vrt += meta_to_string()
vrt += tags_to_string()
vrt += annotations_to_string()
vrt += '</text>\n'
vrt += '</corpus>'
with open(args.output, 'w') as vrt_file:
vrt_file.write(vrt)
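Each token line written above carries five tab-separated positional attributes in this order: text, pos (the fine-grained tag), lemma, simple_pos and ner. A hedged reader sketch that splits these columns back out; the input path is a placeholder:

```python
# Illustrative reader for the token lines produced by vrt-creator above.
COLUMNS = ['word', 'pos', 'lemma', 'simple_pos', 'ner']

with open('example.vrt') as vrt_file:
    for line in vrt_file:
        line = line.rstrip('\n')
        if not line or line.startswith('<'):
            # Skip the XML declaration and structural tags like <s> or <ent ...>
            continue
        token = dict(zip(COLUMNS, line.split('\t')))
        print(token['word'], token['lemma'], token['ner'])
```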

wrapper/nlp

@ -1,39 +1,38 @@
#!/usr/bin/env python3
# coding=utf-8
import argparse
from argparse import ArgumentParser
import os
import subprocess
import sys
container_image = 'gitlab.ub.uni-bielefeld.de:4567/sfb1288inf/nlp:latest'
container_input_dir = '/input'
container_output_dir = '/output'
uid = str(os.getuid())
gid = str(os.getgid())
CONTAINER_IMAGE = 'gitlab.ub.uni-bielefeld.de:4567/sfb1288inf/nlp:1.0.0'
CONTAINER_INPUT_DIR = '/input'
CONTAINER_OUTPUT_DIR = '/output'
CONTAINER_LOG_DIR = '/logs'
UID = str(os.getuid())
GID = str(os.getgid())
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument(
'-i',
dest='input_dir',
required=False
)
parser.add_argument(
'-o',
dest='output_dir',
required=False
)
parser = ArgumentParser(add_help=False)
parser.add_argument('-i', '--input-dir')
parser.add_argument('-o', '--output-dir')
parser.add_argument('--log-dir')
args, remaining_args = parser.parse_known_args()
cmd = ['docker', 'run', '--rm', '-it', '-u', uid + ':' + gid]
cmd = ['docker', 'run', '--rm', '-it', '-u', '{}:{}'.format(UID, GID)]
if args.input_dir is not None:
host_input_dir = os.path.abspath(args.input_dir)
cmd += ['-v', host_input_dir + ':' + container_input_dir]
remaining_args += ['-i', container_input_dir]
mapping = os.path.abspath(args.input_dir) + ':' + CONTAINER_INPUT_DIR
cmd += ['-v', mapping]
remaining_args += ['-i', CONTAINER_INPUT_DIR]
if args.output_dir is not None:
host_output_dir = os.path.abspath(args.output_dir)
cmd += ['-v', host_output_dir + ':' + container_output_dir]
remaining_args += ['-o', container_output_dir]
cmd.append(container_image)
mapping = os.path.abspath(args.output_dir) + ':' + CONTAINER_OUTPUT_DIR
cmd += ['-v', mapping]
remaining_args += ['-o', CONTAINER_OUTPUT_DIR]
if args.log_dir is not None:
mapping = os.path.abspath(args.log_dir) + ':' + CONTAINER_LOG_DIR
cmd += ['-v', mapping]
remaining_args += ['--log-dir', CONTAINER_LOG_DIR]
cmd.append(CONTAINER_IMAGE)
cmd += remaining_args
subprocess.run(cmd)
sys.exit(subprocess.run(cmd).returncode)
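For orientation, the command list the wrapper assembles for a call like `nlp -i ./input -o ./output -l en` looks roughly as follows; uid, gid and host paths are hypothetical examples.

```python
# Roughly the docker command the wrapper builds; values are hypothetical.
cmd = ['docker', 'run', '--rm', '-it', '-u', '1000:1000',
       '-v', '/home/user/input:/input',
       '-v', '/home/user/output:/output',
       'gitlab.ub.uni-bielefeld.de:4567/sfb1288inf/nlp:1.0.0',
       '-l', 'en', '-i', '/input', '-o', '/output']
```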