#!/usr/bin/env python3.7
# coding=utf-8

from argparse import ArgumentParser
import chardet
import hashlib
import json
import os
import spacy
import textwrap
import uuid


# Map each installed spaCy pipeline's language code (e.g. 'en') to the
# pipeline's name so that a model can be selected by language on the CLI.
spacy_models = {}
for pipeline in spacy.info()['pipelines']:
    pipeline_lang = spacy.info(pipeline)['lang']
    spacy_models[pipeline_lang] = pipeline


# Parse the given arguments
parser = ArgumentParser(
    description='Create annotations for a given plain txt file'
)
parser.add_argument(
    '-i', '--input-file',
    help='Input file',
    # Bug fix: the script reads this file unconditionally below; without
    # required=True an omitted -i crashed with a TypeError instead of a
    # proper usage error.
    required=True
)
parser.add_argument(
    '-o', '--output-file',
    help='Output file',
    required=True
)
parser.add_argument(
    '-m', '--model',
    choices=spacy_models.keys(),
    help='The model to be used',
    required=True
)
parser.add_argument(
    '-c', '--check-encoding',
    action='store_true',
    help='Detect the encoding of the input file; otherwise UTF-8 is assumed'
)
parser.add_argument(
    '--id-prefix',
    default='',
    help='A prefix for all the ids within the stand off annotations'
)
args = parser.parse_args()


def generate_id(name):
    """Return a stable, optionally prefixed UUIDv3 string for *name*.

    The same *name* (and --id-prefix) always yields the same id, so tag and
    property definitions can be cross-referenced deterministically.
    """
    stable_uuid = uuid.uuid3(uuid.NAMESPACE_DNS, name)
    return '{}{}'.format(args.id_prefix, stable_uuid)


with open(args.input_file, "rb") as input_file:
    # Determine the source encoding: detect it with chardet on request,
    # otherwise assume UTF-8.
    if args.check_encoding:
        raw_bytes = input_file.read()
        encoding = chardet.detect(raw_bytes)['encoding']
    else:
        encoding = 'utf-8'
    input_file.seek(0)
    # Compute the MD5 checksum of the raw file contents in fixed-size
    # chunks to keep memory usage bounded.
    text_md5 = hashlib.md5()
    read_size = 128 * text_md5.block_size
    while True:
        chunk = input_file.read(read_size)
        if not chunk:
            break
        text_md5.update(chunk)

# Load the text contents from the input file using the detected/assumed
# encoding.
with open(args.input_file, encoding=encoding) as input_file:
    raw_text = input_file.read()

# spaCy NLP is limited to strings with a maximum of 1 million characters at
# once, so split the text into chunks of at most that size. The wrap options
# preserve whitespace, tabs and long words so character offsets computed per
# chunk stay meaningful.
wrap_options = {
    'break_long_words': False,
    'break_on_hyphens': False,
    'drop_whitespace': False,
    'expand_tabs': False,
    'replace_whitespace': False
}
text_chunks = textwrap.wrap(raw_text, 1000000, **wrap_options)

# Resolve the chosen language code to a concrete pipeline and load it.
model_name = spacy_models[args.model]
nlp = spacy.load(model_name)

# Metadata describing how and from what this annotation file was generated.
generator_info = {
    'name': 'nopaque spacy NLP',
    'version': '0.1.0',
    'arguments': {
        'check_encoding': args.check_encoding,
        'model': args.model
    }
}
file_info = {
    'encoding': encoding,
    'md5': text_md5.hexdigest(),
    'name': os.path.basename(args.input_file)
}
meta = {'generator': generator_info, 'file': file_info}

tags = []

# (name, description) pairs for the Universal POS tag set exposed through
# the token.simple_pos property.
# NOTE(review): the UPOS standard nowadays uses CCONJ instead of CONJ — kept
# as CONJ here to preserve the existing output format; confirm against the
# models in use.
simple_pos_labels = [
    ('ADJ', 'adjective'),
    ('ADP', 'adposition'),
    ('ADV', 'adverb'),
    ('AUX', 'auxiliary verb'),
    ('CONJ', 'coordinating conjunction'),
    ('DET', 'determiner'),
    ('INTJ', 'interjection'),
    ('NOUN', 'noun'),
    ('NUM', 'numeral'),
    ('PART', 'particle'),
    ('PRON', 'pronoun'),
    ('PROPN', 'proper noun'),
    ('PUNCT', 'punctuation'),
    ('SCONJ', 'subordinating conjunction'),
    ('SYM', 'symbol'),
    ('VERB', 'verb'),
    ('X', 'other')
]

# Tag definition for individual tokens; which properties it carries depends
# on the components available in the loaded pipeline.
token = {
    'id': generate_id('token'),
    'name': 'token',
    'description': 'An individual token — i.e. a word, punctuation symbol, whitespace, etc.',  # noqa
    'properties': []
}
# TODO: Check if all languages support token.sentiment
token['properties'].append(
    {
        'id': generate_id('token.sentiment'),
        'name': 'sentiment',
        'description': 'A scalar value indicating the positivity or negativity of the token.'  # noqa
    }
)
if nlp.has_pipe('lemmatizer'):
    token['properties'].append(
        {
            'id': generate_id('token.lemma'),
            'name': 'lemma',
            'description': 'The base form of the word'
        }
    )
if nlp.has_pipe('morphologizer') or nlp.has_pipe('tagger'):
    # Bug fix: every label previously reused the id generated for
    # 'token.simple_pos=ADJ'; each label now gets its own unique id.
    token['properties'].append(
        {
            'id': generate_id('token.simple_pos'),
            'name': 'simple_pos',
            'description': 'The simple UPOS part-of-speech tag',
            'labels': [
                {
                    'id': generate_id(f'token.simple_pos={label_name}'),
                    'name': label_name,
                    'description': label_description
                } for label_name, label_description in simple_pos_labels
            ]
        }
    )
if nlp.has_pipe('tagger'):
    # Fine-grained (model-specific) POS tags, taken from the model's label
    # inventory.
    token['properties'].append(
        {
            'id': generate_id('token.pos'),
            'name': 'pos',
            'description': 'The detailed part-of-speech tag',
            'labels': [
                {
                    'id': generate_id(f'token.pos={label}'),
                    'name': label,
                    'description': spacy.explain(label) or ''
                } for label in spacy.info(model_name)['labels']['tagger']
            ]
        }
    )
if nlp.has_pipe('ner') or nlp.has_pipe('entity_ruler'):
    # Named-entity spans, typed with the model's NER label inventory.
    tags.append(
        {
            'id': generate_id('ent'),
            'name': 'ent',
            'description': 'Encodes the start and end of a named entity',
            'properties': [
                {
                    'id': generate_id('ent.type'),
                    'name': 'type',
                    'description': 'Label indicating the type of the entity',
                    'labels': [
                        {
                            'id': generate_id('ent.type={}'.format(label)),
                            'name': label,
                            'description': spacy.explain(label) or ''
                        } for label in spacy.info(model_name)['labels']['ner']
                    ]
                }
            ]
        }
    )
if nlp.has_pipe('parser') or nlp.has_pipe('senter') or nlp.has_pipe('sentencizer'):  # noqa
    # TODO: Check if all languages support sent.sentiment
    tags.append(
        {
            'id': generate_id('s'),
            'name': 's',
            'description': 'Encodes the start and end of a sentence',
            'properties': [
                {
                    'id': generate_id('s.sentiment'),
                    'name': 'sentiment',
                    'description': 'A scalar value indicating the positivity or negativity of the sentence.'  # noqa
                }
            ]
        }
    )
tags.append(token)

annotations = []

# generate_id is deterministic for a fixed --id-prefix, so hoist the ids
# used on every annotation out of the loops below instead of recomputing
# the same UUIDv3 for every single token/sentence/entity.
ent_tag_id = generate_id('ent')
ent_type_id = generate_id('ent.type')
s_tag_id = generate_id('s')
s_sentiment_id = generate_id('s.sentiment')
token_tag_id = generate_id('token')
token_lemma_id = generate_id('token.lemma')
token_simple_pos_id = generate_id('token.simple_pos')
token_sentiment_id = generate_id('token.sentiment')
token_pos_id = generate_id('token.pos')

chunk_offset = 0
# Consume the chunk list destructively so each processed chunk (up to one
# million characters) becomes garbage collectable immediately.
while text_chunks:
    text_chunk = text_chunks.pop(0)
    doc = nlp(text_chunk)
    if hasattr(doc, 'ents'):
        for ent in doc.ents:
            annotations.append(
                {
                    'start': ent.start_char + chunk_offset,
                    'end': ent.end_char + chunk_offset,
                    'tag_id': ent_tag_id,
                    'properties': [
                        {
                            'property_id': ent_type_id,
                            'value': ent.label_
                        }
                    ]
                }
            )
    if hasattr(doc, 'sents'):
        for sent in doc.sents:
            annotation = {
                'start': sent.start_char + chunk_offset,
                'end': sent.end_char + chunk_offset,
                'tag_id': s_tag_id,
                'properties': []
            }
            if hasattr(sent, 'sentiment'):
                annotation['properties'].append(
                    {
                        'property_id': s_sentiment_id,
                        'value': sent.sentiment
                    }
                )
            annotations.append(annotation)
    # Loop variable renamed from "token" to "tok" so it no longer clobbers
    # the "token" tag dict defined above.
    for tok in doc:
        annotation = {
            'start': tok.idx + chunk_offset,
            'end': tok.idx + len(tok.text) + chunk_offset,
            'tag_id': token_tag_id,
            'properties': []
        }
        if hasattr(tok, 'lemma_'):
            annotation['properties'].append(
                {
                    'property_id': token_lemma_id,
                    'value': tok.lemma_
                }
            )
        if hasattr(tok, 'pos_'):
            annotation['properties'].append(
                {
                    'property_id': token_simple_pos_id,
                    'value': tok.pos_
                }
            )
        if hasattr(tok, 'sentiment'):
            annotation['properties'].append(
                {
                    'property_id': token_sentiment_id,
                    'value': tok.sentiment
                }
            )
        if hasattr(tok, 'tag_'):
            annotation['properties'].append(
                {
                    'property_id': token_pos_id,
                    'value': tok.tag_
                }
            )
        annotations.append(annotation)
    # Dead "text_chunk = None" assignment removed: text_chunk is rebound at
    # the top of the next iteration anyway.
    chunk_offset += len(text_chunk)

# Serialize the complete result (metadata, tag schema, annotations) as
# pretty-printed JSON.
output_data = {
    'meta': meta,
    'tags': tags,
    'annotations': annotations
}
with open(args.output_file, 'w') as output_file:
    json.dump(output_data, output_file, indent=4)