Mirror of https://gitlab.ub.uni-bielefeld.de/sfb1288inf/nlp.git
Add stand-off variant and metadata
spacy-nlp (65 lines changed)
@@ -6,6 +6,7 @@ from xml.sax.saxutils import escape
 import chardet
 import spacy
 import textwrap
+import hashlib
 
 
 SPACY_MODELS = {'de': 'de_core_news_sm',
@@ -39,12 +40,19 @@ else:
     encoding = 'utf-8'
 
 
+# Hash in chunks to avoid filling up RAM with huge files.
+with open(args.i, 'rb') as input_file:
+    md5_hash = hashlib.md5()
+    for chunk in iter(lambda: input_file.read(128 * md5_hash.block_size), b''):
+        md5_hash.update(chunk)
+    md5_hash = md5_hash.hexdigest()
+
 # Load the text contents from the input file
 with open(args.i, encoding=encoding) as input_file:
     text = input_file.read()
     # spaCys NLP is limited to strings with maximum 1 million characters at
     # once. So we split it into suitable chunks.
-    text_chunks = textwrap.wrap(text, 1000000, break_long_words=False)
+    text_chunks = textwrap.wrap(text, 1000, break_long_words=False)
     # the text variable potentially occupies a lot of system memory and is no
     # longer needed...
     del text
@@ -56,21 +64,56 @@ nlp = spacy.load(SPACY_MODELS[args.language])
 
 # Create the output file in verticalized text format
 # See: http://cwb.sourceforge.net/files/CWB_Encoding_Tutorial/node3.html
-output_file = open(args.o, 'w+')
-output_file.write('<?xml version="1.0" encoding="UTF-8"?>\n<corpus>\n<text>\n')
+output_file_original_filename = args.o
+output_file_stand_off_filename = args.o.replace('.vrt', '.stand-off.vrt')
+output_file_tokens_filename = args.o.replace('.vrt', '.tokens.txt')
+xml_head = '''<?xml version="1.0" encoding="UTF-8"?>\n\
+<corpus>\n\
+<text>\n\
+<metadata\n\
+    spacyVersion="{spacy_version}"
+    spacyModel="{spacy_model}"
+    md5HashOfInput="{md5_hash}">\n'''.format(md5_hash=md5_hash,
+                                             spacy_version=spacy.__version__,
+                                             spacy_model=SPACY_MODELS[args.language])
+
+with open(output_file_original_filename, 'w+') as output_file_original, \
+     open(output_file_stand_off_filename, 'w+') as output_file_stand_off, \
+     open(output_file_tokens_filename, 'w+') as output_file_tokens:
+
+    output_file_original.write(xml_head)
+    output_file_stand_off.write(xml_head)
+    output_file_tokens.write(xml_head)
+    text_offset = 0
     for text_chunk in text_chunks:
         doc = nlp(text_chunk)
         for sent in doc.sents:
-        output_file.write('<s>\n')
-        for token in sent:
+            output_file_original.write('<s>\n')
+            output_file_stand_off.write('<s>\n')
+            space_flag = False
             # Skip whitespace tokens
-            if token.text.isspace():
-                continue
-            output_file.write('{}'.format(escape(token.text))
+            sent_no_space = [token for token in sent if not token.text.isspace()]
+            # No-space variant for the cwb original .vrt file input.
+            for token in sent_no_space:
+                output_file_original.write('{}'.format(escape(token.text))
                                            + '\t{}'.format(escape(token.lemma_))
                                            + '\t{}'.format(token.pos_)
                                            + '\t{}'.format(token.tag_)
                                            + '\t{}\n'.format(token.ent_type_ or 'NULL'))
-        output_file.write('</s>\n')
-output_file.write('</text>\n</corpus>')
-output_file.close()
+            # Stand-off variant with spaces.
+            for token in sent:
+                token_start = token.idx + text_offset
+                token_end = token.idx + len(token.text) + text_offset
+                output_file_stand_off.write('{}:{}'.format(token_start,
+                                                           token_end)
+                                            + '\t{}'.format(escape(token.lemma_))
+                                            + '\t{}'.format(token.pos_)
+                                            + '\t{}'.format(token.tag_)
+                                            + '\t{}\n'.format(token.ent_type_ or 'NULL'))
+                output_file_tokens.write('{}\n'.format(escape(token.text)))
+            output_file_original.write('</s>\n')
+            output_file_stand_off.write('</s>\n')
+        text_offset = token_end + 1
+    output_file_original.write('</metadata>\n</text>\n</corpus>')
+    output_file_stand_off.write('</metadata>\n</text>\n</corpus>')
+    output_file_tokens.write('</metadata>\n</text>\n</corpus>')
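The hashing loop added above reads the input in blocks of 128 × MD5's 64-byte block size (8 KiB), so the digest of an arbitrarily large file is computed without holding the whole file in memory. A minimal sketch of the equivalence, assuming a hypothetical input file input.txt: the chunked digest matches the one-shot digest.

    import hashlib

    with open('input.txt', 'rb') as f:
        md5_chunked = hashlib.md5()
        # Same pattern as the commit: read fixed-size blocks until b'' marks EOF.
        for chunk in iter(lambda: f.read(128 * md5_chunked.block_size), b''):
            md5_chunked.update(chunk)

    with open('input.txt', 'rb') as f:
        # One-shot digest for comparison; only feasible for files that fit in RAM.
        md5_full = hashlib.md5(f.read()).hexdigest()

    assert md5_chunked.hexdigest() == md5_full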
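In the new .stand-off.vrt output, the surface-token column is replaced by start:end character offsets, so token text can be recovered by slicing the analysed text. Below is a minimal consumer sketch with hypothetical file names; judging from the bookkeeping text_offset = token_end + 1, the offsets appear to refer to the whitespace-normalised chunks produced by textwrap.wrap (which collapses runs of whitespace), joined by single separators, rather than to the raw input file, so the same chunking is reproduced here under that assumption.

    import textwrap

    # Rebuild the normalised text stream the tagger iterated over (assumption:
    # consecutive chunks are one character apart in the offset scheme).
    with open('input.txt', encoding='utf-8') as f:
        text = ' '.join(textwrap.wrap(f.read(), 1000, break_long_words=False))

    with open('output.stand-off.vrt', encoding='utf-8') as f:
        for line in f:
            fields = line.rstrip('\n').split('\t')
            # Token lines have exactly five columns: span, lemma, pos, tag, entity;
            # XML head and <s>/</s> markers contain no tabs and are skipped.
            if len(fields) != 5:
                continue
            start, end = (int(i) for i in fields[0].split(':'))
            lemma, pos, tag, ent = fields[1:]
            print(text[start:end], lemma, pos, tag, ent)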