#!/usr/bin/env python2.7
# coding=utf-8

"""An OCR pipeline for PDF file processing."""

__author__ = 'Patrick Jentsch <p.jentsch@uni-bielefeld.de>,' \
             'Stephan Porada <porada@posteo.de>'
__version__ = '1.0.0'

from argparse import ArgumentParser
from pyflow import WorkflowRunner
import multiprocessing
import os
import sys


class OCRPipelineJob:
"""An OCR pipeline job class
Each input file of the pipeline is represented as an OCR pipeline job,
which holds all necessary information for the pipeline to process it.
Arguments:
file -- Path to the file
output_dir -- Path to a directory, where job results a stored
"""

    def __init__(self, file, output_dir):
        self.file = file
        self.name = os.path.basename(file).rsplit('.', 1)[0]
        self.output_dir = output_dir
        self.page_dir = os.path.join(output_dir, 'pages')


class OCRPipeline(WorkflowRunner):
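    """An OCR pipeline class

    A pyflow WorkflowRunner that builds the task graph for all collected
    jobs: output directory setup, PDF-to-TIFF splitting, optional
    binarization, OCR, hOCR-to-TEI conversion and optional zip creation.

    Arguments:
    input_dir -- Path to the input directory
    lang -- Language to be passed to tesseract's -l option
    output_dir -- Path to the output directory
    binarize -- Whether to add binarization as a preprocessing step
    zip -- Name stem for the zip archives (None disables zip creation)
    """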

    def __init__(self, input_dir, lang, output_dir, binarize, zip):
        self.input_dir = input_dir
        self.lang = lang
        self.output_dir = output_dir
        self.binarize = binarize
        self.zip = zip
        self.jobs = collect_jobs(self.input_dir, self.output_dir)

    def workflow(self):
        if not self.jobs:
            return

        '''
        ' ##################################################
        ' # setup output directory #
        ' ##################################################
        '''
        setup_output_directory_tasks = []
        for i, job in enumerate(self.jobs):
            cmd = 'mkdir -p "{}"'.format(job.page_dir)
            lbl = 'setup_output_directory_-_{}'.format(i)
            task = self.addTask(command=cmd, label=lbl)
            setup_output_directory_tasks.append(task)

        '''
        ' ##################################################
        ' # split input #
        ' ##################################################
        '''
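        # Split each input PDF into one image per page: Ghostscript renders
        # at 300 dpi (-r300) into LZW-compressed 24-bit RGB TIFFs written as
        # page-1.tif, page-2.tif, ... in the job's page directory.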
        split_input_tasks = []
        n_cores = max(1, int(self.getNCores() / len(self.jobs)))
        for i, job in enumerate(self.jobs):
            input_file = job.file
            output_file = '{}/page-%d.tif'.format(job.page_dir)
            cmd = 'gs'
            cmd += ' -dBATCH'
            cmd += ' -dNOPAUSE'
            cmd += ' -dNumRenderingThreads={}'.format(n_cores)
            cmd += ' -dQUIET'
            cmd += ' -r300'
            cmd += ' -sDEVICE=tiff24nc'
            cmd += ' -sCompression=lzw'
            cmd += ' "-sOutputFile={}"'.format(output_file)
            cmd += ' "{}"'.format(input_file)
            deps = 'setup_output_directory_-_{}'.format(i)
            lbl = 'split_input_-_{}'.format(i)
            task = self.addTask(command=cmd, dependencies=deps, label=lbl,
                                nCores=n_cores)
            split_input_tasks.append(task)

        if self.binarize:
            '''
            ' ##################################################
            ' # pre binarization #
            ' ##################################################
            '''
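            # Write the page images of each job into a list file, which is
            # handed to ocropus-nlbin as its input in the next step.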
            pre_binarization_tasks = []
            for i, job in enumerate(self.jobs):
                input_file = os.path.join(job.output_dir, 'binarization_input_files.txt') # noqa
                cmd = 'ls -dv "{}/"* >> "{}"'.format(job.page_dir, input_file)
                deps = 'split_input_-_{}'.format(i)
                lbl = 'pre_binarization_-_{}'.format(i)
                task = self.addTask(command=cmd, dependencies=deps, label=lbl)
                pre_binarization_tasks.append(task)

            '''
            ' ##################################################
            ' # binarization #
            ' ##################################################
            '''
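            # Binarize the page images with ocropus-nlbin; the '@' prefix
            # makes it read its inputs from the list file created above, and
            # the results are written back into the job's page directory.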
            binarization_tasks = []
            n_cores = self.getNCores()
            mem_mb = self.getMemMb()
            for i, job in enumerate(self.jobs):
                input_file = os.path.join(job.output_dir, 'binarization_input_files.txt') # noqa
                cmd = 'ocropus-nlbin "@{}"'.format(input_file)
                cmd += ' --nocheck'
                cmd += ' --output "{}"'.format(job.page_dir)
                cmd += ' --parallel "{}"'.format(n_cores)
                deps = 'pre_binarization_-_{}'.format(i)
                lbl = 'binarization_-_{}'.format(i)
                task = self.addTask(command=cmd, dependencies=deps, label=lbl,
                                    memMb=mem_mb, nCores=n_cores)
                binarization_tasks.append(task)

            '''
            ' ##################################################
            ' # post binarization #
            ' ##################################################
            '''
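            # Clean up after binarization: remove the list file, the original
            # TIFFs and the *.nrm.png intermediates, then rename the remaining
            # binarized pages from their zero-padded numbering to the
            # page-<number> naming scheme.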
            post_binarization_tasks = []
            for i, job in enumerate(self.jobs):
                input_file = os.path.join(job.output_dir, 'binarization_input_files.txt') # noqa
                cmd = 'rm "{}"'.format(input_file)
                cmd += ' && '
                cmd += 'cd "{}"'.format(job.page_dir)
                cmd += ' && '
                cmd += 'rm *.{nrm.png,tif}'
                cmd += ' && '
                cmd += 'rename \'s/^0*/page-/\' *'
                cmd += ' && '
                cmd += 'cd -'
                deps = 'binarization_-_{}'.format(i)
                lbl = 'post_binarization_-_{}'.format(i)
                task = self.addTask(command=cmd, dependencies=deps, label=lbl)
                post_binarization_tasks.append(task)

        '''
        ' ##################################################
        ' # pre ocr #
        ' ##################################################
        '''
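        # Collect the page images of each job (binarized or not) in a list
        # file that tesseract can consume as its input file list.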
        pre_ocr_tasks = []
        for i, job in enumerate(self.jobs):
            input_file = os.path.join(job.output_dir, 'ocr_input_files.txt')
            cmd = 'ls -dv "{}/"* >> "{}"'.format(job.page_dir, input_file)
            deps = 'post_binarization_-_{}'.format(i) if self.binarize else 'split_input_-_{}'.format(i) # noqa
            lbl = 'pre_ocr_-_{}'.format(i)
            task = self.addTask(command=cmd, dependencies=deps, label=lbl)
            pre_ocr_tasks.append(task)

        '''
        ' ##################################################
        ' # ocr #
        ' ##################################################
        '''
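        # Run tesseract over the collected page images and produce hOCR, PDF
        # and plain text output per job; OMP_THREAD_LIMIT caps the number of
        # threads each tesseract process may spawn.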
        ocr_tasks = []
        n_cores = min(4, self.getNCores())
        mem_mb = min(n_cores * 2048, self.getMemMb())
        for i, job in enumerate(self.jobs):
            input_file = os.path.join(job.output_dir, 'ocr_input_files.txt')
            output_file_base = os.path.join(job.output_dir, job.name)
            cmd = 'tesseract "{}" "{}"'.format(input_file, output_file_base)
            cmd += ' -l "{}"'.format(self.lang)
            cmd += ' hocr pdf txt'
            deps = 'pre_ocr_-_{}'.format(i)
            lbl = 'ocr_-_{}'.format(i)
            task = self.addTask(command=cmd, dependencies=deps,
                                env={'OMP_THREAD_LIMIT': '{}'.format(n_cores)},
                                label=lbl, memMb=mem_mb, nCores=n_cores)
            ocr_tasks.append(task)

        '''
        ' ##################################################
        ' # post ocr #
        ' ##################################################
        '''
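        # Remove the OCR list file and rewrite the absolute page image paths
        # in the hOCR output to the relative "pages" directory.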
        post_ocr_tasks = []
        for i, job in enumerate(self.jobs):
            input_file = os.path.join(job.output_dir, 'ocr_input_files.txt')
            output_file_base = os.path.join(job.output_dir, job.name)
            cmd = 'rm "{}"'.format(input_file)
            cmd += ' && '
            cmd += 'sed -i \'s+{}+pages+g\' "{}.hocr"'.format(job.page_dir, output_file_base) # noqa
            deps = 'ocr_-_{}'.format(i)
            lbl = 'post_ocr_-_{}'.format(i)
            task = self.addTask(command=cmd, dependencies=deps, label=lbl)
            post_ocr_tasks.append(task)

        '''
        ' ##################################################
        ' # hocr to tei #
        ' ##################################################
        '''
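        # Convert the hOCR output of each job into a TEI XML file with the
        # hocrtotei tool.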
        hocr_to_tei_tasks = []
        for i, job in enumerate(self.jobs):
            output_file_base = os.path.join(job.output_dir, job.name)
            cmd = 'hocrtotei "{}.hocr" "{}.xml"'.format(output_file_base, output_file_base) # noqa
            deps = 'post_ocr_-_{}'.format(i)
            lbl = 'hocr_to_tei_-_{}'.format(i)
            task = self.addTask(command=cmd, dependencies=deps, label=lbl)
            hocr_to_tei_tasks.append(task)

        '''
        ' ##################################################
        ' # zip creation #
        ' ##################################################
        '''
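        # If a zip name stem was given, bundle the results: one archive with
        # everything, one per file type (PDF, TXT, XML) and a PoCo bundle
        # with the hOCR files plus the corresponding page images.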
        zip_creation_tasks = []
        if self.zip is not None:
            # zip all files
            cmd = 'cd "{}"'.format(self.output_dir)
            cmd += ' && '
            cmd += 'zip'
            cmd += ' -r'
            cmd += ' "{}.all.zip" .'.format(self.zip)
            cmd += ' -x "pyflow.data*" "*tmp*"'
            cmd += ' -i "*.pdf" "*.txt" "*.xml" "*.hocr" "*.{}"'.format('bin.png' if self.binarize else 'tif') # noqa
            cmd += ' && '
            cmd += 'cd -'
            deps = hocr_to_tei_tasks
            lbl = 'zip_creation_-_all'
            task = self.addTask(command=cmd, dependencies=deps, label=lbl)
            zip_creation_tasks.append(task)

            # zip PDF files
            cmd = 'cd "{}"'.format(self.output_dir)
            cmd += ' && '
            cmd += 'zip'
            cmd += ' -r'
            cmd += ' "{}.pdf.zip" .'.format(self.zip)
            cmd += ' -x "pyflow.data*" "*tmp*"'
            cmd += ' -i "*.pdf"'
            cmd += ' && '
            cmd += 'cd -'
            deps = ocr_tasks
            lbl = 'zip_creation_-_pdf'
            task = self.addTask(command=cmd, dependencies=deps, label=lbl)
            zip_creation_tasks.append(task)

            # zip TXT files
            cmd = 'cd "{}"'.format(self.output_dir)
            cmd += ' && '
            cmd += 'zip'
            cmd += ' -r'
            cmd += ' "{}.txt.zip" .'.format(self.zip)
            cmd += ' -x "pyflow.data*" "*tmp*"'
            cmd += ' -i "*.txt"'
            cmd += ' && '
            cmd += 'cd -'
            deps = ocr_tasks
            lbl = 'zip_creation_-_txt'
            task = self.addTask(command=cmd, dependencies=deps, label=lbl)
            zip_creation_tasks.append(task)

            # zip XML files
            cmd = 'cd "{}"'.format(self.output_dir)
            cmd += ' && '
            cmd += 'zip'
            cmd += ' -r'
            cmd += ' "{}.xml.zip" .'.format(self.zip)
            cmd += ' -x "pyflow.data*" "*tmp*"'
            cmd += ' -i "*.xml"'
            cmd += ' && '
            cmd += 'cd -'
            deps = hocr_to_tei_tasks
            lbl = 'zip_creation_-_xml'
            task = self.addTask(command=cmd, dependencies=deps, label=lbl)
            zip_creation_tasks.append(task)

            # zip PoCo bundles
            cmd = 'cd "{}"'.format(self.output_dir)
            cmd += ' && '
            cmd += 'zip'
            cmd += ' -r'
            cmd += ' "{}.poco.zip" .'.format(self.zip)
            cmd += ' -x "pyflow.data*" "*tmp*"'
            cmd += ' -i "*.hocr" "*.{}"'.format('bin.png' if self.binarize else 'tif') # noqa
            cmd += ' && '
            cmd += 'cd -'
            deps = post_ocr_tasks
            lbl = 'zip_creation_-_poco'
            task = self.addTask(command=cmd, dependencies=deps, label=lbl)
            zip_creation_tasks.append(task)


def collect_jobs(input_dir, output_dir):
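    """Recursively scan input_dir and create an OCRPipelineJob for every PDF
    file found, mirroring the directory structure below output_dir.
    """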
    jobs = []
    for file in os.listdir(input_dir):
        if os.path.isdir(os.path.join(input_dir, file)):
            jobs += collect_jobs(os.path.join(input_dir, file),
                                 os.path.join(output_dir, file))
        elif file.lower().endswith('.pdf'):
            job = OCRPipelineJob(os.path.join(input_dir, file),
                                 os.path.join(output_dir, file))
            jobs.append(job)
    return jobs


def parse_args():
    parser = ArgumentParser(description='OCR pipeline for PDF file processing',
                            prog='OCR pipeline')
    parser.add_argument('-i', '--input-dir',
                        help='Input directory',
                        required=True)
    parser.add_argument('-o', '--output-dir',
                        help='Output directory',
                        required=True)
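    # Offer one choice per tesseract model installed in
    # /usr/local/share/tessdata; the 12 stripped characters are the
    # '.traineddata' suffix.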
    parser.add_argument('-l', '--language',
                        choices=list(map(lambda x: x[:-12], filter(lambda x: x.endswith('.traineddata'), os.listdir('/usr/local/share/tessdata')))), # noqa
                        help='Language of the input '
                             '(3-character ISO 639-2 language codes)',
                        required=True)
    parser.add_argument('--binarize',
                        action='store_true',
                        help='Add binarization as a preprocessing step')
    parser.add_argument('--log-dir',
                        help='Logging directory')
    parser.add_argument('--mem-mb',
                        help='Amount of system memory to be used (Default: min(--n-cores * 2048, available system memory))', # noqa
                        type=int)
    parser.add_argument('--n-cores',
                        default=min(4, multiprocessing.cpu_count()),
                        help='Number of CPU threads to be used', # noqa
                        type=int)
    parser.add_argument('--zip',
                        help='Create one zip file per filetype')
    parser.add_argument('-v', '--version',
                        action='version',
                        help='Returns the current version of the OCR pipeline',
                        version='%(prog)s {}'.format(__version__))
    args = parser.parse_args()
    # Set some tricky default values and check for insufficient input
    if args.log_dir is None:
        args.log_dir = args.output_dir
    if args.n_cores < 1:
        raise Exception('--n-cores must be greater than or equal to 1')
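    # Default --mem-mb to min(--n-cores * 2048, total system memory), where
    # the total is taken from the last line of `free -t -m` (in MiB).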
    if args.mem_mb is None:
        max_mem_mb = int(os.popen('free -t -m').readlines()[-1].split()[1:][0])
        args.mem_mb = min(args.n_cores * 2048, max_mem_mb)
    if args.mem_mb < 2048:
        raise Exception('--mem-mb must be greater than or equal to 2048')
    if args.zip is not None and args.zip.lower().endswith('.zip'):
        # Remove .zip file extension if provided
        args.zip = args.zip[:-4]
        args.zip = args.zip if args.zip else 'output'
    return args


def main():
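    """Parse the command line arguments and run the OCR pipeline.

    A typical invocation (assuming the script is available as "ocr"; paths
    and language are placeholders) might look like:

        ocr -i /path/to/input -o /path/to/output -l eng --binarize
    """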
    args = parse_args()
    ocr_pipeline = OCRPipeline(args.input_dir, args.language, args.output_dir, args.binarize, args.zip) # noqa
    retval = ocr_pipeline.run(dataDirRoot=args.log_dir, memMb=args.mem_mb, nCores=args.n_cores) # noqa
    sys.exit(retval)


if __name__ == '__main__':
    main()