Add some output messages and code formatting.

This commit is contained in:
Patrick Jentsch 2019-05-15 11:56:24 +02:00
parent 843151e547
commit e5c0d53a03

385
ocr
View File

@ -29,172 +29,187 @@ from pyflow import WorkflowRunner
def parse_arguments():
    """Parse and return the command line arguments of the OCR pipeline.

    Returns an argparse.Namespace with the attributes: lang,
    inputDirectory, outputDirectory, skipBinarization,
    keepIntermediates and nCores.
    """
    # NOTE(review): the original passed this text as the first positional
    # argument of ArgumentParser, i.e. as "prog"; it is the tool
    # description, so it is passed as description= here.
    parser = argparse.ArgumentParser(
        description='Performs OCR of (historical) documents utilizing '
                    'OCRopus for preprocessing and Tesseract OCR for OCR. '
                    'Available outputs are HOCR, PDF, shrinked PDF, and '
                    'simple DTAbf (TEI P5 compliant). Software '
                    'requirements: imagemagick, ocropus, pdftoppm, '
                    'pdfunite, poppler-utils, pyflow, python2.7, '
                    'python3.5, tesseract'
    )
    parser.add_argument(
        '-l',
        dest='lang',
        help='Language for OCR.',
        required=True
    )
    parser.add_argument(
        '--i',
        default=os.path.normpath('/files_for_ocr'),
        dest='inputDirectory',
        help='The input directory.',
        required=False
    )
    parser.add_argument(
        '--o',
        default=os.path.normpath('/files_from_ocr'),
        dest='outputDirectory',
        help='The output directory.',
        required=False
    )
    parser.add_argument(
        '--skip-binarization',
        action='store_true',
        default=False,
        dest='skipBinarization',
        help='Skip binarization.',
        required=False
    )
    parser.add_argument(
        '--keep-intermediates',
        action='store_true',
        default=False,
        dest='keepIntermediates',
        help='Keep intermediate files.',
        required=False
    )
    parser.add_argument(
        '--nCores',
        # Cap the default at four cores; the heavy tools used by the
        # workflow do not profit from more (see the workflow comments).
        default=min(4, multiprocessing.cpu_count()),
        dest='nCores',
        help='Total number of cores available.',
        required=False,
        type=int
    )
    return parser.parse_args()
class OCRWorkflow(WorkflowRunner):
    """pyflow workflow that OCRs PDF/TIFF documents.

    Per input file the task graph is: split into one TIFF per page,
    optionally binarise the pages with ocropus-nlbin (plus a rename
    pass normalising ocropus' output file names), OCR every page with
    Tesseract (HOCR + PDF + text outputs), merge the per-page results
    into one TEI P5 XML file, one PDF and one text file, and finally
    archive or delete the intermediate files.
    """

    def __init__(self, args):
        # args is the Namespace produced by parse_arguments().
        self.jobs = analyze_jobs(args.inputDirectory, args.outputDirectory)
        self.skipBinarization = args.skipBinarization
        self.keepIntermediates = args.keepIntermediates
        self.lang = args.lang
        self.nCores = args.nCores

    def _print_heading(self, text):
        """Print a banner-framed progress message to stdout.

        NOTE(review): the exact banner padding of the original was lost
        when the source was flattened; the 58-column frame is a
        reconstruction of its visual intent.
        """
        print('#' * 58)
        print('# %s #' % text.ljust(54))
        print('#' * 58)

    def workflow(self):
        """Build and run the task graph for all collected jobs."""
        self._print_heading('Starting workflow...')
        for index, job in enumerate(self.jobs):
            print('%i: %s' % (index, job))

        # --- Create the per-job output directories. -------------------
        self._print_heading('Creating output directories...')
        create_output_directories_jobs = []
        for index, job in enumerate(self.jobs):
            cmd = 'mkdir -p "%s"' % (
                os.path.join(job['output_dir'], 'tmp')
            )
            if self.keepIntermediates:
                # Extra directories used later to archive intermediates.
                cmd += ' "%s" "%s" "%s" "%s"' % (
                    os.path.join(job['output_dir'], 'tmp', 'hocr'),
                    os.path.join(job['output_dir'], 'tmp', 'pdf'),
                    os.path.join(job['output_dir'], 'tmp', 'tiff'),
                    os.path.join(job['output_dir'], 'tmp', 'txt')
                )
                # NOTE(review): nesting of this check under
                # keepIntermediates is inferred from the cleanup stage,
                # which only moves *.bin.png/*.nrm.png when intermediates
                # are kept — confirm against the original commit.
                if not self.skipBinarization:
                    cmd += ' "%s" "%s"' % (
                        os.path.join(job['output_dir'], 'tmp', 'bin.png'),
                        os.path.join(job['output_dir'], 'tmp', 'nrm.png'),
                    )
            create_output_directories_jobs.append(
                self.addTask(
                    command=cmd,
                    label='create_output_directories_job_-_%i' % (index)
                )
            )
        self.waitForTasks()

        # --- Split each input document into one TIFF per page. --------
        self._print_heading('Splitting...')
        split_jobs = []
        # Spread the available cores evenly over the jobs (at least one
        # core per job).
        split_job_nCores = min(
            self.nCores,
            max(1, int(self.nCores / len(self.jobs)))
        )
        for index, job in enumerate(self.jobs):
            if job['filename'].endswith(('.tif', '.tiff')):
                # TIFF input: re-encode page by page with ImageMagick.
                cmd = 'convert "%s" -compress LZW -density 300 -scene 1 "%s"/page-%%d.tif' % (
                    job['path'],
                    os.path.join(job['output_dir'], 'tmp')
                )
            else:
                # PDF input: rasterise with poppler's pdftoppm.
                cmd = 'pdftoppm -r 300 -tiff -tiffcompression lzw "%s" "%s"' % (
                    job['path'],
                    os.path.join(job['output_dir'], 'tmp', 'page')
                )
            split_jobs.append(
                self.addTask(
                    command=cmd,
                    label='split_job_-_%i' % (index),
                    nCores=split_job_nCores
                )
            )
        self.waitForTasks()

        if not self.skipBinarization:
            # --- Binarise the page images with ocropus-nlbin. ---------
            self._print_heading('Binarising...')
            binarisation_jobs = []
            # We run ocropus-nlbin with either four or, if there are
            # fewer than four cores available for this workflow, the
            # available core number.
            binarisation_job_nCores = min(4, self.nCores)
            for index, job in enumerate(self.jobs):
                cmd = 'ls --quoting-style=shell-escape -v "%s"/*.tif | xargs ocropus-nlbin --output "%s" --parallel "%i"' % (
                    os.path.join(job['output_dir'], 'tmp'),
                    os.path.join(job['output_dir'], 'tmp'),
                    binarisation_job_nCores
                )
                binarisation_jobs.append(
                    self.addTask(
                        command=cmd,
                        label='binarisation_job_-_%i' % (index),
                        nCores=binarisation_job_nCores
                    )
                )
            self.waitForTasks()

            # --- Rename ocropus output to page-<n>.<ext>. -------------
            # ocropus-nlbin emits zero-padded numeric prefixes; strip
            # the padding so later stages see page-1.bin.png etc.
            self._print_heading('Normalising file names from binarisation...')
            post_binarisation_jobs = []
            for index, job in enumerate(self.jobs):
                number = 0
                for file in filter(lambda x: x.endswith(('.bin.png', '.nrm.png')), os.listdir(os.path.join(job['output_dir'], 'tmp'))):
                    cmd = 'mv "%s" "%s"' % (
                        os.path.join(job['output_dir'], 'tmp', file),
                        os.path.join(job['output_dir'], 'tmp', 'page-%i.%s' % (
                            int(file.split('.', 1)[0]),
                            file.split('.', 1)[1])
                        ),
                    )
                    post_binarisation_jobs.append(
                        self.addTask(
                            command=cmd,
                            label='post_binarisation_job_-_%i-%i' % (
                                index,
                                number
                            )
                        )
                    )
                    number += 1
            self.waitForTasks()

        # --- OCR every page image with Tesseract. ---------------------
        self._print_heading('Performing OCR...')
        ocr_jobs = []
        # Tesseract runs fastest with four cores. So we run it with
        # either four or, if there are fewer than four cores available
        # for this workflow, the available core number.
        ocr_job_nCores = min(4, self.nCores)
        # NOTE(review): deu_frak is forced to one core; the rationale
        # sits in a comment hidden behind the diff hunk gap — confirm
        # against the original file.
        if self.lang == "deu_frak":
            ocr_job_nCores = 1
        for index, job in enumerate(self.jobs):
            number = 0
            # Without binarization OCR runs on the raw .tif pages,
            # otherwise on the .bin.png pages produced by ocropus.
            for file in filter(lambda x: x.endswith('.tif') if self.skipBinarization else x.endswith('.bin.png'), os.listdir(os.path.join(job['output_dir'], 'tmp'))):
                cmd = 'tesseract "%s" "%s" -l "%s" hocr pdf txt' % (
                    os.path.join(job['output_dir'], 'tmp', file),
                    os.path.join(
                        job['output_dir'],
                        'tmp',
                        # Strip ".tif" or ".bin.png" to get the page stem.
                        file.rsplit('.', 1 if self.skipBinarization else 2)[0]
                    ),
                    self.lang
                )
                ocr_jobs.append(
                    self.addTask(
                        command=cmd,
                        label='ocr_job_-_%i-%i' % (index, number),
                        nCores=ocr_job_nCores
                    )
                )
                number += 1
        self.waitForTasks()

        # --- Merge the per-page HOCR into one TEI P5 XML file. --------
        self._print_heading('Creating TEI P5 files...')
        hocr_to_tei_jobs = []
        for index, job in enumerate(self.jobs):
            cmd = 'hocrtotei "%s" "%s"' % (
                os.path.join(job['output_dir'], 'tmp'),
                os.path.join(
                    job['output_dir'],
                    job['filename'].rsplit('.', 1)[0] + '.xml'
                )
            )
            hocr_to_tei_jobs.append(
                self.addTask(
                    command=cmd,
                    label='hocr_to_tei_job_-_%i' % (index)
                )
            )

        # --- Merge the per-page PDFs into a single PDF. ---------------
        self._print_heading('Merging PDF files...')
        pdf_merge_jobs = []
        for index, job in enumerate(self.jobs):
            # ls -v sorts page numbers naturally; the echoed path is the
            # pdfunite output file appended as the last argument.
            cmd = '(ls --quoting-style=shell-escape -v "%s"/*.pdf && echo "\'%s\'") | xargs pdfunite' % (
                os.path.join(job['output_dir'], 'tmp'),
                os.path.join(
                    job['output_dir'],
                    job['filename'].rsplit('.', 1)[0] + '.pdf'
                )
            )
            pdf_merge_jobs.append(
                self.addTask(
                    command=cmd,
                    label='pdf_merge_job_-_%i' % (index)
                )
            )

        # --- Concatenate the per-page text files. ---------------------
        self._print_heading('Merging text files...')
        txt_merge_jobs = []
        for index, job in enumerate(self.jobs):
            cmd = 'ls --quoting-style=shell-escape -v "%s"/*.txt | xargs cat > "%s"' % (
                os.path.join(job['output_dir'], 'tmp'),
                os.path.join(
                    job['output_dir'],
                    job['filename'].rsplit('.', 1)[0] + '.txt'
                )
            )
            txt_merge_jobs.append(
                self.addTask(
                    command=cmd,
                    label='txt_merge_job_-_%i' % (index)
                )
            )
        self.waitForTasks()

        # --- Archive or delete the intermediate files. ----------------
        self._print_heading('Cleanup...')
        cleanup_jobs = []
        if self.keepIntermediates:
            # Sort the intermediates into the per-type sub-directories
            # created in the first stage.
            for index, job in enumerate(self.jobs):
                cmd = 'mv "%s"/*.hocr "%s"' % (
                    os.path.join(job['output_dir'], 'tmp'),
                    os.path.join(job['output_dir'], 'tmp', 'hocr'),
                )
                cmd += ' && mv "%s"/*.pdf "%s"' % (
                    os.path.join(job['output_dir'], 'tmp'),
                    os.path.join(job['output_dir'], 'tmp', 'pdf'),
                )
                cmd += ' && mv "%s"/*.tif "%s"' % (
                    os.path.join(job['output_dir'], 'tmp'),
                    os.path.join(job['output_dir'], 'tmp', 'tiff'),
                )
                cmd += ' && mv "%s"/*.txt "%s"' % (
                    os.path.join(job['output_dir'], 'tmp'),
                    os.path.join(job['output_dir'], 'tmp', 'txt'),
                )
                if not self.skipBinarization:
                    cmd += ' && mv "%s"/*.bin.png "%s"' % (
                        os.path.join(job['output_dir'], 'tmp'),
                        os.path.join(job['output_dir'], 'tmp', 'bin.png'),
                    )
                    cmd += ' && mv "%s"/*.nrm.png "%s"' % (
                        os.path.join(job['output_dir'], 'tmp'),
                        os.path.join(job['output_dir'], 'tmp', 'nrm.png'),
                    )
                cleanup_jobs.append(
                    self.addTask(
                        command=cmd,
                        label='cleanup_job_-_%i' % (index)
                    )
                )
        else:
            # Intermediates are not wanted: drop the whole tmp tree.
            for index, job in enumerate(self.jobs):
                cmd = 'rm -r "%s"' % (
                    os.path.join(job['output_dir'], 'tmp')
                )
                cleanup_jobs.append(
                    self.addTask(
                        command=cmd,
                        label='cleanup_job_-_%i' % (index)
                    )
                )
def analyze_jobs(inputDirectory, outputDirectory):
    """Recursively collect OCR jobs below *inputDirectory*.

    Every *.pdf, *.tif or *.tiff file becomes a job dict with the keys
    'filename' (base name), 'path' (full input path) and 'output_dir'
    (a directory named after the file below *outputDirectory*,
    mirroring the input tree). Returns the list of job dicts.
    """
    jobs = []
    for entry in os.listdir(inputDirectory):
        if os.path.isdir(os.path.join(inputDirectory, entry)):
            # Recurse, mirroring the sub-directory in the output tree.
            jobs += analyze_jobs(
                os.path.join(inputDirectory, entry),
                os.path.join(outputDirectory, entry)
            )
        elif entry.endswith(('.pdf', '.tif', '.tiff')):
            jobs.append(
                {
                    'filename': entry,
                    'output_dir': os.path.join(outputDirectory, entry),
                    'path': os.path.join(inputDirectory, entry)
                }
            )
    # NOTE(review): the trailing return was hidden behind a diff hunk in
    # the flattened source; 'return jobs' is required by the recursive
    # calls above and by OCRWorkflow.__init__.
    return jobs
@ -373,10 +378,10 @@ def main():
wflow = OCRWorkflow(args) wflow = OCRWorkflow(args)
retval = wflow.run(dataDirRoot="/files_from_ocr", nCores=args.nCores) retval = wflow.run(dataDirRoot=args.outputDirectory, nCores=args.nCores)
sys.exit(retval) sys.exit(retval)
if __name__ == "__main__": if __name__ == '__main__':
main() main()