mirror of
https://gitlab.ub.uni-bielefeld.de/sfb1288inf/ocr.git
synced 2024-12-26 06:04:17 +00:00
564 lines
24 KiB
Python
Executable File
564 lines
24 KiB
Python
Executable File
#!/usr/bin/env python2.7
|
|
# coding=utf-8
|
|
|
|
|
|
"""
|
|
ocr
|
|
|
|
Usage: For usage instructions run with option --help
|
|
Author: Patrick Jentsch <p.jentsch@uni-bielefeld.de>
|
|
"""
|
|
|
|
|
|
from argparse import ArgumentParser
|
|
from pyflow import WorkflowRunner
|
|
import multiprocessing
|
|
import os
|
|
import re
|
|
import sys
|
|
|
|
|
|
# Tesseract language models this pipeline accepts for the -l/--language flag.
TESSERACT_MODELS = ['deu', 'eng', 'enm', 'fra', 'frk', 'frm', 'ita', 'por',
                    'spa']


def parse_args():
    """Parse the command line arguments of the OCR pipeline.

    Returns:
        argparse.Namespace with the pipeline configuration (input dir `i`,
        output dir `o`, `language`, `binarize`, `keep_intermediates`,
        `n_cores`, `zip`, `compress`, `log_dir`).
    """
    parser = ArgumentParser(description='OCR Pipeline utilizing tesseract.')
    # FIX: the original help texts used backslash line continuations inside
    # the string literals, which embedded long runs of spaces into the text
    # printed by --help; implicit string concatenation avoids that. Also
    # fixes the "PDf" typo.
    parser.add_argument('i',
                        help='Input directory for OCR. One PDF equals one '
                             'job')
    parser.add_argument('o', help='Output directory containing OCR results.')
    parser.add_argument('-l', '--language',
                        choices=TESSERACT_MODELS,
                        required=True)
    parser.add_argument('--binarize',
                        action='store_true',
                        help='Use ocropy binarisation as preprocessing step.')
    parser.add_argument('--keep-intermediates',
                        action='store_true',
                        help='Keep intermediate files for debugging etc.',
                        required=False)
    parser.add_argument('--n-cores',
                        default=min(4, multiprocessing.cpu_count()),
                        help='Total number of cores available.',
                        type=int,
                        required=False)
    parser.add_argument('--zip',
                        help='Zips all results in different archives '
                             'depending on result types. Also zips '
                             'everything into one archive.',
                        required=False)
    parser.add_argument('-c', '--compress',
                        help='Compress the final PDF result file.',
                        required=False,
                        action='store_true')
    parser.add_argument('--log_dir')
    return parser.parse_args()
|
|
|
|
|
|
class OCRPipelineJob:
    """One OCR job: a single input PDF plus the directory for its results."""

    def __init__(self, file, output_dir):
        self.file = file
        self.output_dir = output_dir
        # The job name is the file name with everything after the last dot
        # dropped (e.g. "scan.pdf" -> "scan", "a.b.pdf" -> "a.b").
        filename = os.path.basename(file)
        self.name = filename.rsplit('.', 1)[0]
|
|
|
|
|
|
class OCRPipeline(WorkflowRunner):
    def __init__(self, binarize, jobs, keep_intermediates, lang, n_cores,
                 output_dir, zip, compress):
        """Store the pipeline configuration for the workflow run.

        :param binarize: if True, run ocropus-nlbin binarisation before OCR
        :param jobs: list of OCRPipelineJob instances to process
        :param keep_intermediates: keep the per-page intermediate files
                                   instead of deleting the tmp directory
        :param lang: tesseract language model name (one of TESSERACT_MODELS)
        :param n_cores: total number of CPU cores available to the workflow
        :param output_dir: top level output directory (zip archives are
                           created here)
        :param zip: base name for the result archives, or None to skip
                    zipping
        :param compress: if True, compress the final PDF with ghostscript
        """
        self.binarize = binarize
        self.jobs = jobs
        self.keep_intermediates = keep_intermediates
        self.lang = lang
        self.n_cores = n_cores
        self.output_dir = output_dir
        # NOTE(review): this parameter shadows the builtin `zip`; the name is
        # kept because it is part of the public signature.
        self.zip = zip
        self.compress = compress
|
|
|
|
def workflow(self):
|
|
if not self.jobs:
|
|
return
|
|
|
|
'''
|
|
' ##################################################
|
|
' # mkdir_jobs #
|
|
' ##################################################
|
|
'''
|
|
|
|
mkdir_jobs = []
|
|
for i, job in enumerate(self.jobs):
|
|
output_dir = os.path.join(job.output_dir, 'tmp')
|
|
poco_dir = os.path.join(job.output_dir, 'PoCo')
|
|
cmd = 'mkdir'
|
|
cmd += ' -p'
|
|
cmd += ' "{}"'.format(output_dir)
|
|
cmd += ' "{}"'.format(os.path.join(poco_dir, 'hocr'))
|
|
cmd += ' "{}"'.format(os.path.join(poco_dir, 'tiff'))
|
|
if self.keep_intermediates:
|
|
cmd += ' "{}"'.format(os.path.join(output_dir, 'hocr'))
|
|
cmd += ' "{}"'.format(os.path.join(output_dir, 'pdf'))
|
|
cmd += ' "{}"'.format(os.path.join(output_dir, 'tiff'))
|
|
cmd += ' "{}"'.format(os.path.join(output_dir, 'txt'))
|
|
if self.binarize:
|
|
cmd += ' "{}"'.format(os.path.join(output_dir, 'bin.png'))
|
|
cmd += ' "{}"'.format(os.path.join(output_dir, 'nrm.png'))
|
|
lbl = 'mkdir_job_-_{}'.format(i)
|
|
mkdir_jobs.append(self.addTask(command=cmd, label=lbl))
|
|
|
|
'''
|
|
' ##################################################
|
|
' # pdftoppm_jobs #
|
|
' ##################################################
|
|
'''
|
|
pdftoppm_jobs = []
|
|
n_cores = min(self.n_cores, max(1, int(self.n_cores / len(self.jobs))))
|
|
for i, job in enumerate(self.jobs):
|
|
output_dir = os.path.join(job.output_dir, 'tmp')
|
|
output_file_base = os.path.join(output_dir, 'page')
|
|
cmd = 'pdftoppm'
|
|
cmd += ' -r 300'
|
|
cmd += ' -tiff'
|
|
cmd += ' -tiffcompression lzw'
|
|
cmd += ' "{}" "{}"'.format(job.file, output_file_base)
|
|
deps = mkdir_jobs
|
|
lbl = 'pdftoppm_job_-_{}'.format(i)
|
|
pdftoppm_jobs.append(self.addTask(command=cmd, dependencies=deps,
|
|
label=lbl, nCores=n_cores))
|
|
|
|
if self.binarize:
|
|
'''
|
|
' The ocropus_nlbin_jobs list is created based on the output files
|
|
' of the pdftoppm_jobs. So wait until they are finished.
|
|
'''
|
|
self.waitForTasks()
|
|
|
|
'''
|
|
' ##################################################
|
|
' # ocropus_nlbin_jobs #
|
|
' ##################################################
|
|
'''
|
|
ocropus_nlbin_jobs = []
|
|
'''
|
|
' We run ocropus-nlbin with either four or, if there are less then
|
|
' four cores available for this workflow, the available core
|
|
' number.
|
|
'''
|
|
n_cores = min(4, self.n_cores)
|
|
for i, job in enumerate(self.jobs):
|
|
input_dir = os.path.join(job.output_dir, 'tmp')
|
|
output_dir = input_dir
|
|
files = filter(lambda x: x.endswith('.tif'),
|
|
os.listdir(input_dir))
|
|
files.sort(key=lambda x: int(re.search(r'\d+', x).group(0)))
|
|
files = map(lambda x: os.path.join(input_dir, x), files)
|
|
cmd = 'ocropus-nlbin "{}"'.format('" "'.join(files))
|
|
cmd += ' -o "{}"'.format(output_dir)
|
|
cmd += ' -Q "{}"'.format(n_cores)
|
|
deps = pdftoppm_jobs
|
|
lbl = 'ocropus_nlbin_job_-_{}'.format(i)
|
|
ocropus_nlbin_jobs.append(
|
|
self.addTask(command=cmd, dependencies=deps, label=lbl,
|
|
nCores=n_cores))
|
|
|
|
'''
|
|
' The post_ocropus_nlbin_jobs are created based on the output files
|
|
' of the ocropus_nlbin_jobs. So wait until they are finished.
|
|
'''
|
|
self.waitForTasks()
|
|
|
|
'''
|
|
' ##################################################
|
|
' # post_ocropus_nlbin_jobs #
|
|
' ##################################################
|
|
'''
|
|
post_ocropus_nlbin_jobs = []
|
|
for i, job in enumerate(self.jobs):
|
|
input_dir = os.path.join(job.output_dir, 'tmp')
|
|
output_dir = input_dir
|
|
number = 0
|
|
files = filter(lambda x: x.endswith('.bin.png'),
|
|
os.listdir(input_dir))
|
|
files.sort()
|
|
for file in files:
|
|
# int conversion is done in order to trim leading zeros
|
|
output_file = os.path.join(output_dir, 'page-{}.bin.png'.format(int(file.split('.', 1)[0]))) # noqa
|
|
cmd = 'mv "{}" "{}"'.format(os.path.join(output_dir, file),
|
|
output_file)
|
|
deps = ocropus_nlbin_jobs
|
|
lbl = 'post_ocropus_nlbin_job_-_{}-{}'.format(i, number)
|
|
post_ocropus_nlbin_jobs.append(
|
|
self.addTask(command=cmd, dependencies=deps,
|
|
label=lbl))
|
|
number += 1
|
|
|
|
'''
|
|
' The tesseract_jobs are created based of the output files of either
|
|
' the pdftoppm_jobs or post_ocropus_nlbin_jobs. So wait until they are
|
|
' finished.
|
|
'''
|
|
self.waitForTasks()
|
|
|
|
'''
|
|
' ##################################################
|
|
' # tesseract_jobs #
|
|
' ##################################################
|
|
'''
|
|
tesseract_jobs = []
|
|
'''
|
|
' Tesseract runs fastest with four cores. So we run it with either four
|
|
' or, if there are less then four cores available for this workflow,
|
|
' the available core number.
|
|
'''
|
|
n_cores = min(4, self.n_cores)
|
|
for i, job in enumerate(self.jobs):
|
|
input_dir = os.path.join(job.output_dir, 'tmp')
|
|
output_dir = input_dir
|
|
files = filter(lambda x: x.endswith('.bin.png' if self.binarize else '.tif'), # noqa
|
|
os.listdir(input_dir))
|
|
files.sort(key=lambda x: int(re.search(r'\d+', x).group(0)))
|
|
files = map(lambda x: os.path.join(input_dir, x), files)
|
|
number = 0
|
|
for file in files:
|
|
output_file_base = os.path.join(output_dir, file.rsplit('.', 2 if self.binarize else 1)[0]) # noqa
|
|
cmd = 'tesseract "{}" "{}"'.format(file, output_file_base)
|
|
cmd += ' -l "{}"'.format(self.lang)
|
|
cmd += ' hocr pdf txt'
|
|
if self.binarize:
|
|
deps = post_ocropus_nlbin_jobs
|
|
else:
|
|
deps = pdftoppm_jobs
|
|
label = 'tesseract_jobs_-_{}-{}'.format(i, number)
|
|
tesseract_jobs.append(
|
|
self.addTask(command=cmd, dependencies=deps, label=label,
|
|
nCores=n_cores))
|
|
number += 1
|
|
|
|
'''
|
|
' The following jobs are created based of the output files of the
|
|
' ocr_jobs. So wait until they are finished.
|
|
'''
|
|
self.waitForTasks()
|
|
|
|
'''
|
|
' ##################################################
|
|
' # hocrtotei_jobs #
|
|
' ##################################################
|
|
'''
|
|
hocrtotei_jobs = []
|
|
for i, job in enumerate(self.jobs):
|
|
input_dir = os.path.join(job.output_dir, 'tmp')
|
|
files = filter(lambda x: x.endswith('.hocr'),
|
|
os.listdir(input_dir))
|
|
files.sort(key=lambda x: int(re.search(r'\d+', x).group(0)))
|
|
files = map(lambda x: os.path.join(input_dir, x), files)
|
|
output_file = os.path.join(job.output_dir,
|
|
'{}.xml'.format(job.name))
|
|
cmd = 'hocrtotei "{}" "{}"'.format('" "'.join(files), output_file)
|
|
deps = tesseract_jobs
|
|
lbl = 'hocrtotei_job_-_{}'.format(i)
|
|
hocrtotei_jobs.append(self.addTask(command=cmd, dependencies=deps,
|
|
label=lbl))
|
|
|
|
'''
|
|
' ##################################################
|
|
' # hocr_poco_jobs #
|
|
' ##################################################
|
|
'''
|
|
|
|
hocr_poco_jobs = []
|
|
for i, job in enumerate(self.jobs):
|
|
input_dir = os.path.join(job.output_dir, 'tmp')
|
|
files = filter(lambda x: x.endswith('.hocr'),
|
|
os.listdir(input_dir))
|
|
files.sort(key=lambda x: int(re.search(r'\d+', x).group(0)))
|
|
files = map(lambda x: os.path.join(input_dir, x), files)
|
|
# set relative file paths into hocr
|
|
relative_files = map(lambda x: os.path.join('..',
|
|
'tiff',
|
|
os.path.basename(x).replace('.hocr', '.tif')), # noqa
|
|
files)
|
|
for file, relative_file in zip(files, relative_files):
|
|
with open(file, 'r+') as f:
|
|
html = f.read()
|
|
html = html.replace(file.replace('.hocr', '.tif'),
|
|
relative_file)
|
|
f.seek(0)
|
|
f.truncate(0) # deletes content of file to write new html
|
|
f.write(html)
|
|
output_path_base = os.path.join(job.output_dir, 'PoCo')
|
|
output_path = os.path.join(output_path_base, 'hocr')
|
|
cmd = 'cp "{}" "{}"'.format('" "'.join(files), output_path)
|
|
deps = tesseract_jobs
|
|
lbl = 'hocr_poco_jobs-_{}'.format(i)
|
|
hocr_poco_jobs.append(self.addTask(command=cmd, dependencies=deps,
|
|
label=lbl))
|
|
'''
|
|
' ##################################################
|
|
' # tiff_poco_jobs #
|
|
' ##################################################
|
|
'''
|
|
|
|
tiff_poco_jobs = []
|
|
for i, job in enumerate(self.jobs):
|
|
input_dir = os.path.join(job.output_dir, 'tmp')
|
|
files = filter(lambda x: x.endswith('.tif'),
|
|
os.listdir(input_dir))
|
|
files.sort(key=lambda x: int(re.search(r'\d+', x).group(0)))
|
|
files = map(lambda x: os.path.join(input_dir, x), files)
|
|
output_path_base = os.path.join(job.output_dir, 'PoCo')
|
|
output_path = os.path.join(output_path_base, 'tiff')
|
|
cmd = 'cp "{}" "{}"'.format('" "'.join(files), output_path)
|
|
deps = tesseract_jobs
|
|
lbl = 'tiff_poco_jobs-_{}'.format(i)
|
|
tiff_poco_jobs.append(self.addTask(command=cmd, dependencies=deps,
|
|
label=lbl))
|
|
|
|
'''
|
|
' ##################################################
|
|
' # pdfunite_jobs #
|
|
' ##################################################
|
|
'''
|
|
pdfunite_jobs = []
|
|
for i, job in enumerate(self.jobs):
|
|
input_dir = os.path.join(job.output_dir, 'tmp')
|
|
files = filter(lambda x: x.endswith('.pdf'), os.listdir(input_dir))
|
|
files.sort(key=lambda x: int(re.search(r'\d+', x).group(0)))
|
|
files = map(lambda x: os.path.join(input_dir, x), files)
|
|
output_file = os.path.join(job.output_dir,
|
|
'{}.pdf'.format(job.name))
|
|
cmd = 'pdfunite "{}" "{}"'.format('" "'.join(files), output_file)
|
|
deps = tesseract_jobs
|
|
lbl = 'pdfunite_job_-_{}'.format(i)
|
|
pdfunite_jobs.append(self.addTask(command=cmd, dependencies=deps,
|
|
label=lbl))
|
|
|
|
'''
|
|
' ##################################################
|
|
' # cat_jobs #
|
|
' ##################################################
|
|
'''
|
|
cat_jobs = []
|
|
for i, job in enumerate(self.jobs):
|
|
input_dir = os.path.join(job.output_dir, 'tmp')
|
|
files = filter(lambda x: x.endswith('.txt'), os.listdir(input_dir))
|
|
files.sort(key=lambda x: int(re.search(r'\d+', x).group(0)))
|
|
files = map(lambda x: os.path.join(input_dir, x), files)
|
|
output_file = os.path.join(job.output_dir,
|
|
'{}.txt'.format(job.name))
|
|
cmd = 'cat "{}" > "{}"'.format('" "'.join(files), output_file)
|
|
deps = tesseract_jobs
|
|
lbl = 'cat_job_-_{}'.format(i)
|
|
cat_jobs.append(self.addTask(command=cmd, dependencies=deps,
|
|
label=lbl))
|
|
'''
|
|
' The following jobs are created based of the output files of the
|
|
' pdfunite_jobs. So wait until they are finished.
|
|
'''
|
|
self.waitForTasks()
|
|
|
|
'''
|
|
' ##################################################
|
|
' # compress_jobs #
|
|
' ##################################################
|
|
'''
|
|
compress_jobs = []
|
|
if self.compress:
|
|
for i, job in enumerate(self.jobs):
|
|
print(os.listdir(job.output_dir))
|
|
file = filter(lambda x: x.endswith('.pdf'),
|
|
os.listdir(job.output_dir))[0]
|
|
original_file = os.path.join(job.output_dir, file)
|
|
compressed_file = os.path.join(job.output_dir, 'c_' + file)
|
|
cmd = ('gs '
|
|
+ '-sDEVICE=pdfwrite '
|
|
+ '-dCompatibilityLevel=1.4 '
|
|
+ '-dPDFSETTINGS=/ebook '
|
|
+ '-dNOPAUSE '
|
|
+ '-dQUIET '
|
|
+ '-dBATCH '
|
|
+ '-sOutputFile={o} {i} ').format(o=compressed_file,
|
|
i=original_file)
|
|
cmd += '&& rm {original_f} '.format(original_f=original_file)
|
|
cmd += ('&& mv {compressed_f} '
|
|
+ '{original_f} ').format(compressed_f=compressed_file,
|
|
original_f=original_file)
|
|
deps = (hocrtotei_jobs
|
|
+ tesseract_jobs
|
|
+ pdfunite_jobs
|
|
+ cat_jobs
|
|
+ hocr_poco_jobs
|
|
+ tiff_poco_jobs)
|
|
lbl = 'compress_job_-_{}'.format(i)
|
|
compress_jobs.append(self.addTask(command=cmd,
|
|
dependencies=deps,
|
|
label=lbl))
|
|
|
|
'''
|
|
' ##################################################
|
|
' # zip_jobs #
|
|
' ##################################################
|
|
'''
|
|
zip_jobs = []
|
|
deps = (hocrtotei_jobs
|
|
+ tesseract_jobs
|
|
+ pdfunite_jobs
|
|
+ cat_jobs
|
|
+ hocr_poco_jobs
|
|
+ tiff_poco_jobs
|
|
+ compress_jobs)
|
|
if self.zip is not None:
|
|
# Remove .zip file extension if provided
|
|
if self.zip.endswith('.zip'):
|
|
self.zip = self.zip[:-4]
|
|
self.zip = self.zip if self.zip else 'output'
|
|
# zip ALL
|
|
cmd = 'cd "{}"'.format(self.output_dir)
|
|
cmd += ' && '
|
|
cmd += 'zip'
|
|
cmd += ' -r'
|
|
cmd += ' "{}".all.zip .'.format(self.zip)
|
|
cmd += ' -x "pyflow.data*" "*tmp*"'
|
|
cmd += ' -i "*.pdf" "*.txt" "*.xml" "*.hocr" "*.tif"'
|
|
cmd += ' && '
|
|
cmd += 'cd -'
|
|
lbl = 'zip_job_-_all'
|
|
zip_jobs.append(self.addTask(command=cmd, dependencies=deps,
|
|
label=lbl))
|
|
# zip PDFs
|
|
cmd = 'cd "{}"'.format(self.output_dir)
|
|
cmd += ' && '
|
|
cmd += 'zip'
|
|
cmd += ' -r'
|
|
cmd += ' "{}".pdf.zip .'.format(self.zip)
|
|
cmd += ' -x "pyflow.data*" "*tmp*"'
|
|
cmd += ' -i "*.pdf"'
|
|
cmd += ' && '
|
|
cmd += 'cd -'
|
|
deps = deps + ['zip_job_-_all']
|
|
lbl = 'zip_job_-_pdf'
|
|
zip_jobs.append(self.addTask(command=cmd, dependencies=deps,
|
|
label=lbl))
|
|
# zip TXTs
|
|
cmd = 'cd "{}"'.format(self.output_dir)
|
|
cmd += ' && '
|
|
cmd += 'zip'
|
|
cmd += ' -r'
|
|
cmd += ' "{}".txt.zip .'.format(self.zip)
|
|
cmd += ' -x "pyflow.data*" "*tmp*"'
|
|
cmd += ' -i "*.txt"'
|
|
cmd += ' && '
|
|
cmd += 'cd -'
|
|
deps = deps + ['zip_job_-_all']
|
|
lbl = 'zip_job_-_txt'
|
|
zip_jobs.append(self.addTask(command=cmd, dependencies=deps,
|
|
label=lbl))
|
|
# zip XMLs
|
|
cmd = 'cd "{}"'.format(self.output_dir)
|
|
cmd += ' && '
|
|
cmd += 'zip'
|
|
cmd += ' -r'
|
|
cmd += ' "{}".xml.zip .'.format(self.zip)
|
|
cmd += ' -x "pyflow.data*" "*tmp*"'
|
|
cmd += ' -i "*.xml"'
|
|
cmd += ' && '
|
|
cmd += 'cd -'
|
|
deps = deps + ['zip_job_-_all']
|
|
lbl = 'zip_job_-_xml'
|
|
zip_jobs.append(self.addTask(command=cmd, dependencies=deps,
|
|
label=lbl))
|
|
# zip PoCo files
|
|
poco_paths = []
|
|
poco_names = []
|
|
for i, job in enumerate(self.jobs):
|
|
poco_paths.append(os.path.join(os.path.basename(job.output_dir), # noqa
|
|
'PoCo'))
|
|
poco_names.append(job.output_dir)
|
|
|
|
cmd = 'cd "{}"'.format(self.output_dir)
|
|
cmd += ' && '
|
|
cmd += 'zip'
|
|
cmd += ' -r'
|
|
cmd += ' "{}".poco.zip'.format(self.zip)
|
|
cmd += ' "{}"'.format('" "'.join(poco_paths))
|
|
cmd += ' && '
|
|
cmd += 'cd -'
|
|
deps = deps + ['zip_job_-_all']
|
|
lbl = 'zip_job_-_poco_{}'.format(i)
|
|
zip_jobs.append(self.addTask(command=cmd, dependencies=deps,
|
|
label=lbl))
|
|
|
|
'''
|
|
' ##################################################
|
|
' # mv_jobs #
|
|
' ##################################################
|
|
'''
|
|
mv_jobs = []
|
|
if self.keep_intermediates:
|
|
for i, job in enumerate(self.jobs):
|
|
input_dir = os.path.join(job.output_dir, 'tmp')
|
|
output_dir = input_dir
|
|
cmd = 'mv "{}"/*.hocr "{}"'.format(
|
|
input_dir, os.path.join(output_dir, 'hocr'))
|
|
cmd += ' && '
|
|
cmd += 'mv "{}"/*.pdf "{}"'.format(input_dir, os.path.join(output_dir, 'pdf')) # noqa
|
|
cmd += ' && '
|
|
cmd += 'mv "{}"/*.tif "{}"'.format(input_dir, os.path.join(output_dir, 'tiff')) # noqa
|
|
cmd += ' && '
|
|
cmd += 'mv "{}"/*.txt "{}"'.format(input_dir, os.path.join(output_dir, 'txt')) # noqa
|
|
if self.binarize:
|
|
cmd += ' && '
|
|
cmd += 'mv "{}"/*.bin.png "{}"'.format(input_dir, os.path.join(output_dir, 'bin.png')) # noqa
|
|
cmd += ' && '
|
|
cmd += 'mv "{}"/*.nrm.png "{}"'.format(input_dir, os.path.join(output_dir, 'nrm.png')) # noqa
|
|
deps = (hocrtotei_jobs
|
|
+ tesseract_jobs
|
|
+ pdfunite_jobs
|
|
+ cat_jobs
|
|
+ hocr_poco_jobs
|
|
+ tiff_poco_jobs,
|
|
+ compress_jobs
|
|
+ zip_jobs)
|
|
lbl = 'mv_job_-_{}'.format(i)
|
|
mv_jobs.append(self.addTask(command=cmd, dependencies=deps,
|
|
label=lbl))
|
|
else:
|
|
for i, job in enumerate(self.jobs):
|
|
input_dir = os.path.join(job.output_dir, 'tmp')
|
|
cmd = 'rm -r "{}"'.format(input_dir)
|
|
deps = (hocrtotei_jobs
|
|
+ tesseract_jobs
|
|
+ pdfunite_jobs
|
|
+ cat_jobs
|
|
+ hocr_poco_jobs
|
|
+ tiff_poco_jobs
|
|
+ compress_jobs
|
|
+ zip_jobs)
|
|
lbl = 'mv_job_-_{}'.format(i)
|
|
mv_jobs.append(self.addTask(command=cmd, dependencies=deps,
|
|
label=lbl))
|
|
|
|
|
|
def collect_jobs(input_dir, output_dir):
    """Recursively collect OCR jobs: one job per PDF below *input_dir*.

    The input directory tree is mirrored below *output_dir*; each job's
    output directory is the mirrored path of its PDF file.
    """
    jobs = []
    for entry in os.listdir(input_dir):
        entry_path = os.path.join(input_dir, entry)
        mirrored_path = os.path.join(output_dir, entry)
        if os.path.isdir(entry_path):
            # Descend into subdirectories, mirroring them in the output.
            jobs.extend(collect_jobs(entry_path, mirrored_path))
        elif entry.endswith('.pdf'):
            jobs.append(OCRPipelineJob(entry_path, mirrored_path))
    return jobs
|
|
|
|
|
|
def main():
    """Entry point: parse arguments, collect jobs, run the OCR pipeline."""
    args = parse_args()
    jobs = collect_jobs(args.i, args.o)
    pipeline = OCRPipeline(args.binarize, jobs, args.keep_intermediates,
                           args.language, args.n_cores, args.o, args.zip,
                           args.compress)
    # Pyflow writes its state into log_dir if given, else the output dir.
    exit_code = pipeline.run(dataDirRoot=(args.log_dir or args.o),
                             nCores=args.n_cores)
    sys.exit(exit_code)


if __name__ == '__main__':
    main()
|