From ea091152bdb0b1b66cf7e262f0bee51e3d4e8998 Mon Sep 17 00:00:00 2001
From: Patrick Jentsch 
Date: Tue, 15 Jan 2019 10:46:35 +0100
Subject: [PATCH] Fixed ticks and "*"-operator usages.

---
 ocr_pyflow | 261 +++++++++++++++++++++--------------------------------
 1 file changed, 104 insertions(+), 157 deletions(-)
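
Usage sketch for the patched interface (illustrative only): after this change a
job is a plain dict produced by analyze_jobs(), and OCRWorkflow takes its
arguments in the order (jobs, keepIntermediates, lang, nCores). The snippet
below assumes pyflow is installed and that the script is importable as a module
named "ocr_pyflow"; that module name and the paths are assumptions for the
example, not part of the patch.

    # Illustrative driver mirroring the patched main(); import name and paths
    # are assumed for this example only.
    from ocr_pyflow import OCRWorkflow, analyze_jobs

    jobs = analyze_jobs("/data/scans", "/data/ocr_out")
    # Each entry looks like:
    # {"basename": "book.pdf",
    #  "output_dir": "/data/ocr_out/book",
    #  "path": "/data/scans/book.pdf"}
    wflow = OCRWorkflow(jobs, False, "deu", 4)  # jobs, keepIntermediates, lang, nCores
    retval = wflow.run(nCores=4)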
diff --git a/ocr_pyflow b/ocr_pyflow
index f4f845b..61c50b7 100755
--- a/ocr_pyflow
+++ b/ocr_pyflow
@@ -6,31 +6,18 @@
 """
 ocr_pyflow.py
 
-Date: 18/10/2018
 Usage: For usage instructions run with option --help
 
-Author: Madis Rumming 
+Author: Patrick Jentsch 
 """
 
-__author__ = "Madis Rumming "
-__copyright__ = "Copyright 2018, Data Infrastructure and Digital Humanities,\
-SFB 1288, Bielefeld University"
-
-__version__ = "0.7"
-__maintainer__ = "Patrick Jentsch"
-__email__ = "p.jentsch@uni-bielefeld.de"
-__status__ = "Development"
-
-
 import argparse
 import multiprocessing
 import os
 import sys
-import unicodedata
 from pyflow import WorkflowRunner
 
-
 '''
 TODO:
 ' Implement --end-page: Last page to ocr
 ' Implement --memMb: Total amount of memory (RAM) available for this workflow. Default: 2048 * nCores
@@ -48,7 +35,6 @@ def parse_arguments():
                         help="Input directory.",
                         required=True)
     parser.add_argument("-l",
-                        choices=["deu", "deu_frak", "eng", "enm", "fra", "spa", "frm"],
                         dest='lang',
                         help="Language for OCR",
                         required=True)
@@ -56,24 +42,6 @@ def parse_arguments():
                         dest="outputDir",
                         help="Output directory.",
                         required=True)
-    parser.add_argument("--skip-image",
-                        action='store_true',
-                        default=False,
-                        dest="skip_images",
-                        help="Skip detection of images as input.",
-                        required=False)
-    parser.add_argument("--skip-pdf",
-                        action='store_true',
-                        default=False,
-                        dest="skip_pdf",
-                        help="Skip detection of PDFs as input.",
-                        required=False)
-    parser.add_argument("--pdf",
-                        action='store_true',
-                        default=False,
-                        dest='pdf',
-                        help="Create PDF files.",
-                        required=False)
     parser.add_argument("--keep-intermediates",
                         action='store_true',
                         default=False,
@@ -89,17 +57,14 @@ def parse_arguments():
     return parser.parse_args()
 
 
-
 class OCRWorkflow(WorkflowRunner):
-    def __init__(self, jobs, lang, pdf, keepIntermediates, nCores):
+    def __init__(self, jobs, keepIntermediates, lang, nCores):
         self.jobs = jobs
-        self.lang = lang
-        self.pdf = pdf
         self.keepIntermediates = keepIntermediates
+        self.lang = lang
         self.nCores = nCores
 
-
     def workflow(self):
         ###
         # Task "mkdir_job": create output directories
@@ -107,45 +72,45 @@ class OCRWorkflow(WorkflowRunner):
         ###
         mkdir_jobs = []
         mkdir_job_number = 0
-        for job in self.jobs["images"] + self.jobs["pdf"]:
+        for job in self.jobs:
            mkdir_job_number += 1
-            cmd = 'mkdir -p \"%s\" \"%s\" \"%s\" \"%s\"' % (
+            cmd = 'mkdir -p "%s" "%s" "%s" "%s"' % (
                os.path.join(job["output_dir"], "hocr_files"),
                os.path.join(job["output_dir"], "tmp", "ocropus-nlbin"),
                os.path.join(job["output_dir"], "tmp", "tesseract"),
-                os.path.join(job["output_dir"], "tmp", "tiff_files"))
+                os.path.join(job["output_dir"], "tmp", "tiff_files")
+            )
            mkdir_jobs.append(self.addTask(label="mkdir_job_-_%i" % (mkdir_job_number), command=cmd))
 
-
         ###
         # Task "split_job": split input file into one tiff file per page
         # Dependencies: mkdir_jobs
         ###
         split_jobs = []
         split_job_number = 0
-        for job in self.jobs["images"]:
+        for job in self.jobs:
            split_job_number += 1
-            # TODO: Make the following command work
-            '''
-            cmd = 'convert \"%s\" \"%s\"' % (
-                job["path"],
-                os.path.join(job["output_dir"], "tmp", "tiff_files", os.path.basename(job["path"]).rsplit(".", 1)[0] + "-%sd.tif" % ("%")))
-            '''
-            # WORKAROUND
-            cmd = 'tiff2pdf -o \"%s\" \"%s\" && pdftoppm \"%s\" \"%s\" -tiff -r 300 -tiffcompression lzw -cropbox && rm \"%s\"' % (
-                os.path.join(job["output_dir"], "tmp", "tiff_files", os.path.basename(job["path"]).rsplit(".", 1)[0] + ".pdf"),
-                job["path"],
-                os.path.join(job["output_dir"], "tmp", "tiff_files", os.path.basename(job["path"]).rsplit(".", 1)[0] + ".pdf"),
-                os.path.join(job["output_dir"], "tmp", "tiff_files", os.path.basename(job["path"]).rsplit(".", 1)[0]),
-                os.path.join(job["output_dir"], "tmp", "tiff_files", os.path.basename(job["path"]).rsplit(".", 1)[0] + ".pdf"))
+            if job["basename"].endswith(".tif") or job["basename"].endswith(".tiff"):
+                # TODO: Make the following command work
+                '''
+                cmd = 'convert "%s" "%s"' % (
+                    job["path"],
+                    os.path.join(job["output_dir"], "tmp", "tiff_files", os.path.basename(job["path"]).rsplit(".", 1)[0] + "-%sd.tif" % ("%")))
+                '''
+                # WORKAROUND
+                cmd = 'tiff2pdf -o "%s" "%s" && pdftoppm "%s" "%s" -tiff -r 300 -tiffcompression lzw -cropbox && rm "%s"' % (
+                    os.path.join(job["output_dir"], "tmp", "tiff_files", job["basename"].rsplit(".", 1)[0] + ".pdf"),
+                    job["path"],
+                    os.path.join(job["output_dir"], "tmp", "tiff_files", job["basename"].rsplit(".", 1)[0] + ".pdf"),
+                    os.path.join(job["output_dir"], "tmp", "tiff_files", "page"),
+                    os.path.join(job["output_dir"], "tmp", "tiff_files", job["basename"].rsplit(".", 1)[0] + ".pdf")
+                )
+            else:
+                cmd = 'pdftoppm "%s" "%s" -tiff -r 300 -tiffcompression lzw -cropbox' % (
+                    job["path"],
+                    os.path.join(job["output_dir"], "tmp", "tiff_files", "page")
+                )
            split_jobs.append(self.addTask(label="split_job_-_%i" % (split_job_number), command=cmd, dependencies=mkdir_jobs))
-        for job in self.jobs["pdf"]:
-            split_job_number += 1
-            cmd = 'pdftoppm \"%s\" \"%s\" -tiff -r 300 -tiffcompression lzw -cropbox' % (
-                job["path"],
-                os.path.join(job["output_dir"], "tmp", "tiff_files", os.path.basename(job["path"]).rsplit(".", 1)[0]))
-            split_jobs.append(self.addTask(label="split_job_-_%i" % (split_job_number), command=cmd, dependencies=mkdir_jobs))
-
 
         ###
         # Task "ocropus_nlbin_job": binarize tiff files from previous split
@@ -153,14 +118,14 @@ class OCRWorkflow(WorkflowRunner):
         ###
         ocropusnlbin_jobs = []
         ocropusnlbin_job_number = 0
-        for job in self.jobs["images"] + self.jobs["pdf"]:
+        for job in self.jobs:
            ocropusnlbin_job_number += 1
-            cmd = 'ocropus-nlbin -o \"%s\" \"%s\"' % (
+            cmd = 'ocropus-nlbin -o "%s" "%s"' % (
                os.path.join(job["output_dir"], "tmp", "ocropus-nlbin"),
-                os.path.join(job["output_dir"], "tmp", "tiff_files", os.path.basename(job["path"]).rsplit(".", 1)[0] + "-*.tif"))
+                os.path.join(job["output_dir"], "tmp", "tiff_files", "page-*.tif")
+            )
            ocropusnlbin_jobs.append(self.addTask(label="ocropusnlbin_job_-_%i" % (ocropusnlbin_job_number), command=cmd, dependencies=split_jobs))
 
-
         ###
         # Task "tesseract_job": perform OCR on binarized images
         # Dependencies: ocropusnlbin_jobs
         ###
         self.waitForTasks()
         tesseract_jobs = []
         tesseract_job_number = 0
-        for job in self.jobs["images"] + self.jobs["pdf"]:
+        for job in self.jobs:
            # This list is empty if you don't wait for ocropus_nlbin_jobs to complete
            for file in filter(lambda x: x.endswith(".bin.png"), os.listdir(os.path.join(job["output_dir"], "tmp", "ocropus-nlbin"))):
                tesseract_job_number += 1
-                cmd = 'tesseract \"%s\" \"%s\" -l \"%s\" hocr \"%s\"' % (
+                cmd = 'tesseract "%s" "%s" -l "%s" hocr pdf txt' % (
                    os.path.join(job["output_dir"], "tmp", "ocropus-nlbin", file),
                    os.path.join(job["output_dir"], "tmp", "tesseract", file.rsplit(".", 2)[0]),
-                    self.lang,
-                    "pdf" if self.pdf else "")
+                    self.lang
+                )
                tesseract_jobs.append(self.addTask(label="tesseract_job_-_%i" % (tesseract_job_number), command=cmd, dependencies=ocropusnlbin_jobs, nCores=min(4, self.nCores)))
 
+        ###
+        # Task "hocr_to_teip5_job": create TEI P5 file from hocr files
+        # Dependencies: tesseract_jobs
+        ###
+        hocr_to_teip5_jobs = []
+        hocr_to_teip5_job_number = 0
+        for job in self.jobs:
+            hocr_to_teip5_job_number += 1
+            cmd = 'parse_hocr "%s" "%s"' % (
+                os.path.join(job["output_dir"], "tmp", "tesseract"),
+                os.path.join(job["output_dir"], job["basename"].rsplit(".", 1)[0] + ".xml")
+            )
+            hocr_to_teip5_jobs.append(self.addTask(label="hocr_to_teip5_job_-_%i" % (hocr_to_teip5_job_number), command=cmd, dependencies=tesseract_jobs))
+
+        ###
+        # Task "move_hocr_job": move hocr files from /tmp/tesseract to /hocr_files
+        # Dependencies: hocr_to_teip5_jobs
+        ###
+        move_hocr_jobs = []
+        move_hocr_job_number = 0
+        for job in self.jobs:
+            move_hocr_job_number += 1
+            cmd = 'mv "%s"/*.hocr "%s"' % (
+                os.path.join(job["output_dir"], "tmp", "tesseract"),
+                os.path.join(job["output_dir"], "hocr_files")
+            )
+            move_hocr_jobs.append(self.addTask(label="move_hocr_job_-_%i" % (move_hocr_job_number), command=cmd, dependencies=hocr_to_teip5_jobs))
 
         ###
         # Task "pdf_merge_job": Merge PDF files
@@ -186,119 +178,74 @@ class OCRWorkflow(WorkflowRunner):
         ###
         pdf_merge_jobs = []
         pdf_merge_job_number = 0
-        if self.pdf:
-            for job in self.jobs["images"] + self.jobs["pdf"]:
-                pdf_merge_job_number += 1
-                cmd = 'pdftk \"%s\" cat output \"%s\"' % (
-                    os.path.join(job["output_dir"], "tmp", "tesseract", "*.pdf"),
-                    os.path.join(job["output_dir"], os.path.basename(job["path"].rsplit(".", 1)[0] + ".pdf")))
-                pdf_merge_jobs.append(self.addTask(label="pdf_merge_job_-_%i" % (pdf_merge_job_number), command=cmd, dependencies=tesseract_jobs))
-
+        for job in self.jobs:
+            pdf_merge_job_number += 1
+            cmd = 'pdftk "%s"/*.pdf cat output "%s"' % (
+                os.path.join(job["output_dir"], "tmp", "tesseract"),
+                os.path.join(job["output_dir"], job["basename"].rsplit(".", 1)[0] + ".pdf")
+            )
+            pdf_merge_jobs.append(self.addTask(label="pdf_merge_job_-_%i" % (pdf_merge_job_number), command=cmd, dependencies=tesseract_jobs))
 
         ###
-        # Task "pdf_to_txt_jobs":
+        # Task "pdf_to_txt_job":
         # Dependencies: pdf_merge_jobs
         ###
         pdf_to_txt_jobs = []
         pdf_to_txt_job_number = 0
-        if self.pdf:
-            for job in self.jobs["images"] + self.jobs["pdf"]:
-                pdf_to_txt_job_number += 1
-                cmd = 'pdftotext -raw \"%s\"' % (
-                    os.path.join(job["output_dir"], os.path.basename(job["path"].rsplit(".", 1)[0] + ".pdf")))
-                pdf_merge_jobs.append(self.addTask(label="pdf_to_txt_job_-_%i" % (pdf_to_txt_job_number), command=cmd, dependencies=pdf_merge_jobs))
-
-
-            ###
-            # Task "move_hocr_job": move hocr files from /tmp/tesseract to /hocr_files
-            # Dependencies: tesseract_jobs
-            ###
-            move_hocr_jobs = []
-            move_hocr_job_number = 0
-            for job in self.jobs["images"] + self.jobs["pdf"]:
-                move_hocr_job_number += 1
-                cmd = 'mv \"%s\" \"%s\"' % (
-                    os.path.join(job["output_dir"], "tmp", "tesseract", "*.hocr"),
-                    os.path.join(job["output_dir"], "hocr_files"))
-                move_hocr_jobs.append(self.addTask(label="move_hocr_job_-_%i" % (move_hocr_job_number), command=cmd, dependencies=tesseract_jobs))
-
-
-            ###
-            # Task "hocr_to_teip5_job": create TEI P5 file from hocr files
-            # Dependencies: move_hocr_jobs
-            ###
-            hocr_to_teip5_jobs = []
-            hocr_to_teip5_job_number = 0
-            for job in self.jobs["images"] + self.jobs["pdf"]:
-                hocr_to_teip5_job_number += 1
-                cmd = 'parse_hocr \"%s\" \"%s\"' % (
-                    os.path.join(job["output_dir"], "hocr_files"),
-                    os.path.join(os.path.join(job["output_dir"], os.path.basename(job["path"]).rsplit(".", 1)[0] + ".xml")))
-                hocr_to_teip5_jobs.append(self.addTask(label="hocr_to_teip5_job_-_%i" % (hocr_to_teip5_job_number), command=cmd, dependencies=move_hocr_jobs))
-
+        for job in self.jobs:
+            pdf_to_txt_job_number += 1
+            cmd = 'pdftotext -raw "%s"' % (
+                os.path.join(job["output_dir"], job["basename"].rsplit(".", 1)[0] + ".pdf")
+            )
+            pdf_to_txt_jobs.append(self.addTask(label="pdf_to_txt_job_-_%i" % (pdf_to_txt_job_number), command=cmd, dependencies=pdf_merge_jobs))
 
         ###
         # Task "cleanup_job": remove temporary files
-        # Dependencies: All
+        # Dependencies: hocr_to_teip5_jobs + move_hocr_jobs + pdf_merge_jobs + pdf_to_txt_jobs
         ###
-        self.waitForTasks()
         cleanup_jobs = []
         cleanup_job_counter = 0
         if not self.keepIntermediates:
-            for job in self.jobs["images"] + self.jobs["pdf"]:
+            for job in self.jobs:
                cleanup_job_counter += 1
-                cmd = 'rm -r \"%s\"' % (os.path.join(job["output_dir"], "tmp"))
-                cleanup_jobs.append(self.addTask(label="cleanup_job_-_%i" % (cleanup_job_counter), command=cmd))
+                cmd = 'rm -r "%s"' % (
+                    os.path.join(job["output_dir"], "tmp")
+                )
+                cleanup_jobs.append(self.addTask(label="cleanup_job_-_%i" % (cleanup_job_counter), command=cmd, dependencies=hocr_to_teip5_jobs + move_hocr_jobs + pdf_merge_jobs + pdf_to_txt_jobs))
 
 
+def analyze_jobs(inputDir, outputDir, level=1):
+    jobs = []
 
-def analyze_jobs(inputDir, outputDir, skip_pdf=False, skip_images=False):
-    files = os.listdir(inputDir)
-    images = []
-    pdf = []
+    if level > 2:
+        return jobs
 
-    if not skip_images:
-        for file in filter(lambda x: x.endswith(".tif") or x.endswith(".tiff"), files):
-            images.append({"path": os.path.join(inputDir, file), "output_dir": os.path.join(outputDir, file.rsplit(".", 1)[0])})
-    if not skip_pdf:
-        for file in filter(lambda x: x.endswith(".pdf"), files):
-            pdf.append({"path": os.path.join(inputDir, file), "output_dir": os.path.join(outputDir, file.rsplit(".", 1)[0])})
-    for subDir in filter(lambda x: os.path.isdir(os.path.join(inputDir, x)), files):
-        subDirFiles = os.listdir(os.path.join(inputDir, subDir))
-        if not skip_images:
-            for file in filter(lambda x: x.endswith(".tif") or x.endswith(".tiff"), subDirFiles):
-                images.append({"path": os.path.join(inputDir, subDir, file), "output_dir": os.path.join(outputDir, subDir, file.rsplit(".", 1)[0])})
-        if not skip_pdf:
-            for file in filter(lambda x: x.endswith(".pdf"), subDirFiles):
-                pdf.append({"path": os.path.join(inputDir, subDir, file), "output_dir": os.path.join(outputDir, subDir, file.rsplit(".", 1)[0])})
-    return {"pdf": pdf, "images": images}
-
-
-
-def normalize_input_filenames(path):
-    ###
-    # Normalize input filenames and directories to avoid bugs and also for better usage and readability.
-    ###
-    for file in os.listdir(path):
-        file_with_path = os.path.join(path, file)
-        if os.path.isdir(file_with_path):
-            normalize_input_filenames(file_with_path)
-        new_file_with_path = os.path.join(path, unicodedata.normalize("NFKD", file.decode("utf-8")).encode("ascii", "ignore").replace(" ", "_"))
-        os.rename(file_with_path, new_file_with_path)
+    for file in os.listdir(inputDir):
+        if os.path.isdir(os.path.join(inputDir, file)):
+            jobs += analyze_jobs(
+                os.path.join(inputDir, file),
+                os.path.join(outputDir, file),
+                level + 1
+            )
+        elif file.endswith(".pdf") or file.endswith(".tif") or file.endswith(".tiff"):
+            jobs.append({"basename": os.path.basename(file), "output_dir": os.path.join(outputDir, file.rsplit(".", 1)[0]), "path": os.path.join(inputDir, file)})
+    return jobs
 
 
 def main():
     args = parse_arguments()
-    # normalize_input_filenames(args.inputDir)
-    jobs = analyze_jobs(args.inputDir, args.outputDir, skip_pdf=args.skip_pdf, skip_images=args.skip_images)
+    wflow = OCRWorkflow(
+        analyze_jobs(args.inputDir, args.outputDir),
+        args.keepIntermediates,
+        args.lang,
+        args.nCores
+    )
 
-    wflow = OCRWorkflow(jobs, args.lang, args.pdf, args.keepIntermediates, args.nCores)
     retval = wflow.run(nCores=args.nCores)
     sys.exit(retval)
 
-
 if __name__ == "__main__":
-    main()
+    main()
\ No newline at end of file