#!/usr/bin/env python2.7
# coding=utf-8

"""
ocr_pyflow.py

Date: 18/10/2018
Usage: For usage instructions run with option --help

Author: Madis Rumming
"""

__author__ = "Madis Rumming"
__copyright__ = ("Copyright 2018, Data Infrastructure and Digital Humanities, "
                 "SFB 1288, Bielefeld University")
__version__ = "0.7"
__maintainer__ = "Patrick Jentsch"
__email__ = "p.jentsch@uni-bielefeld.de"
__status__ = "Development"

import argparse
import multiprocessing
import os
import sys
import unicodedata

from pyflow import WorkflowRunner


def parse_arguments():
    parser = argparse.ArgumentParser(
        description="Performs OCR of (historical) documents utilizing OCRopus for "
                    "preprocessing and Tesseract OCR for text recognition. Available "
                    "outputs are hOCR, PDF, shrunken PDF, and simple DTAbf (TEI P5 "
                    "compliant). Software requirements: imagemagick, ocropus, pdftk, "
                    "pdftoppm, poppler-utils, pyflow, python2.7, tesseract")
    parser.add_argument("-i", "--input-directory",
                        dest="input_dir",
                        help="Input directory containing (multipage) TIFFs or PDFs. For each "
                             "input file an OCR run is instantiated and output is created.",
                        required=True)
    parser.add_argument("-o", "--output-directory",
                        dest="output_dir",
                        help="Directory where output directories are created if necessary. "
                             "Default: %s" % (os.path.join(os.path.curdir, "ocr_pyflow")),
                        required=False,
                        default=os.path.join(os.path.curdir, "ocr_pyflow"))
    parser.add_argument("--skip-pdf-processing",
                        dest="skip_pdf",
                        help="Skip detection of PDFs as input.",
                        default=False,
                        action='store_true',
                        required=False)
    parser.add_argument("--skip-image-processing",
                        dest="skip_images",
                        help="Skip detection of images as input.",
                        default=False,
                        action='store_true',
                        required=False)
    parser.add_argument("--start-page",
                        dest='startp',
                        help="NOT IMPLEMENTED! First page to OCR.",
                        default=-1,
                        required=False,
                        type=int)
    parser.add_argument("--end-page",
                        dest='endp',
                        help="NOT IMPLEMENTED! Last page to OCR.",
                        default=-1,
                        required=False,
                        type=int)
    parser.add_argument("-r", "--rotate-pages",
                        dest='rotate',
                        help="NOT IMPLEMENTED! Rotate pages from input. Values: clockwise, "
                             "counterclockwise, upsidedown. Default: norotation",
                        default='norotation',
                        required=False,
                        choices=['clockwise', 'counterclockwise', 'upsidedown', 'norotation'])
    parser.add_argument("-s", "--split-pages",
                        dest='split',
                        help="NOT IMPLEMENTED! Split pages in half after possible rotation. "
                             "Default: Not performed.",
                        default=False,
                        required=False,
                        action='store_true')
    parser.add_argument("--ppi-import",
                        dest="ppi_in",
                        help="NOT IMPLEMENTED! Scaling for input images. Default: 300 ppi.",
                        default=300,
                        required=False,
                        type=int)
    parser.add_argument("-l", "--language",
                        dest='lang',
                        help="Language for OCR.",
                        required=True,
                        type=str)
    parser.add_argument("-p", "--create-pdf",
                        dest='pdf',
                        help="Create a merged, searchable PDF from the OCR results.",
                        default=False,
                        action='store_true',
                        required=False)
    parser.add_argument("-c", "--compress-pdf",
                        dest='comp',
                        help="NOT IMPLEMENTED!",
                        default=False,
                        action='store_true',
                        required=False)
    parser.add_argument("--ppi-export",
                        dest="ppi_out",
                        help="NOT IMPLEMENTED! Scaling for output images in PDF. Default: 150 ppi.",
                        default=150,
                        required=False,
                        type=int)
    parser.add_argument("-k", "--keep-intermediate",
                        dest="intermediate",
                        help="Keep intermediate files. Default: False",
                        default=False,
                        action='store_true')
    parser.add_argument("--cores",
                        dest='nCores',
                        help="Number of CPUs to use for parallel jobs. Default: number of "
                             "available CPUs.",
                        default=multiprocessing.cpu_count(),
                        required=False,
                        type=int)
    parser.add_argument("--is-continued",
                        dest='continued',
                        help="NOT IMPLEMENTED! Enables continuing an erroneous or paused "
                             "workflow. MUST use the same dataDirRoot as before.",
                        default=False,
                        required=False,
                        action='store_true')
    parser.add_argument("--memory",
                        dest='mem',
                        help="NOT IMPLEMENTED! Total amount of memory (RAM) available for "
                             "this workflow. Default: %i" % (8192),
                        default=8192,
                        required=False,
                        type=int)
    args = parser.parse_args()
    return args
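
# Example invocation (illustrative only): the input/output paths and the Tesseract
# language code below are placeholders, not part of this repository. Assuming a
# directory of scanned TIFFs and an installed 'deu' language pack, a run with PDF
# output on 8 cores might look like this:
#
#   python2.7 ocr_pyflow.py -i /data/scans -o /data/ocr_out -l deu -p --cores 8
#
# Each input file then gets its own output directory under /data/ocr_out (see the
# layout sketch at the end of this file).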


class OCRWorkflow(WorkflowRunner):
    def __init__(self, pdfImageJobs, inputDir, outputDir, lang, pdf, intermediate, nCores, memMb):
        self.pdfImageJobs = pdfImageJobs
        self.outputDir = outputDir
        self.inputDir = inputDir
        self.lang = lang
        self.pdf = pdf
        self.intermediate = intermediate
        self.nCores = nCores
        self.memMb = memMb

    def workflow(self):
        ###
        # Task "mkdir_job": create output directories
        # Dependencies: None
        ###
        mkdir_jobs = []
        mkdir_job_number = 0
        for job in self.pdfImageJobs["images"] + self.pdfImageJobs["pdf"]:
            mkdir_job_number += 1
            cmd = "mkdir -p %s %s %s %s" % (
                os.path.join(job["output_dir"], "hocr_files"),
                os.path.join(job["output_dir"], "tmp", "ocropus-nlbin"),
                os.path.join(job["output_dir"], "tmp", "tesseract"),
                os.path.join(job["output_dir"], "tmp", "tiff_files"))
            mkdir_jobs.append(self.addTask(label="mkdir_job_-_%i" % (mkdir_job_number), command=cmd))

        ###
        # Task "split_job": split input file into one tiff file per page
        # Dependencies: mkdir_jobs
        ###
        split_jobs = []
        split_job_number = 0
        for job in self.pdfImageJobs["images"]:
            split_job_number += 1
            # TODO: Make the following command work
            '''
            cmd = "convert %s %s" % (
                job["path"],
                os.path.join(job["output_dir"], "tmp", "tiff_files",
                             os.path.basename(job["path"]).rsplit(".", 1)[0] + "-%sd.tif" % ("%")))
            '''
            # WORKAROUND
            cmd = "tiff2pdf -o %s %s && pdftoppm %s %s -tiff -r 300 -tiffcompression lzw -cropbox && rm %s" % (
                os.path.join(job["output_dir"], "tmp", "tiff_files",
                             os.path.basename(job["path"]).rsplit(".", 1)[0] + ".pdf"),
                job["path"],
                os.path.join(job["output_dir"], "tmp", "tiff_files",
                             os.path.basename(job["path"]).rsplit(".", 1)[0] + ".pdf"),
                os.path.join(job["output_dir"], "tmp", "tiff_files",
                             os.path.basename(job["path"]).rsplit(".", 1)[0]),
                os.path.join(job["output_dir"], "tmp", "tiff_files",
                             os.path.basename(job["path"]).rsplit(".", 1)[0] + ".pdf"))
            split_jobs.append(self.addTask(label="split_job_-_%i" % (split_job_number), command=cmd,
                                           dependencies=mkdir_jobs, nCores=1, memMb=1024))
        for job in self.pdfImageJobs["pdf"]:
            split_job_number += 1
            cmd = "pdftoppm %s %s -tiff -r 300 -tiffcompression lzw -cropbox" % (
                job["path"],
                os.path.join(job["output_dir"], "tmp", "tiff_files",
                             os.path.basename(job["path"]).rsplit(".", 1)[0]))
            split_jobs.append(self.addTask(label="split_job_-_%i" % (split_job_number), command=cmd,
                                           dependencies=mkdir_jobs, nCores=1, memMb=1024))

        ###
        # Task "ocropus_nlbin_job": binarize tiff files from previous split
        # Dependencies: split_jobs
        ###
        ocropusnlbin_jobs = []
        ocropusnlbin_job_number = 0
        for job in self.pdfImageJobs["images"] + self.pdfImageJobs["pdf"]:
            ocropusnlbin_job_number += 1
            cmd = "ocropus-nlbin -Q %i -o %s %s" % (
                self.nCores,
                os.path.join(job["output_dir"], "tmp", "ocropus-nlbin"),
                os.path.join(job["output_dir"], "tmp", "tiff_files",
                             os.path.basename(job["path"]).rsplit(".", 1)[0] + "-*.tif"))
            ocropusnlbin_jobs.append(self.addTask(label="ocropusnlbin_job_-_%i" % (ocropusnlbin_job_number),
                                                  command=cmd, dependencies=split_jobs,
                                                  nCores=self.nCores, memMb=self.memMb))

        ###
        # Task "tesseract_job": perform OCR on binarized images
        # Dependencies: ocropusnlbin_jobs
        ###
        self.waitForTasks()
        tesseract_jobs = []
        tesseract_job_number = 0
        for job in self.pdfImageJobs["images"] + self.pdfImageJobs["pdf"]:
            # This list is empty if you don't wait for ocropus_nlbin_jobs to complete
            for file in filter(lambda x: x.endswith(".bin.png"),
                               os.listdir(os.path.join(job["output_dir"], "tmp", "ocropus-nlbin"))):
                tesseract_job_number += 1
                cmd = "tesseract %s %s -l %s hocr %s" % (
                    os.path.join(job["output_dir"], "tmp", "ocropus-nlbin", file),
                    os.path.join(job["output_dir"], "tmp", "tesseract", file.rsplit(".", 2)[0]),
                    self.lang,
                    "pdf" if self.pdf else "")
                tesseract_jobs.append(self.addTask(label="tesseract_job_-_%i" % (tesseract_job_number),
                                                   command=cmd, dependencies=ocropusnlbin_jobs,
                                                   nCores=1, memMb=2048))

        ###
        # Task "pdf_merge_job": merge PDF files
        # Dependencies: tesseract_jobs
        ###
        pdf_merge_jobs = []
        pdf_merge_job_number = 0
        if self.pdf:
            for job in self.pdfImageJobs["images"] + self.pdfImageJobs["pdf"]:
                pdf_merge_job_number += 1
                cmd = "pdftk %s cat output %s" % (
                    os.path.join(job["output_dir"], "tmp", "tesseract", "*.pdf"),
                    os.path.join(job["output_dir"],
                                 os.path.basename(job["path"]).rsplit(".", 1)[0] + ".pdf"))
                pdf_merge_jobs.append(self.addTask(label="pdf_merge_job_-_%i" % (pdf_merge_job_number),
                                                   command=cmd, dependencies=tesseract_jobs,
                                                   nCores=1, memMb=4096))

        ###
        # Task "pdf_to_txt_job": extract plain text from the merged PDF files
        # Dependencies: pdf_merge_jobs
        ###
        pdf_to_txt_jobs = []
        pdf_to_txt_job_number = 0
        if self.pdf:
            for job in self.pdfImageJobs["images"] + self.pdfImageJobs["pdf"]:
                pdf_to_txt_job_number += 1
                cmd = "pdftotext -raw %s" % (
                    os.path.join(job["output_dir"],
                                 os.path.basename(job["path"]).rsplit(".", 1)[0] + ".pdf"))
                pdf_to_txt_jobs.append(self.addTask(label="pdf_to_txt_job_-_%i" % (pdf_to_txt_job_number),
                                                    command=cmd, dependencies=pdf_merge_jobs,
                                                    nCores=1, memMb=4096))

        ###
        # Task "move_hocr_job": move hOCR files from tmp/tesseract to hocr_files
        # Dependencies: tesseract_jobs
        ###
        move_hocr_jobs = []
        move_hocr_job_number = 0
        for job in self.pdfImageJobs["images"] + self.pdfImageJobs["pdf"]:
            move_hocr_job_number += 1
            cmd = "mv %s %s" % (
                os.path.join(job["output_dir"], "tmp", "tesseract", "*.hocr"),
                os.path.join(job["output_dir"], "hocr_files"))
            move_hocr_jobs.append(self.addTask(label="move_hocr_job_-_%i" % (move_hocr_job_number),
                                               command=cmd, dependencies=tesseract_jobs))

        ###
        # Task "hocr_to_teip5_job": create TEI P5 file from hOCR files
        # Dependencies: move_hocr_jobs
        ###
        hocr_to_teip5_jobs = []
        hocr_to_teip5_job_number = 0
        for job in self.pdfImageJobs["images"] + self.pdfImageJobs["pdf"]:
            hocr_to_teip5_job_number += 1
            cmd = "parse_hocr %s %s" % (
                os.path.join(job["output_dir"], "hocr_files"),
                os.path.join(job["output_dir"],
                             os.path.basename(job["path"]).rsplit(".", 1)[0] + ".xml"))
            hocr_to_teip5_jobs.append(self.addTask(label="hocr_to_teip5_job_-_%i" % (hocr_to_teip5_job_number),
                                                   command=cmd, dependencies=move_hocr_jobs,
                                                   nCores=1, memMb=250))

        ###
        # Task "cleanup_job": remove temporary files
        # Dependencies: All
        ###
        self.waitForTasks()
        cleanup_jobs = []
        cleanup_job_counter = 0
        if not self.intermediate:
            for job in self.pdfImageJobs["images"] + self.pdfImageJobs["pdf"]:
                cleanup_job_counter += 1
                cmd = "rm -r %s" % (os.path.join(job["output_dir"], "tmp"))
                cleanup_jobs.append(self.addTask(label="cleanup_job_-_%i" % (cleanup_job_counter),
                                                 command=cmd))
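
# For reference, a sketch of the job description structure that analyze_jobs()
# below returns and OCRWorkflow consumes. The file names here are made up for
# illustration; the workflow only relies on the "images"/"pdf" keys and the
# per-job "path" and "output_dir" entries:
#
#   {
#       "images": [{"path": "input/scan_01.tif", "output_dir": "output/scan_01"}],
#       "pdf": [{"path": "input/book.pdf", "output_dir": "output/book"}]
#   }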
file), "output_dir": os.path.join(output_dir, file.rsplit(".", 1)[0])}) if not skip_pdf: for file in filter(lambda x: x.endswith(".pdf"), files): pdf.append({"path": os.path.join(input_dir, file), "output_dir": os.path.join(output_dir, file.rsplit(".", 1)[0])}) for subdir in filter(lambda x: os.path.isdir(os.path.join(input_dir, x)), files): subdir_files = os.listdir(os.path.join(input_dir, subdir)) if not skip_images: for file in filter(lambda x: x.endswith(".tif") or x.endswith(".tiff"), subdir_files): images.append({"path": os.path.join(input_dir, subdir, file), "output_dir": os.path.join(output_dir, subdir, file.rsplit(".", 1)[0])}) if not skip_pdf: for file in filter(lambda x: x.endswith(".pdf"), subdir_files): pdf.append({"path": os.path.join(input_dir, subdir, file), "output_dir": os.path.join(output_dir, subdir, file.rsplit(".", 1)[0])}) return {"pdf": pdf, "images": images} def normalize_input_filenames(path): ### # Normalize input filenames and directories to avoid bugs and also for better usage and readability. ### for file in os.listdir(path): file_with_path = os.path.join(path, file) if os.path.isdir(file_with_path): normalize_input_filenames(file_with_path) new_file_with_path = os.path.join(path, unicodedata.normalize("NFKD", file.decode("utf-8")).encode("ascii", "ignore").replace(" ", "_")) os.rename(file_with_path, new_file_with_path) def main(): args = parse_arguments() normalize_input_filenames(args.input_dir) jobs = analyze_jobs(args.input_dir, args.output_dir, skip_pdf=args.skip_pdf, skip_images=args.skip_images) wflow = OCRWorkflow(jobs, args.input_dir, args.output_dir, args.lang, args.pdf, args.intermediate, args.nCores, args.mem) retval = wflow.run(mode="local", nCores=args.nCores, memMb=args.mem) sys.exit(retval) if __name__ == "__main__": main()