diff --git a/ocr_pyflow b/ocr_pyflow index 4ad3fc8..f815c26 100755 --- a/ocr_pyflow +++ b/ocr_pyflow @@ -31,125 +31,72 @@ from pyflow import WorkflowRunner +''' TODO: + ' Implement --end-page: Last page to ocr + ' Implement --memMb: Total amount of memory (RAM) available for this workflow. Default: 2048 * nCores + ' Implement --rotate: Rotate pages from input (90, 180, 270) + ' Implement --split-pages: Split pages in half after possible rotation + ' Implement --start-page: First page to ocr +''' def parse_arguments(): - parser = argparse.ArgumentParser( - "Performs OCR of (historical) documents utilizing OCRopus for preprocessing and Tesseract OCR \ + parser = argparse.ArgumentParser("Performs OCR of (historical) documents utilizing OCRopus for preprocessing and Tesseract OCR \ for OCR. Available outputs are HOCR, PDF, shrinked PDF, and simple DTAbf \ (TEI P5 compliant). Software requirements: imagemagick, ocropus, pdftk, pdftoppm, poppler-utils, pyflow, python2.7, tesseract") - parser.add_argument("-i", "--input-directory", - dest="input_dir", - help="Input directory with input (Multipage-)TIFs or PDFs. For each input file an OCR-Run is \ - instantiated and output is created.", + parser.add_argument("-i", + dest="inputDir", + help="Input directory.", required=True) - parser.add_argument("-o", "--output-directory", - dest="output_dir", - help="Directory, where output directories are created if necessary. \ - Default: %s" % (os.path.join(os.path.curdir, "ocr_pyflow")), - required=False, - default=(os.path.join(os.path.curdir, "ocr_pyflow"))) - parser.add_argument("--skip-pdf-processing", - dest="skip_pdf", - help="Skip detection of PDFs as input.", - default=False, - action='store_true', - required=False) - parser.add_argument("--skip-image-processing", - dest="skip_images", - help="Skip detection of images as input.", - default=False, - action='store_true', - required=False) - parser.add_argument("--start-page", - dest='startp', - help="NOT IMPLEMENTED! 
First page to ocr.", - default=-1, - required=False, type=int) - parser.add_argument("--end-page", - dest='endp', - default=-1, - help="NOT IMPLEMENTED! Last page to ocr.", - required=False, type=int) - parser.add_argument("-r", "--rotate-pages", - dest='rotate', - default='norotation', - help="NOT IMPLEMENTED! Rotate pages from input. Values: clockwise, counterclockwise, \ - upsidedown. Default: norotation", - required=False, - choices=['clockwise', 'counterclockwise', 'upsidedown', 'norotation']) - parser.add_argument("-s", "--split-pages", - dest='split', - default=False, - help="NOT IMPLEMENTED! Split pages in half after possible rotation. Default: Not performed.", - required=False, - action='store_true') - parser.add_argument("--ppi-import", - dest="ppi_in", - help="NOT IMPLEMENTED! Scaling for input images. Default: 300 ppi.", - default=300, - required=False, - type=int) - parser.add_argument("-l", "--language", + parser.add_argument("-l", + choices=["deu", "deu_frak", "eng", "enm", "fra", "spa", "frm"], dest='lang', help="Language for OCR", - required=True, - type=str) - parser.add_argument("-p", "--create-pdf", + required=True) + parser.add_argument("-o", + dest="outputDir", + help="Output directory.", + required=True) + parser.add_argument("--skip-image", + action='store_true', + default=False, + dest="skip_images", + help="Skip detection of images as input.", + required=False) + parser.add_argument("--skip-pdf", + action='store_true', + default=False, + dest="skip_pdf", + help="Skip detection of PDFs as input.", + required=False) + parser.add_argument("--pdf", + action='store_true', + default=False, dest='pdf', - default=False, - action='store_true', + help="Create PDF files.", required=False) - parser.add_argument("-c", "--compress-pdf", - dest='comp', - help="NOT IMPLEMENTED!", - default=False, + parser.add_argument("--keep-intermediates", action='store_true', - required=False) - parser.add_argument("--ppi-export", - dest="ppi_out", - help="NOT 
IMPLEMENTED! Scaling for output images in PDF. Default: 150 ppi.", - default=150, - required=False, - type=int) - parser.add_argument("-k", "--keep-intermediate", - dest="intermediate", - help="Keep intermediate files. Default: False", default=False, - action='store_true') - parser.add_argument("--cores", - dest='nCores', - help="Amount of CPUs to use for parallel jobs. Default: Number of available CPUs", + dest="keepIntermediates", + help="Keep intermediate files.", + required=False) + parser.add_argument("--nCores", default=multiprocessing.cpu_count(), + dest="nCores", + help="Total number of cores available.", required=False, type=int) - parser.add_argument("--is-continued", dest='continued', - help="NOT IMPLEMENTED! Enables continuing an erroneous or paused workflow. MUST use the \ - same dataDirRoot as before.", - default=False, - required=False, - action='store_true') - parser.add_argument("--memory", - dest='mem', - help="NOT IMPLEMENTED! Total amount of memory (RAM) available for this workflow. 
\ - Default: %i" % (8192), - default=8192, - required=False, - type=int) - args = parser.parse_args() - return (args) + return parser.parse_args() class OCRWorkflow(WorkflowRunner): - def __init__(self, pdfImageJobs, inputDir, outputDir, lang, pdf, intermediate, nCores, memMb): - self.pdfImageJobs = pdfImageJobs - self.outputDir = outputDir - self.inputDir = inputDir + def __init__(self, jobs, lang, pdf, keepIntermediates, nCores): + self.jobs = jobs self.lang = lang self.pdf = pdf - self.intermediate = intermediate + self.keepIntermediates = keepIntermediates self.nCores = nCores - self.memMb = memMb @@ -160,7 +107,7 @@ class OCRWorkflow(WorkflowRunner): ### mkdir_jobs = [] mkdir_job_number = 0 - for job in self.pdfImageJobs["images"] + self.pdfImageJobs["pdf"]: + for job in self.jobs["images"] + self.jobs["pdf"]: mkdir_job_number += 1 cmd = "mkdir -p %s %s %s %s" % ( os.path.join(job["output_dir"], "hocr_files"), @@ -176,7 +123,7 @@ class OCRWorkflow(WorkflowRunner): ### split_jobs = [] split_job_number = 0 - for job in self.pdfImageJobs["images"]: + for job in self.jobs["images"]: split_job_number += 1 # TODO: Make the following command work ''' @@ -191,13 +138,13 @@ class OCRWorkflow(WorkflowRunner): os.path.join(job["output_dir"], "tmp", "tiff_files", os.path.basename(job["path"]).rsplit(".", 1)[0] + ".pdf"), os.path.join(job["output_dir"], "tmp", "tiff_files", os.path.basename(job["path"]).rsplit(".", 1)[0]), os.path.join(job["output_dir"], "tmp", "tiff_files", os.path.basename(job["path"]).rsplit(".", 1)[0] + ".pdf")) - split_jobs.append(self.addTask(label="split_job_-_%i" % (split_job_number), command=cmd, dependencies=mkdir_jobs, nCores=1, memMb=1024)) - for job in self.pdfImageJobs["pdf"]: + split_jobs.append(self.addTask(label="split_job_-_%i" % (split_job_number), command=cmd, dependencies=mkdir_jobs)) + for job in self.jobs["pdf"]: split_job_number += 1 cmd = "pdftoppm %s %s -tiff -r 300 -tiffcompression lzw -cropbox" % ( job["path"], 
os.path.join(job["output_dir"], "tmp", "tiff_files", os.path.basename(job["path"]).rsplit(".", 1)[0])) - split_jobs.append(self.addTask(label="split_job_-_%i" % (split_job_number), command=cmd, dependencies=mkdir_jobs, nCores=1, memMb=1024)) + split_jobs.append(self.addTask(label="split_job_-_%i" % (split_job_number), command=cmd, dependencies=mkdir_jobs)) ### @@ -206,13 +153,12 @@ class OCRWorkflow(WorkflowRunner): ### ocropusnlbin_jobs = [] ocropusnlbin_job_number = 0 - for job in self.pdfImageJobs["images"] + self.pdfImageJobs["pdf"]: + for job in self.jobs["images"] + self.jobs["pdf"]: ocropusnlbin_job_number += 1 - cmd = "ocropus-nlbin -Q %i -o %s %s" % ( - self.nCores, + cmd = "ocropus-nlbin -o %s %s" % ( os.path.join(job["output_dir"], "tmp", "ocropus-nlbin"), os.path.join(job["output_dir"], "tmp", "tiff_files", os.path.basename(job["path"]).rsplit(".", 1)[0] + "-*.tif")) - ocropusnlbin_jobs.append(self.addTask(label="ocropusnlbin_job_-_%i" % (ocropusnlbin_job_number), command=cmd, dependencies=split_jobs, nCores=self.nCores, memMb=self.memMb)) + ocropusnlbin_jobs.append(self.addTask(label="ocropusnlbin_job_-_%i" % (ocropusnlbin_job_number), command=cmd, dependencies=split_jobs)) ### @@ -222,7 +168,7 @@ class OCRWorkflow(WorkflowRunner): self.waitForTasks() tesseract_jobs = [] tesseract_job_number = 0 - for job in self.pdfImageJobs["images"] + self.pdfImageJobs["pdf"]: + for job in self.jobs["images"] + self.jobs["pdf"]: # This list is empty if you don't wait for ocropus_nlbin_jobs to complete for file in filter(lambda x: x.endswith(".bin.png"), os.listdir(os.path.join(job["output_dir"], "tmp", "ocropus-nlbin"))): tesseract_job_number += 1 @@ -231,7 +177,7 @@ class OCRWorkflow(WorkflowRunner): os.path.join(job["output_dir"], "tmp", "tesseract", file.rsplit(".", 2)[0]), self.lang, "pdf" if self.pdf else "") - tesseract_jobs.append(self.addTask(label="tesseract_job_-_%i" % (tesseract_job_number), command=cmd, dependencies=ocropusnlbin_jobs, nCores=1, 
memMb=2048)) + tesseract_jobs.append(self.addTask(label="tesseract_job_-_%i" % (tesseract_job_number), command=cmd, dependencies=ocropusnlbin_jobs, nCores=min(4, self.nCores))) ### @@ -241,12 +187,12 @@ class OCRWorkflow(WorkflowRunner): pdf_merge_jobs = [] pdf_merge_job_number = 0 if self.pdf: - for job in self.pdfImageJobs["images"] + self.pdfImageJobs["pdf"]: + for job in self.jobs["images"] + self.jobs["pdf"]: pdf_merge_job_number += 1 cmd = "pdftk %s cat output %s" % ( os.path.join(job["output_dir"], "tmp", "tesseract", "*.pdf"), os.path.join(job["output_dir"], os.path.basename(job["path"].rsplit(".", 1)[0] + ".pdf"))) - pdf_merge_jobs.append(self.addTask(label="pdf_merge_job_-_%i" % (pdf_merge_job_number), command=cmd, dependencies=tesseract_jobs, nCores=1, memMb=4096)) + pdf_merge_jobs.append(self.addTask(label="pdf_merge_job_-_%i" % (pdf_merge_job_number), command=cmd, dependencies=tesseract_jobs)) ### @@ -256,11 +202,11 @@ class OCRWorkflow(WorkflowRunner): pdf_to_txt_jobs = [] pdf_to_txt_job_number = 0 if self.pdf: - for job in self.pdfImageJobs["images"] + self.pdfImageJobs["pdf"]: + for job in self.jobs["images"] + self.jobs["pdf"]: pdf_to_txt_job_number += 1 cmd = "pdftotext -raw %s" % ( os.path.join(job["output_dir"], os.path.basename(job["path"].rsplit(".", 1)[0] + ".pdf"))) - pdf_merge_jobs.append(self.addTask(label="pdf_to_txt_job_-_%i" % (pdf_to_txt_job_number), command=cmd, dependencies=pdf_merge_jobs, nCores=1, memMb=4096)) + pdf_merge_jobs.append(self.addTask(label="pdf_to_txt_job_-_%i" % (pdf_to_txt_job_number), command=cmd, dependencies=pdf_merge_jobs)) ### @@ -269,7 +215,7 @@ class OCRWorkflow(WorkflowRunner): ### move_hocr_jobs = [] move_hocr_job_number = 0 - for job in self.pdfImageJobs["images"] + self.pdfImageJobs["pdf"]: + for job in self.jobs["images"] + self.jobs["pdf"]: move_hocr_job_number += 1 cmd = "mv %s %s" % ( os.path.join(job["output_dir"], "tmp", "tesseract", "*.hocr"), @@ -277,18 +223,18 @@ class OCRWorkflow(WorkflowRunner): 
move_hocr_jobs.append(self.addTask(label="move_hocr_job_-_%i" % (move_hocr_job_number), command=cmd, dependencies=tesseract_jobs)) - ### + ### # Task "hocr_to_teip5_job": create TEI P5 file from hocr files # Dependencies: move_hocr_jobs ### hocr_to_teip5_jobs = [] hocr_to_teip5_job_number = 0 - for job in self.pdfImageJobs["images"] + self.pdfImageJobs["pdf"]: + for job in self.jobs["images"] + self.jobs["pdf"]: hocr_to_teip5_job_number += 1 cmd = "parse_hocr %s %s" % ( os.path.join(job["output_dir"], "hocr_files"), os.path.join(os.path.join(job["output_dir"], os.path.basename(job["path"]).rsplit(".", 1)[0] + ".xml"))) - hocr_to_teip5_jobs.append(self.addTask(label="hocr_to_teip5_job_-_%i" % (hocr_to_teip5_job_number), command=cmd, dependencies=move_hocr_jobs, nCores=1, memMb=250)) + hocr_to_teip5_jobs.append(self.addTask(label="hocr_to_teip5_job_-_%i" % (hocr_to_teip5_job_number), command=cmd, dependencies=move_hocr_jobs)) ### @@ -298,33 +244,33 @@ class OCRWorkflow(WorkflowRunner): self.waitForTasks() cleanup_jobs = [] cleanup_job_counter = 0 - if not self.intermediate: - for job in self.pdfImageJobs["images"] + self.pdfImageJobs["pdf"]: + if not self.keepIntermediates: + for job in self.jobs["images"] + self.jobs["pdf"]: cleanup_job_counter += 1 cmd = "rm -r %s" % (os.path.join(job["output_dir"], "tmp")) cleanup_jobs.append(self.addTask(label="cleanup_job_-_%i" % (cleanup_job_counter), command=cmd)) -def analyze_jobs(input_dir, output_dir, skip_pdf=False, skip_images=False): - files = os.listdir(input_dir) +def analyze_jobs(inputDir, outputDir, skip_pdf=False, skip_images=False): + files = os.listdir(inputDir) images = [] pdf = [] if not skip_images: for file in filter(lambda x: x.endswith(".tif") or x.endswith(".tiff"), files): - images.append({"path": os.path.join(input_dir, file), "output_dir": os.path.join(output_dir, file.rsplit(".", 1)[0])}) + images.append({"path": 
os.path.join(inputDir, file), "output_dir": os.path.join(outputDir, file.rsplit(".", 1)[0])}) if not skip_pdf: for file in filter(lambda x: x.endswith(".pdf"), files): - pdf.append({"path": os.path.join(input_dir, file), "output_dir": os.path.join(output_dir, file.rsplit(".", 1)[0])}) - for subdir in filter(lambda x: os.path.isdir(os.path.join(input_dir, x)), files): - subdir_files = os.listdir(os.path.join(input_dir, subdir)) + pdf.append({"path": os.path.join(inputDir, file), "output_dir": os.path.join(outputDir, file.rsplit(".", 1)[0])}) + for subDir in filter(lambda x: os.path.isdir(os.path.join(inputDir, x)), files): + subDirFiles = os.listdir(os.path.join(inputDir, subDir)) if not skip_images: - for file in filter(lambda x: x.endswith(".tif") or x.endswith(".tiff"), subdir_files): - images.append({"path": os.path.join(input_dir, subdir, file), "output_dir": os.path.join(output_dir, subdir, file.rsplit(".", 1)[0])}) + for file in filter(lambda x: x.endswith(".tif") or x.endswith(".tiff"), subDirFiles): + images.append({"path": os.path.join(inputDir, subDir, file), "output_dir": os.path.join(outputDir, subDir, file.rsplit(".", 1)[0])}) if not skip_pdf: - for file in filter(lambda x: x.endswith(".pdf"), subdir_files): - pdf.append({"path": os.path.join(input_dir, subdir, file), "output_dir": os.path.join(output_dir, subdir, file.rsplit(".", 1)[0])}) + for file in filter(lambda x: x.endswith(".pdf"), subDirFiles): + pdf.append({"path": os.path.join(inputDir, subDir, file), "output_dir": os.path.join(outputDir, subDir, file.rsplit(".", 1)[0])}) return {"pdf": pdf, "images": images} @@ -345,10 +291,11 @@ def normalize_input_filenames(path): def main(): args = parse_arguments() - normalize_input_filenames(args.input_dir) - jobs = analyze_jobs(args.input_dir, args.output_dir, skip_pdf=args.skip_pdf, skip_images=args.skip_images) - wflow = OCRWorkflow(jobs, args.input_dir, args.output_dir, args.lang, args.pdf, args.intermediate, args.nCores, args.mem) - retval = 
wflow.run(mode="local", nCores=args.nCores, memMb=args.mem) + normalize_input_filenames(args.inputDir) + jobs = analyze_jobs(args.inputDir, args.outputDir, skip_pdf=args.skip_pdf, skip_images=args.skip_images) + + wflow = OCRWorkflow(jobs, args.lang, args.pdf, args.keepIntermediates, args.nCores) + retval = wflow.run(nCores=args.nCores) sys.exit(retval)