#!/usr/bin/env python2.7
# coding=utf-8

"""
ocr_pyflow.py

Date: 18/10/2018
Usage: For usage instructions run with option --help
Author: Madis Rumming <mrumming@uni-bielefeld.de>
"""

__author__ = "Madis Rumming <mrumming@uni-bielefeld.de>"
__copyright__ = "Copyright 2018, Data Infrastructure and Digital Humanities, \
SFB 1288, Bielefeld University"
__version__ = "0.7"
__maintainer__ = "Patrick Jentsch"
__email__ = "p.jentsch@uni-bielefeld.de"
__status__ = "Development"


import argparse
import multiprocessing
import os
import sys
import unicodedata
from pyflow import WorkflowRunner

''' TODO:
' Implement --end-page: Last page to OCR.
' Implement --memMb: Total amount of memory (RAM) available for this workflow. Default: 2048 * nCores.
' Implement --rotate: Rotate pages from input (90, 180, 270).
' Implement --split-pages: Split pages in half after possible rotation.
' Implement --start-page: First page to OCR.
'''
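

# Example invocation (hypothetical paths; run with --help for all options):
#   ./ocr_pyflow.py -i /data/scans -o /data/ocr_out -l deu_frak --pdf --nCores 8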


def parse_arguments():
    parser = argparse.ArgumentParser(
        description="Performs OCR on (historical) documents, utilizing OCRopus "
                    "for preprocessing and Tesseract for the actual OCR. "
                    "Available outputs are hOCR, PDF, shrunken PDF, and simple "
                    "DTAbf (TEI P5 compliant). Software requirements: "
                    "imagemagick, ocropus, pdftk, pdftoppm, poppler-utils, "
                    "pyflow, python2.7, tesseract")

parser.add_argument("-i",
|
|
dest="inputDir",
|
|
help="Input directory.",
|
|
required=True)
|
|
parser.add_argument("-l",
|
|
choices=["deu", "deu_frak", "eng", "enm", "fra", "spa", "frm"],
|
|
dest='lang',
|
|
help="Language for OCR",
|
|
required=True)
|
|
parser.add_argument("-o",
|
|
dest="outputDir",
|
|
help="Output directory.",
|
|
required=True)
|
|
parser.add_argument("--skip-image",
|
|
action='store_true',
|
|
default=False,
|
|
dest="skip_images",
|
|
help="Skip detection of images as input.",
|
|
required=False)
|
|
parser.add_argument("--skip-pdf",
|
|
action='store_true',
|
|
default=False,
|
|
dest="skip_pdf",
|
|
help="Skip detection of PDFs as input.",
|
|
required=False)
|
|
parser.add_argument("--pdf",
|
|
action='store_true',
|
|
default=False,
|
|
dest='pdf',
|
|
help="Create PDF files.",
|
|
required=False)
|
|
parser.add_argument("--keep-intermediates",
|
|
action='store_true',
|
|
default=False,
|
|
dest="keepIntermediates",
|
|
help="Keep intermediate files.",
|
|
required=False)
|
|
parser.add_argument("--nCores",
|
|
default=multiprocessing.cpu_count(),
|
|
dest="nCores",
|
|
help="Total number of cores available.",
|
|
required=False,
|
|
type=int)
|
|
return parser.parse_args()
|
|
|
|
|
|
|
|
class OCRWorkflow(WorkflowRunner):
    def __init__(self, jobs, lang, pdf, keepIntermediates, nCores):
        self.jobs = jobs
        self.lang = lang
        self.pdf = pdf
        self.keepIntermediates = keepIntermediates
        self.nCores = nCores

    def workflow(self):
        ###
        # Task "mkdir_job": create output directories
        # Dependencies: None
        ###
        mkdir_jobs = []
        mkdir_job_number = 0
        for job in self.jobs["images"] + self.jobs["pdf"]:
            mkdir_job_number += 1
            cmd = "mkdir -p %s %s %s %s" % (
                os.path.join(job["output_dir"], "hocr_files"),
                os.path.join(job["output_dir"], "tmp", "ocropus-nlbin"),
                os.path.join(job["output_dir"], "tmp", "tesseract"),
                os.path.join(job["output_dir"], "tmp", "tiff_files"))
            mkdir_jobs.append(self.addTask(label="mkdir_job_-_%i" % (mkdir_job_number), command=cmd))

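        # For a hypothetical job with output_dir "/out/doc1" the generated
        # command is:
        #   mkdir -p /out/doc1/hocr_files /out/doc1/tmp/ocropus-nlbin /out/doc1/tmp/tesseract /out/doc1/tmp/tiff_files
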
        ###
        # Task "split_job": split input file into one tiff file per page
        # Dependencies: mkdir_jobs
        ###
        split_jobs = []
        split_job_number = 0
        for job in self.jobs["images"]:
            split_job_number += 1
            # TODO: Make the following command work
            '''
            cmd = "convert %s %s" % (
                job["path"],
                os.path.join(job["output_dir"], "tmp", "tiff_files", os.path.basename(job["path"]).rsplit(".", 1)[0] + "-%sd.tif" % ("%")))
            '''
            # WORKAROUND: convert the input tiff to a temporary pdf, split that
            # into one tiff per page with pdftoppm, then remove the temporary pdf.
            cmd = "tiff2pdf -o %s %s && pdftoppm %s %s -tiff -r 300 -tiffcompression lzw -cropbox && rm %s" % (
                os.path.join(job["output_dir"], "tmp", "tiff_files", os.path.basename(job["path"]).rsplit(".", 1)[0] + ".pdf"),
                job["path"],
                os.path.join(job["output_dir"], "tmp", "tiff_files", os.path.basename(job["path"]).rsplit(".", 1)[0] + ".pdf"),
                os.path.join(job["output_dir"], "tmp", "tiff_files", os.path.basename(job["path"]).rsplit(".", 1)[0]),
                os.path.join(job["output_dir"], "tmp", "tiff_files", os.path.basename(job["path"]).rsplit(".", 1)[0] + ".pdf"))
            split_jobs.append(self.addTask(label="split_job_-_%i" % (split_job_number), command=cmd, dependencies=mkdir_jobs))
        for job in self.jobs["pdf"]:
            split_job_number += 1
            cmd = "pdftoppm %s %s -tiff -r 300 -tiffcompression lzw -cropbox" % (
                job["path"],
                os.path.join(job["output_dir"], "tmp", "tiff_files", os.path.basename(job["path"]).rsplit(".", 1)[0]))
            split_jobs.append(self.addTask(label="split_job_-_%i" % (split_job_number), command=cmd, dependencies=mkdir_jobs))

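        # pdftoppm derives the page file names from the given prefix, so a
        # hypothetical /in/doc1.pdf ends up as
        # /out/doc1/tmp/tiff_files/doc1-1.tif, doc1-2.tif, ... (page numbers
        # are zero-padded for documents with ten or more pages).
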
        ###
        # Task "ocropus_nlbin_job": binarize tiff files from previous split
        # Dependencies: split_jobs
        ###
        ocropusnlbin_jobs = []
        ocropusnlbin_job_number = 0
        for job in self.jobs["images"] + self.jobs["pdf"]:
            ocropusnlbin_job_number += 1
            cmd = "ocropus-nlbin -o %s %s" % (
                os.path.join(job["output_dir"], "tmp", "ocropus-nlbin"),
                os.path.join(job["output_dir"], "tmp", "tiff_files", os.path.basename(job["path"]).rsplit(".", 1)[0] + "-*.tif"))
            ocropusnlbin_jobs.append(self.addTask(label="ocropusnlbin_job_-_%i" % (ocropusnlbin_job_number), command=cmd, dependencies=split_jobs))

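        # ocropus-nlbin numbers its output files sequentially, e.g.
        # /out/doc1/tmp/ocropus-nlbin/0001.bin.png (binarized) and
        # 0001.nrm.png (normalized grayscale); only the .bin.png files are
        # used below.
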
        ###
        # Task "tesseract_job": perform OCR on the binarized images
        # Dependencies: ocropusnlbin_jobs
        ###
        # The os.listdir call below runs at workflow definition time, so the
        # listed directories are empty unless the ocropusnlbin_jobs have
        # already finished.
        self.waitForTasks()
        tesseract_jobs = []
        tesseract_job_number = 0
        for job in self.jobs["images"] + self.jobs["pdf"]:
            for file in filter(lambda x: x.endswith(".bin.png"), os.listdir(os.path.join(job["output_dir"], "tmp", "ocropus-nlbin"))):
                tesseract_job_number += 1
                cmd = "tesseract %s %s -l %s hocr %s" % (
                    os.path.join(job["output_dir"], "tmp", "ocropus-nlbin", file),
                    os.path.join(job["output_dir"], "tmp", "tesseract", file.rsplit(".", 2)[0]),
                    self.lang,
                    "pdf" if self.pdf else "")
                tesseract_jobs.append(self.addTask(label="tesseract_job_-_%i" % (tesseract_job_number), command=cmd, dependencies=ocropusnlbin_jobs, nCores=min(4, self.nCores)))

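        # e.g. for a hypothetical page 0001.bin.png with -l deu and --pdf:
        #   tesseract .../ocropus-nlbin/0001.bin.png .../tesseract/0001 -l deu hocr pdf
        # which writes 0001.hocr and 0001.pdf into the tesseract tmp directory.
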
        ###
        # Task "pdf_merge_job": merge the PDF files of each job into one
        # Dependencies: tesseract_jobs
        ###
        pdf_merge_jobs = []
        pdf_merge_job_number = 0
        if self.pdf:
            for job in self.jobs["images"] + self.jobs["pdf"]:
                pdf_merge_job_number += 1
                cmd = "pdftk %s cat output %s" % (
                    os.path.join(job["output_dir"], "tmp", "tesseract", "*.pdf"),
                    os.path.join(job["output_dir"], os.path.basename(job["path"]).rsplit(".", 1)[0] + ".pdf"))
                pdf_merge_jobs.append(self.addTask(label="pdf_merge_job_-_%i" % (pdf_merge_job_number), command=cmd, dependencies=tesseract_jobs))

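        # e.g. (hypothetical paths):
        #   pdftk /out/doc1/tmp/tesseract/*.pdf cat output /out/doc1/doc1.pdf
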
        ###
        # Task "pdf_to_txt_job": extract plain text from the merged PDF files
        # Dependencies: pdf_merge_jobs
        ###
        pdf_to_txt_jobs = []
        pdf_to_txt_job_number = 0
        if self.pdf:
            for job in self.jobs["images"] + self.jobs["pdf"]:
                pdf_to_txt_job_number += 1
                cmd = "pdftotext -raw %s" % (
                    os.path.join(job["output_dir"], os.path.basename(job["path"]).rsplit(".", 1)[0] + ".pdf"))
                pdf_to_txt_jobs.append(self.addTask(label="pdf_to_txt_job_-_%i" % (pdf_to_txt_job_number), command=cmd, dependencies=pdf_merge_jobs))

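        # pdftotext derives the output name from the input, so a hypothetical
        # /out/doc1/doc1.pdf yields /out/doc1/doc1.txt.
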
        ###
        # Task "move_hocr_job": move hocr files from <output_dir>/tmp/tesseract to <output_dir>/hocr_files
        # Dependencies: tesseract_jobs
        ###
        move_hocr_jobs = []
        move_hocr_job_number = 0
        for job in self.jobs["images"] + self.jobs["pdf"]:
            move_hocr_job_number += 1
            cmd = "mv %s %s" % (
                os.path.join(job["output_dir"], "tmp", "tesseract", "*.hocr"),
                os.path.join(job["output_dir"], "hocr_files"))
            move_hocr_jobs.append(self.addTask(label="move_hocr_job_-_%i" % (move_hocr_job_number), command=cmd, dependencies=tesseract_jobs))

        ###
        # Task "hocr_to_teip5_job": create a TEI P5 file from the hocr files
        # Dependencies: move_hocr_jobs
        ###
        hocr_to_teip5_jobs = []
        hocr_to_teip5_job_number = 0
        for job in self.jobs["images"] + self.jobs["pdf"]:
            hocr_to_teip5_job_number += 1
            cmd = "parse_hocr %s %s" % (
                os.path.join(job["output_dir"], "hocr_files"),
                os.path.join(job["output_dir"], os.path.basename(job["path"]).rsplit(".", 1)[0] + ".xml"))
            hocr_to_teip5_jobs.append(self.addTask(label="hocr_to_teip5_job_-_%i" % (hocr_to_teip5_job_number), command=cmd, dependencies=move_hocr_jobs))

        ###
        # Task "cleanup_job": remove temporary files
        # Dependencies: all previous tasks
        ###
        # The cleanup tasks are added without explicit dependencies, so wait
        # for all previous tasks to finish before scheduling them.
        self.waitForTasks()
        cleanup_jobs = []
        cleanup_job_counter = 0
        if not self.keepIntermediates:
            for job in self.jobs["images"] + self.jobs["pdf"]:
                cleanup_job_counter += 1
                cmd = "rm -r %s" % (os.path.join(job["output_dir"], "tmp"))
                cleanup_jobs.append(self.addTask(label="cleanup_job_-_%i" % (cleanup_job_counter), command=cmd))


def analyze_jobs(inputDir, outputDir, skip_pdf=False, skip_images=False):
    files = os.listdir(inputDir)
    images = []
    pdf = []

    if not skip_images:
        for file in filter(lambda x: x.endswith(".tif") or x.endswith(".tiff"), files):
            images.append({"path": os.path.join(inputDir, file), "output_dir": os.path.join(outputDir, file.rsplit(".", 1)[0])})
    if not skip_pdf:
        for file in filter(lambda x: x.endswith(".pdf"), files):
            pdf.append({"path": os.path.join(inputDir, file), "output_dir": os.path.join(outputDir, file.rsplit(".", 1)[0])})
    for subDir in filter(lambda x: os.path.isdir(os.path.join(inputDir, x)), files):
        subDirFiles = os.listdir(os.path.join(inputDir, subDir))
        if not skip_images:
            for file in filter(lambda x: x.endswith(".tif") or x.endswith(".tiff"), subDirFiles):
                images.append({"path": os.path.join(inputDir, subDir, file), "output_dir": os.path.join(outputDir, subDir, file.rsplit(".", 1)[0])})
        if not skip_pdf:
            for file in filter(lambda x: x.endswith(".pdf"), subDirFiles):
                pdf.append({"path": os.path.join(inputDir, subDir, file), "output_dir": os.path.join(outputDir, subDir, file.rsplit(".", 1)[0])})
    return {"pdf": pdf, "images": images}
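

# For a hypothetical input directory containing doc1.pdf and scans/page.tif,
# analyze_jobs returns:
#   {"pdf": [{"path": "<inputDir>/doc1.pdf", "output_dir": "<outputDir>/doc1"}],
#    "images": [{"path": "<inputDir>/scans/page.tif", "output_dir": "<outputDir>/scans/page"}]}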


def normalize_input_filenames(path):
    ###
    # Normalize input filenames and directories to avoid bugs and to improve
    # usability and readability.
    ###
    for file in os.listdir(path):
        file_with_path = os.path.join(path, file)
        if os.path.isdir(file_with_path):
            normalize_input_filenames(file_with_path)
        new_file_with_path = os.path.join(path, unicodedata.normalize("NFKD", file.decode("utf-8")).encode("ascii", "ignore").replace(" ", "_"))
        os.rename(file_with_path, new_file_with_path)
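

# e.g. a hypothetical file "Über das Meer 1.pdf" is renamed to
# "Uber_das_Meer_1.pdf": NFKD decomposition plus ascii-ignore strips the
# diacritics, and spaces become underscores.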


def main():
    args = parse_arguments()

    normalize_input_filenames(args.inputDir)
    jobs = analyze_jobs(args.inputDir, args.outputDir, skip_pdf=args.skip_pdf, skip_images=args.skip_images)

    wflow = OCRWorkflow(jobs, args.lang, args.pdf, args.keepIntermediates, args.nCores)
    retval = wflow.run(nCores=args.nCores)
    sys.exit(retval)


if __name__ == "__main__":
    main()