Mirror of https://gitlab.ub.uni-bielefeld.de/sfb1288inf/ocr.git, synced 2024-12-26 15:54:19 +00:00

Commit 132490a929: "Update"
Parent commit: ce864e205a
Dockerfile (69 changed lines)
@@ -1,66 +1,57 @@
-FROM ubuntu:18.04
+FROM debian:stretch

 MAINTAINER Patrick Jentsch <p.jentsch@uni-bielefeld.de>

 ENV DEBIAN_FRONTEND=noninteractive
-ENV LANG=en_US.UTF-8
+ENV LANG=C.UTF-8
 ENV PYFLOW_VERSION 1.1.20
+ENV OCROPY_VERSION 1.3.3

 RUN apt-get update && \
 apt-get install -y --no-install-recommends \
-gnupg2
+apt-transport-https \
+ca-certificates \
-# Add PPA for pdftk
+gnupg2 \
-RUN echo "deb http://ppa.launchpad.net/malteworld/ppa/ubuntu bionic main" >> /etc/apt/sources.list && \
-echo "deb-src http://ppa.launchpad.net/malteworld/ppa/ubuntu bionic main" >> /etc/apt/sources.list && \
-apt-key adv --keyserver keyserver.ubuntu.com --recv-keys BCA68C33DA36783B03E981C820D0BB61B700CE29

-RUN apt-get update && \
-apt-get install -y --no-install-recommends \
-ghostscript \
-git \
 imagemagick \
 libtiff-tools \
-locales \
 pdftk \
 poppler-utils \
 python2.7 \
-python3.6 \
+python3.5 \
-python-pip \
+python-numpy \
-python-tk \
-tesseract-ocr \
 wget

-# Configure locales
-RUN locale-gen "$LANG"

 WORKDIR /root

 # Install pyFlow
-RUN wget -nv http://github.com/Illumina/pyflow/releases/download/v"$PYFLOW_VERSION"/pyflow-"$PYFLOW_VERSION".tar.gz && \
+RUN wget -nv https://github.com/Illumina/pyflow/releases/download/v"$PYFLOW_VERSION"/pyflow-"$PYFLOW_VERSION".tar.gz && \
 tar -xzf pyflow-"$PYFLOW_VERSION".tar.gz && \
+rm pyflow-"$PYFLOW_VERSION".tar.gz && \
 cd pyflow-"$PYFLOW_VERSION" && \
 python2.7 setup.py build install && \
-cd /root && \
+cd ..
-rm pyflow-"$PYFLOW_VERSION".tar.gz

-# Install Tesseract OCR Data Files
+# Install Tesseract OCR and Data Files
-RUN wget -nv http://github.com/tesseract-ocr/tessdata_best/raw/master/deu.traineddata -P /usr/share/tesseract-ocr/4.00/tessdata && \
+RUN echo "deb https://notesalexp.org/tesseract-ocr/stretch/ stretch main" >> /etc/apt/sources.list && \
-wget -nv http://github.com/tesseract-ocr/tessdata/raw/3.04.00/deu_frak.traineddata -P /usr/share/tesseract-ocr/4.00/tessdata && \
+wget -O - https://notesalexp.org/debian/alexp_key.asc | apt-key add - && \
-wget -nv http://github.com/tesseract-ocr/tessdata_best/raw/master/eng.traineddata -P /usr/share/tesseract-ocr/4.00/tessdata && \
+apt-get update && \
-wget -nv http://github.com/tesseract-ocr/tessdata_best/raw/master/enm.traineddata -P /usr/share/tesseract-ocr/4.00/tessdata && \
+apt-get install -y --no-install-recommends tesseract-ocr && \
-wget -nv http://github.com/tesseract-ocr/tessdata_best/raw/master/fra.traineddata -P /usr/share/tesseract-ocr/4.00/tessdata && \
+wget -nv https://github.com/tesseract-ocr/tessdata_best/raw/master/deu.traineddata -P /usr/share/tesseract-ocr/4.00/tessdata && \
-wget -nv http://github.com/tesseract-ocr/tessdata_best/raw/master/frm.traineddata -P /usr/share/tesseract-ocr/4.00/tessdata
+wget -nv https://github.com/tesseract-ocr/tessdata/raw/master/deu_frak.traineddata -P /usr/share/tesseract-ocr/4.00/tessdata && \
+wget -nv https://github.com/tesseract-ocr/tessdata_best/raw/master/eng.traineddata -P /usr/share/tesseract-ocr/4.00/tessdata && \
+wget -nv https://github.com/tesseract-ocr/tessdata_best/raw/master/enm.traineddata -P /usr/share/tesseract-ocr/4.00/tessdata && \
+wget -nv https://github.com/tesseract-ocr/tessdata_best/raw/master/fra.traineddata -P /usr/share/tesseract-ocr/4.00/tessdata && \
+wget -nv https://github.com/tesseract-ocr/tessdata_best/raw/master/frm.traineddata -P /usr/share/tesseract-ocr/4.00/tessdata

-# Install OCRopus
+# Install ocropy
-RUN git clone http://github.com/tmbdev/ocropy && \
+RUN wget -nv https://github.com/tmbdev/ocropy/archive/v"$OCROPY_VERSION".tar.gz && \
-cd ocropy && \
+tar -xzf v"$OCROPY_VERSION".tar.gz && \
-apt-get install -y --no-install-recommends $(cat PACKAGES) && \
+rm v"$OCROPY_VERSION".tar.gz && \
-pip install -r requirements.txt && \
+cd ocropy-"$OCROPY_VERSION" && \
-wget -nv http://www.tmbdev.net/en-default.pyrnn.gz && \
+apt-get install -y --no-install-recommends $(cat PACKAGES) python-pil python-tk && \
-mv en-default.pyrnn.gz models/ && \
+wget -nv http://www.tmbdev.net/en-default.pyrnn.gz -P models/ && \
 python2.7 setup.py install && \
-cd /root
+cd ..

 COPY ocr_pyflow /usr/local/bin
 COPY parse_hocr /usr/local/bin
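The updated Dockerfile builds a Debian Stretch image with Tesseract OCR (from notesalexp.org), ocropy 1.3.3, pyFlow 1.1.20 and the two pipeline scripts on the PATH. A minimal build-and-run sketch; the image tag, host path and mount point are assumptions, and only the -i and -o options are taken from this diff:

docker build -t sfb1288-ocr .
docker run --rm -v /data/scans:/files sfb1288-ocr ocr_pyflow -i /files/input -o /files/output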
ocr_pyflow (586 changed lines)
@@ -1,10 +1,12 @@
 #!/usr/bin/env python2.7
 # coding=utf-8



 """
 ocr_pyflow.py

-Date: 01/04/2018
+Date: 18/10/2018
 Usage: For usage instructions run with option --help
 Author: Madis Rumming <mrumming@uni-bielefeld.de>
 """
@@ -13,45 +15,43 @@ __author__ = "Madis Rumming <mrumming@uni-bielefeld.de>"
 __copyright__ = "Copyright 2018, Data Infrastructure and Digital Humanities,\
 SFB 1288, Bielefeld University"

-__version__ = "0.6"
+__version__ = "0.7"
-__maintainer__ = "Madis Rumming"
+__maintainer__ = "Patrick Jentsch"
-__email__ = "mrumming@uni-bielefeld.de"
+__email__ = "p.jentsch@uni-bielefeld.de"
 __status__ = "Development"



 import argparse
+import multiprocessing
 import os
 import sys
-# from string import maketrans
+import unicodedata
-# from lxml import html
 from pyflow import WorkflowRunner



 ocropusnlbin_bin = "/usr/local/bin/ocropus-nlbin"



 def parse_arguments():
 parser = argparse.ArgumentParser(
-"Performs OCR of (historical) documents utilizing ocropy for preprocessing and tesseract \
+"Performs OCR of (historical) documents utilizing OCRopus for preprocessing and Tesseract OCR \
-for final OCR. Available outputs are HOCR, PDF, shrinked PDF, and simple DTAbf \
+for OCR. Available outputs are HOCR, PDF, shrinked PDF, and simple DTAbf \
-(TEI P5 compliant). Software requirements: python2.7, pyflow, pdftoppm, ocropus, \
+(TEI P5 compliant). Software requirements: imagemagick, ocropus, pdftk, pdftoppm, poppler-utils, pyflow, python2.7, tesseract")
-tesseract, ghostscript, imagick, ")

 parser.add_argument("-i", "--input-directory",
 dest="input_dir",
-help="Input directory with input images/PDFs. For each PDF an OCR-Run is \
+help="Input directory with input (Multipage-)TIFs or PDFs. For each input file an OCR-Run is \
-instantiated and output is created. If images reside in the directory, a \
+instantiated and output is created.",
-single OCR run is performed. For each subdirectory containing images, a \
-particular OCR run is instantiated.",
 required=True)
 parser.add_argument("-o", "--output-directory",
 dest="output_dir",
 help="Directory, where output directories are created if necessary. \
-Default: %s" % (os.path.curdir),
+Default: %s" % (os.path.join(os.path.curdir, "ocr_pyflow")),
 required=False,
-default=os.path.curdir)
+default=(os.path.join(os.path.curdir, "ocr_pyflow")))
-parser.add_argument("--image-suffix",
-dest="suffix",
-help="Input images suffix. Case-sensitive! tiff!=TIFF!=Tiff Default: tif.",
-default="tif",
-required=False)
 parser.add_argument("--skip-pdf-processing",
 dest="skip_pdf",
 help="Skip detection of PDFs as input.",
@@ -64,36 +64,32 @@ def parse_arguments():
 default=False,
 action='store_true',
 required=False)
-parser.add_argument("--skip_binarization",
-dest="skip_bin",
-help="skip binarizaiton pre-processing.",
-default=False,
-action="store_true",
-required=False)
 parser.add_argument("--start-page",
 dest='startp',
-default=-1, help="First page to ocr.",
+help="NOT IMPLEMENTED! First page to ocr.",
+default=-1,
 required=False, type=int)
 parser.add_argument("--end-page",
 dest='endp',
 default=-1,
-help="Last page to ocr.",
+help="NOT IMPLEMENTED! Last page to ocr.",
 required=False, type=int)
 parser.add_argument("-r", "--rotate-pages",
 dest='rotate',
 default='norotation',
-help="Rotate pages from input. Values: clockwise, counterclockwise, \
+help="NOT IMPLEMENTED! Rotate pages from input. Values: clockwise, counterclockwise, \
 upsidedown. Default: norotation",
 required=False,
 choices=['clockwise', 'counterclockwise', 'upsidedown', 'norotation'])
 parser.add_argument("-s", "--split-pages",
 dest='split',
 default=False,
-help="Split pages in half after possible rotation. Default: Not performed.",
+help="NOT IMPLEMENTED! Split pages in half after possible rotation. Default: Not performed.",
 required=False,
 action='store_true')
 parser.add_argument("--ppi-import",
-dest="ppi_in", help="Down-/Scaling for input images. Default: 300 ppi.",
+dest="ppi_in",
+help="NOT IMPLEMENTED! Scaling for input images. Default: 300 ppi.",
 default=300,
 required=False,
 type=int)
@@ -109,47 +105,36 @@ def parse_arguments():
 required=False)
 parser.add_argument("-c", "--compress-pdf",
 dest='comp',
+help="NOT IMPLEMENTED!",
 default=False,
 action='store_true',
 required=False)
 parser.add_argument("--ppi-export",
 dest="ppi_out",
-help="Down-/Scaling for output images in PDF. Default: 150 ppi.",
+help="NOT IMPLEMENTED! Scaling for output images in PDF. Default: 150 ppi.",
 default=150,
 required=False,
 type=int)
-parser.add_argument("-t", "--temp-directory",
-dest="temp",
-help="Location of intermediate files. Defaults to tmp in output directory.",
-default="",
-required=False,
-type=str)
 parser.add_argument("-k", "--keep-intermediate",
 dest="intermediate",
-help="Keep intermediate files. Default: True",
+help="Keep intermediate files. Default: False",
 default=False,
 action='store_true')
 parser.add_argument("--cores",
 dest='nCores',
-help="Amount of CPUs to use for parallel jobs. Default: %i" % (4),
+help="Amount of CPUs to use for parallel jobs. Default: Number of available CPUs",
-default=4,
+default=multiprocessing.cpu_count(),
 required=False,
 type=int)
 parser.add_argument("--is-continued", dest='continued',
-help="Enables continuing an erroneous or paused workflow. MUST use the \
+help="NOT IMPLEMENTED! Enables continuing an erroneous or paused workflow. MUST use the \
 same dataDirRoot as before.",
 default=False,
 required=False,
 action='store_true')
-parser.add_argument("--is-dry-run",
-dest='dry_run',
-help="Check workflow without execution.",
-default=False,
-action='store_true',
-required=False)
 parser.add_argument("--memory",
 dest='mem',
-help="Total amount of memory (RAM) available for this workflow. \
+help="NOT IMPLEMENTED! Total amount of memory (RAM) available for this workflow. \
 Default: %i" % (8192),
 default=8192,
 required=False,
@@ -158,405 +143,206 @@ def parse_arguments():
 return (args)


-class PureOCR(WorkflowRunner):
-def __init__(self, input_path, output_path):
-self.input_path = input_path
-self.output_path = output_path

-def workflow(self):
-pass


 class OCRWorkflow(WorkflowRunner):
-def __init__(self, pdfImageJobs, inputDir, outputDir, suffix, lang, pdf, intermediate, skip_bin,
-nCores, memMb, dry_run):
+def __init__(self, pdfImageJobs, inputDir, outputDir, lang, pdf, intermediate, nCores, memMb):
 self.pdfImageJobs = pdfImageJobs
 self.outputDir = outputDir
 self.inputDir = inputDir
-self.suffix = suffix
 self.lang = lang
 self.pdf = pdf
 self.intermediate = intermediate
-self.skip_bin = skip_bin
 self.nCores = nCores
 self.memMb = memMb
-self.dry_run = dry_run


 def workflow(self):
+###
+# Task "mkdir_job": create output directories
+# Dependencies: None
+###
+mkdir_jobs = []
+mkdir_job_number = 0
+for job in self.pdfImageJobs["images"] + self.pdfImageJobs["pdf"]:
+mkdir_job_number += 1
+cmd = "mkdir -p %s %s %s %s" % (
+os.path.join(job["output_dir"], "hocr_files"),
+os.path.join(job["output_dir"], "tmp", "ocropus-nlbin"),
+os.path.join(job["output_dir"], "tmp", "tesseract"),
+os.path.join(job["output_dir"], "tmp", "tiff_files"))
+mkdir_jobs.append(self.addTask(label="mkdir_job_-_%i" % (mkdir_job_number), command=cmd))

-cmd = None
-print(self.outputDir)
-if os.path.isabs(self.outputDir):
-if not os.path.exists(self.outputDir):
-cmd = "mkdir -p %s" % (self.outputDir)
-else:
-self.outputDir = os.path.join(os.path.abspath(self.inputDir), self.outputDir)
-if not os.path.exists(self.outputDir):
-cmd = "mkdir -p %s" % (self.outputDir)

-self.addTask(label="mkdir_outputdir", command=cmd, isForceLocal=True)

-deps = []
-if self.pdfImageJobs["pdf"]:
-cmd = "mkdir %s" % (os.path.join(self.outputDir, "pdf"))
-deps.append("mkdir_pdf")
-self.addTask(label="mkdir_pdf", command=cmd, isForceLocal=True,
-dependencies="mkdir_outputdir")
-if self.pdfImageJobs["images"]:
-cmd = "mkdir %s" % (os.path.join(self.outputDir, "images"))
-deps.append("mkdir_images")
-self.addTask(label="mkdir_images", command=cmd, isForceLocal=True,
-dependencies="mkdir_outputdir")

 ###
-# Generate directories
+# Task "split_job": split input file into one tiff file per page
+# Dependencies: mkdir_jobs
 ###
-deps_dirs = []
+split_jobs = []
-if self.pdfImageJobs["pdf"]:
+split_job_number = 0
-i = 0
+for job in self.pdfImageJobs["images"]:
-for k in self.pdfImageJobs["pdf"]:
+split_job_number += 1
-cmd = "mkdir -p %s %s %s %s %s" % (
+# TODO: Make the following command work
-os.path.join(os.path.abspath(self.outputDir), "pdf", k, "tmp", "tiff"),
+'''
-os.path.join(os.path.abspath(self.outputDir), "pdf", k, "tmp", "tesseract"),
+cmd = "convert %s %s" % (
-os.path.join(os.path.abspath(self.outputDir), "pdf", k, "tmp", "ocropy"),
+job["path"],
-os.path.join(os.path.abspath(self.outputDir), "pdf", k, "tmp", "gs"),
+os.path.join(job["output_dir"], "tmp", "tiff_files", os.path.basename(job["path"]).rsplit(".", 1)[0] + "-%sd.tif" % ("%")))
-os.path.join(os.path.abspath(self.outputDir), "pdf", k, "HOCR"))
+'''
+# WORKAROUND
+cmd = "tiff2pdf -o %s %s && pdftoppm %s %s -tiff -r 300 -tiffcompression lzw -cropbox && rm %s" % (
+os.path.join(job["output_dir"], "tmp", "tiff_files", os.path.basename(job["path"]).rsplit(".", 1)[0] + ".pdf"),
+job["path"],
+os.path.join(job["output_dir"], "tmp", "tiff_files", os.path.basename(job["path"]).rsplit(".", 1)[0] + ".pdf"),
+os.path.join(job["output_dir"], "tmp", "tiff_files", os.path.basename(job["path"]).rsplit(".", 1)[0]),
+os.path.join(job["output_dir"], "tmp", "tiff_files", os.path.basename(job["path"]).rsplit(".", 1)[0] + ".pdf"))
+split_jobs.append(self.addTask(label="split_job_-_%i" % (split_job_number), command=cmd, dependencies=mkdir_jobs, nCores=1, memMb=1024))
+for job in self.pdfImageJobs["pdf"]:
+split_job_number += 1
+cmd = "pdftoppm %s %s -tiff -r 300 -tiffcompression lzw -cropbox" % (
+job["path"],
+os.path.join(job["output_dir"], "tmp", "tiff_files", os.path.basename(job["path"]).rsplit(".", 1)[0]))
+split_jobs.append(self.addTask(label="split_job_-_%i" % (split_job_number), command=cmd, dependencies=mkdir_jobs, nCores=1, memMb=1024))

-deps_dirs.append("mkdir_pdf_%i" % (i))
-self.addTask(label="mkdir_pdf_%i" % (i), command=cmd, isForceLocal=True,
-dependencies=deps)
-i += 1
-if self.pdfImageJobs["images"]:
-i = 0
-for k in self.pdfImageJobs["images"]:
-cmd = "mkdir -p %s %s %s %s" % (
-os.path.join(os.path.abspath(self.outputDir), "images", k, "tmp", "tesseract"),
-os.path.join(os.path.abspath(self.outputDir), "images", k, "tmp", "ocropy"),
-os.path.join(os.path.abspath(self.outputDir), "images", k, "tmp", "gs"),
-os.path.join(os.path.abspath(self.outputDir), "images", k, "HOCR"))
-deps_dirs.append("mkdir_images_%i" % (i))
-self.addTask(label="mkdir_images_%i" % (i), command=cmd, isForceLocal=True,
-dependencies=deps)
-i += 1

 ###
-# Extract images from PDF input files
+# Task "ocropus_nlbin_job": binarize tiff files from previous split
+# Dependencies: split_jobs
 ###
-pdf_counter = 0
+ocropusnlbin_jobs = []
-deps_ppm = []
+ocropusnlbin_job_number = 0
-for pdf in self.pdfImageJobs["pdf"]:
+for job in self.pdfImageJobs["images"] + self.pdfImageJobs["pdf"]:
-cmd = "pdftoppm %s %s -tiff -r 300 -tiffcompression lzw -cropbox" % (self.pdfImageJobs["pdf"][pdf],
+ocropusnlbin_job_number += 1
-os.path.join(
+cmd = "%s -Q %i -o %s %s" % (
-os.path.abspath(self.outputDir),
+ocropusnlbin_bin,
-"pdf", pdf, "tmp",
+self.nCores,
-"tiff", "interm"))
+os.path.join(job["output_dir"], "tmp", "ocropus-nlbin"),
-self.addTask(label="pdftoppm_%i" % (pdf_counter), command=cmd, isForceLocal=True,
+os.path.join(job["output_dir"], "tmp", "tiff_files", os.path.basename(job["path"]).rsplit(".", 1)[0] + "-*.tif"))
-dependencies=deps_dirs, nCores=1, memMb=1024)
+ocropusnlbin_jobs.append(self.addTask(label="ocropusnlbin_job_-_%i" % (ocropusnlbin_job_number), command=cmd, dependencies=split_jobs, nCores=self.nCores, memMb=self.memMb))
-deps_ppm.append("pdftoppm_%i" % (pdf_counter))
-pdf_counter += 1

 ###
-# Perform image binarization if not skipped
+# Task "tesseract_job": perform OCR on binarized images
+# Dependencies: ocropusnlbin_jobs
 ###
 self.waitForTasks()
-ocropus_counter = 0
+tesseract_jobs = []
-deps_ocropus = []
+tesseract_job_number = 0
-if not self.skip_bin:
+for job in self.pdfImageJobs["images"] + self.pdfImageJobs["pdf"]:
-for pdf in self.pdfImageJobs["pdf"]:
+# This list is empty if you don't wait for ocropus_nlbin_jobs to complete
-cmd = "%s -Q %i -o %s %s" % (ocropusnlbin_bin, self.nCores,
+for file in filter(lambda x: x.endswith(".bin.png"), os.listdir(os.path.join(job["output_dir"], "tmp", "ocropus-nlbin"))):
-os.path.join(os.path.abspath(self.outputDir), "pdf", pdf, "tmp", "ocropy"),
+tesseract_job_number += 1
-os.path.join(os.path.abspath(self.outputDir), "pdf", pdf, "tmp", "tiff",
-"interm-*"))
-self.addTask(label="ocropusnlbin_%i" % (ocropus_counter), command=cmd, isForceLocal=True,
-dependencies=deps_ppm,
-nCores=self.nCores, memMb=self.memMb)
-deps_ocropus.append("ocropusnlbin_%i" % (ocropus_counter))
-ocropus_counter += 1
-for img in self.pdfImageJobs["images"]:
-cmd = "%s -Q %i -o %s %s" % (ocropusnlbin_bin, self.nCores,
-os.path.join(os.path.abspath(self.outputDir), "images", img, "tmp",
-"ocropy"),
-os.path.join(self.pdfImageJobs["images"][img], "*.%s" % (self.suffix)))
-self.addTask(label="ocropusnlbin_%i" % (ocropus_counter), command=cmd, isForceLocal=True,
-dependencies=deps_ppm,
-nCores=self.nCores, memMb=self.memMb)
-deps_ocropus.append("ocropusnlbin_%i" % (ocropus_counter))
-ocropus_counter += 1

-###
-# Perform OCR, HOCR as default output, PDF only optional
-###
-self.waitForTasks()
-pdf_counter = 0
-for pdf in self.pdfImageJobs["pdf"]:
-if self.skip_bin:
-inp_images = sorted(os.listdir(
-os.path.join(os.path.abspath(self.outputDir), "pdf", pdf, "tmp", "tiff")))
-else:
-inp_images = sorted(filter(lambda x: ".bin." in x, os.listdir(
-os.path.join(os.path.abspath(self.outputDir), "pdf", pdf, "tmp",
-"ocropy"))))

-deps_ocr_pdf = []

-for image in inp_images:
 cmd = "tesseract %s %s -l %s hocr %s" % (
-os.path.join(os.path.abspath(self.outputDir), "pdf", pdf, "tmp",
+os.path.join(job["output_dir"], "tmp", "ocropus-nlbin", file),
-"tiff" if self.skip_bin else "ocropy", image),
+os.path.join(job["output_dir"], "tmp", "tesseract", file.rsplit(".", 2)[0]),
-os.path.join(self.outputDir, "pdf", pdf, "tmp", "tesseract",
+self.lang,
-"%04i" % int(image.split(".")[0].split("-")[1]) if self.skip_bin else image.split(".")[
+"pdf" if self.pdf else "")
-0]),
+tesseract_jobs.append(self.addTask(label="tesseract_job_-_%i" % (tesseract_job_number), command=cmd, dependencies=ocropusnlbin_jobs, nCores=1, memMb=2048))
-self.lang, "pdf" if self.pdf else "")

-self.addTask(label="ocr_%i_%s" % (pdf_counter, image.split(".")[0]), command=cmd, isForceLocal=True,
-dependencies=deps_ocropus, nCores=1, memMb=2048)
-deps_ocr_pdf.append("ocr_%i_%s" % (pdf_counter, image.split(".")[0]))

-pdf_counter += 1

-for img in self.pdfImageJobs["images"]:
-if self.skip_bin:
-images = sorted(
-list(filter(lambda x: x.endswith(self.suffix), os.listdir(self.pdfImageJobs["images"][img]))))
-else:
-images = sorted(
-list(filter(lambda x: ".bin." in x, os.listdir(
-os.path.join(os.path.abspath(self.outputDir), "images", img, "tmp", "ocropy")))))

-deps_ocr = []
-for image in images:
-if self.skip_bin:
-cmd = "tesseract %s %s -l %s hocr %s" % (
-os.path.join(self.pdfImageJobs["images"][img], image),
-os.path.join(self.outputDir, "images", img, "tmp", "tesseract", image.split(".")[0]),
-self.lang, "pdf" if self.pdf else "")
-else:
-cmd = "tesseract %s %s -l %s hocr %s" % (
-os.path.join(os.path.abspath(self.outputDir), "images", img, "tmp", "ocropy", image),
-os.path.join(self.outputDir, "images", img, "tmp", "tesseract", image.split(".")[0]),
-self.lang, "pdf" if self.pdf else "")
-print(cmd)
-self.addTask(label="ocr_%s_%s" % (img, image.split(".")[0]), command=cmd, isForceLocal=True,
-dependencies=deps_ocropus, nCores=1, memMb=2048)

-deps_ocr.append("ocr_%s_%s" % (img, image.split(".")[0]))

-pdftk_jobs = []
 ###
-# Creation of PDF output
+# Task "pdf_merge_job": Merge PDF files
+# Dependencies: tesseract_jobs
 ###
+pdf_merge_jobs = []
+pdf_merge_job_number = 0
 if self.pdf:
-pdf_counter_conv = 0
+for job in self.pdfImageJobs["images"] + self.pdfImageJobs["pdf"]:
-for img in self.pdfImageJobs["images"]:
+pdf_merge_job_number += 1
-if self.skip_bin:
+cmd = "pdftk %s cat output %s" % (
-images = sorted(
+os.path.join(job["output_dir"], "tmp", "tesseract", "*.pdf"),
-list(filter(lambda x: x.endswith(self.suffix), os.listdir(self.pdfImageJobs["images"][img]))))
+os.path.join(job["output_dir"], os.path.basename(job["path"].rsplit(".", 1)[0] + ".pdf")))
-else:
+pdf_merge_jobs.append(self.addTask(label="pdf_merge_job_-_%i" % (pdf_merge_job_number), command=cmd, dependencies=tesseract_jobs, nCores=1, memMb=4096))
-images = sorted(
-list(filter(lambda x: ".bin." in x, os.listdir(
-os.path.join(os.path.abspath(self.outputDir), "images", img, "tmp", "ocropy")))))

-deps_tiffpdf = []

-for image in images:
-if self.skip_bin:
-cmd = "tiff2pdf -o %s.pdf %s" % (
-os.path.join(self.outputDir, "images", img, "tmp", "gs", image.split(".")[0]),
-os.path.join(self.pdfImageJobs["images"][img], image))
-self.addTask(label="tiffpdf_%s_%s" % (img, image.split(".")[0]), command=cmd, isForceLocal=True,
-dependencies=deps_dirs, nCores=1, memMb=256)
-deps_tiffpdf.append("tiffpdf_%s_%s" % (img, image.split(".")[0]))
-else:
-cmd = "convert %s %s.pdf" % (
-os.path.join(os.path.abspath(self.outputDir), "images", img, "tmp", "ocropy", image),
-os.path.join(self.outputDir, "images", img, "tmp", "gs", image.split(".")[0]))
-self.addTask(label="tiffpdf_%s_%s" % (img, image.split(".")[0]), command=cmd, isForceLocal=True,
-dependencies=deps_ocropus, nCores=1, memMb=256)
-deps_tiffpdf.append("tiffpdf_%s_%s" % (img, image.split(".")[0]))

-cmd = "gs -dBATCH -dNOPAUSE -q -r150 -dPDFSETTINGS=/ebook -sDEVICE=pdfwrite -sOutputFile=%s %s" % (
-os.path.join(self.outputDir, "images", img, "tmp", "gs", "image.pdf"),
-os.path.join(self.outputDir, "images", img, "tmp", "gs", "*.pdf"))
-self.addTask(label="gs_image_%s" % (img), command=cmd, isForceLocal=True,
-dependencies=deps_tiffpdf, nCores=1, memMb=4096)

-deps_ocr.append("gs_image_%s" % (img))

-cmd = "gs -dBATCH -dNOPAUSE -q -r150 -dPDFSETTINGS=/ebook -dFILTERIMAGE -sDEVICE=pdfwrite -sOutputFile=%s %s" % (
-os.path.join(self.outputDir, "images", img, "tmp", "gs", "text.pdf"),
-os.path.join(self.outputDir, "images", img, "tmp", "tesseract", "*.pdf"))

-self.addTask(label="gs_layer_%s" % (img), command=cmd, isForceLocal=True,
-dependencies=deps_ocr, nCores=1, memMb=4096)

-cmd = "pdftk %s multistamp %s output %s" % (
-os.path.join(self.outputDir, "images", img, "tmp", "gs", "text.pdf"),
-os.path.join(self.outputDir, "images", img, "tmp", "gs", "image.pdf"),
-os.path.join(self.outputDir, "images", img, "%s.pdf" % (img)))

-self.addTask(label="pdftk_%s" % (img), command=cmd, isForceLocal=True,
-dependencies="gs_layer_%s" % (img), nCores=1, memMb=4096)
-pdftk_jobs.append("pdftk_%s" % (img))

-for pdf in self.pdfImageJobs["pdf"]:

-pdf_counter_conv += 1
-if self.skip_bin:
-inp_images = sorted(os.listdir(
-os.path.join(os.path.abspath(self.outputDir), "pdf", pdf, "tmp", "tiff")))
-else:
-inp_images = sorted(filter(lambda x: ".bin." in x, os.listdir(
-os.path.join(os.path.abspath(self.outputDir), "pdf", pdf, "tmp",
-"ocropy"))))

-deps_pdf_tiffpdf = []

-for image in inp_images:
-if self.skip_bin:
-cmd = "tiff2pdf -o %s.pdf %s" % (
-os.path.join(self.outputDir, "pdf", pdf, "tmp", "gs", image.split(".")[0]),
-os.path.join(os.path.abspath(self.outputDir), "pdf", pdf, "tmp", "tiff", image))
-self.addTask(label="tiffpdf_pdf%i_%s" % (pdf_counter_conv, image.split(".")[0]), command=cmd,
-isForceLocal=True,
-dependencies=deps_dirs, nCores=1, memMb=256)
-deps_pdf_tiffpdf.append("tiffpdf_pdf%i_%s" % (pdf_counter_conv, image.split(".")[0]))
-else:
-cmd = "convert %s %s.pdf" % (
-os.path.join(os.path.abspath(self.outputDir), "pdf", pdf, "tmp", "ocropy", image),
-os.path.join(self.outputDir, "pdf", pdf, "tmp", "gs", image.split(".")[0]))
-self.addTask(label="tiffpdf_pdf%i_%s" % (pdf_counter_conv, image.split(".")[0]), command=cmd,
-isForceLocal=True,
-dependencies=deps_ocropus, nCores=1, memMb=256)
-deps_pdf_tiffpdf.append("tiffpdf_pdf%i_%s" % (pdf_counter_conv, image.split(".")[0]))

-cmd = "gs -dBATCH -dNOPAUSE -q -r150 -dPDFSETTINGS=/ebook -sDEVICE=pdfwrite -sOutputFile=%s %s" % (
-os.path.join(self.outputDir, "pdf", pdf, "tmp", "gs", "image.pdf"),
-os.path.join(self.outputDir, "pdf", pdf, "tmp", "gs", "*.pdf"))
-self.addTask(label="gs_image_pdf%i" % (pdf_counter_conv), command=cmd, isForceLocal=True,
-dependencies=deps_pdf_tiffpdf, nCores=1, memMb=4096)

-deps_ocr.append("gs_image_pdf%i" % (pdf_counter_conv))

-cmd = "gs -dBATCH -dNOPAUSE -q -r150 -dPDFSETTINGS=/ebook -dFILTERIMAGE -sDEVICE=pdfwrite -sOutputFile=%s %s" % (
-os.path.join(self.outputDir, "pdf", pdf, "tmp", "gs", "text.pdf"),
-os.path.join(self.outputDir, "pdf", pdf, "tmp", "tesseract", "*.pdf"))

-self.addTask(label="gs_layer_pdf%i" % (pdf_counter_conv), command=cmd, isForceLocal=True,
-dependencies=deps_ocr, nCores=1, memMb=4096)

-cmd = "pdftk %s multistamp %s output %s" % (
-os.path.join(self.outputDir, "pdf", pdf, "tmp", "gs", "text.pdf"),
-os.path.join(self.outputDir, "pdf", pdf, "tmp", "gs", "image.pdf"),
-os.path.join(self.outputDir, "pdf", pdf, "%s.pdf" % (pdf)))

-self.addTask(label="pdftk_pdf%i" % (pdf_counter_conv), command=cmd, isForceLocal=True,
-dependencies="gs_layer_pdf%i" % (pdf_counter_conv), nCores=1, memMb=4096)

-pdftk_jobs.append("pdftk_pdf%i" % (pdf_counter_conv))

-else:
-self.addTask(label="pdftk_", command=None, dependencies=deps_ocr)
-pdftk_jobs.append("pdftk_")

 ###
-# Merging and cleanup
+# Task "move_hocr_job": move hocr files from <output_dir>/tmp/tesseract to <output_dir>/hocr_files
+# Dependencies: tesseract_jobs
+###
+move_hocr_jobs = []
+move_hocr_job_number = 0
+for job in self.pdfImageJobs["images"] + self.pdfImageJobs["pdf"]:
+move_hocr_job_number += 1
+cmd = "mv %s %s" % (
+os.path.join(job["output_dir"], "tmp", "tesseract", "*.hocr"),
+os.path.join(job["output_dir"], "hocr_files"))
+move_hocr_jobs.append(self.addTask(label="move_hocr_job_-_%i" % (move_hocr_job_number), command=cmd, dependencies=tesseract_jobs))


+###
+# Task "hocr_to_teip5_job": create TEI P5 file from hocr files
+# Dependencies: move_hocr_jobs
+###
+hocr_to_teip5_jobs = []
+hocr_to_teip5_job_number = 0
+for job in self.pdfImageJobs["images"] + self.pdfImageJobs["pdf"]:
+hocr_to_teip5_job_number += 1
+cmd = "parse_hocr %s %s" % (
+os.path.join(job["output_dir"], "hocr_files"),
+os.path.join(os.path.join(job["output_dir"], os.path.basename(job["path"]).rsplit(".", 1)[0] + ".xml")))
+hocr_to_teip5_jobs.append(self.addTask(label="hocr_to_teip5_job_-_%i" % (hocr_to_teip5_job_number), command=cmd, dependencies=move_hocr_jobs, nCores=1, memMb=250))


+###
+# Task "cleanup_job": remove temporary files
+# Dependencies: All
 ###
 self.waitForTasks()
-for img in self.pdfImageJobs["images"]:
+cleanup_jobs = []
-cmd = "mv %s %s" % (os.path.join(self.outputDir, "images", img, "tmp", "tesseract", "*.hocr"),
+cleanup_job_counter = 0
-os.path.join(os.path.abspath(self.outputDir), "images", img, "HOCR"))
+if not self.intermediate:
-self.addTask(label="mv_%s" % (img), command=cmd, isForceLocal=True,
+for job in self.pdfImageJobs["images"] + self.pdfImageJobs["pdf"]:
-dependencies=pdftk_jobs, nCores=1, memMb=32)
+cleanup_job_counter += 1
+cmd = "rm -r %s" % (os.path.join(job["output_dir"], "tmp"))
-cmd = "parse_hocr %s %s" % (
+cleanup_jobs.append(self.addTask(label="cleanup_job_-_%i" % (cleanup_job_counter), command=cmd))
-os.path.join(os.path.abspath(self.outputDir), "images", img, "HOCR"),
-os.path.join(os.path.abspath(self.outputDir), "images", img, "%s.xml" % (img)))
-self.addTask(label="generate_hocr_%s" % (img), command=cmd, isForceLocal=True,
-dependencies="mv_%s" % (img), nCores=1, memMb=250)

-if not self.intermediate:
-cmd = "rm -rf %s" % (os.path.join(self.outputDir, "images", img, "tmp"))
-self.addTask(label="cleanup_%s" % (img), command=cmd, isForceLocal=True,
-dependencies="mv_%s" % (img), nCores=1, memMb=32)

-pdf_counter = 0
-for pdf in self.pdfImageJobs["pdf"]:
-pdf_counter += 1
-cmd = "mv %s %s" % (os.path.join(self.outputDir, "pdf", pdf, "tmp", "tesseract", "*.hocr"),
-os.path.join(os.path.abspath(self.outputDir), "pdf", pdf, "HOCR"))
-self.addTask(label="mv_%i" % (pdf_counter), command=cmd, isForceLocal=True,
-dependencies=pdftk_jobs, nCores=1, memMb=32)

-cmd = "parse_hocr %s %s" % (
-os.path.join(os.path.abspath(self.outputDir), "pdf", pdf, "HOCR"),
-os.path.join(os.path.abspath(self.outputDir), "pdf", pdf, "%s.xml" % (pdf)))
-self.addTask(label="generate_hocr_%i" % (pdf_counter), command=cmd, isForceLocal=True,
-dependencies="mv_%i" % (pdf_counter), nCores=1, memMb=250)

-if not self.intermediate:
-cmd = "rm -rf %s" % (os.path.join(self.outputDir, "pdf", pdf, "tmp"))
-self.addTask(label="cleanup_%i" % (pdf_counter), command=cmd, isForceLocal=True,
-dependencies="mv_%i" % (pdf_counter), nCores=1, memMb=32)


-def strip_files_of_spaces(current_path, old_path):
-"""Strips inputfiles of spaces to avoid bugs and also for better usage and readability."""
-os.chdir(current_path)
-for f in os.listdir("."):
-r = f.replace(" ", "_")
-if(r != f):
-os.rename(f, r)
-os.chdir(old_path)

+def analyze_jobs(input_dir, output_dir, skip_pdf=False, skip_images=False):
+files = os.listdir(input_dir)
+images = []
+pdf = []

-def analyze_jobs(input_dir, image_suffix="tiff", skip_pdf=False, skip_images=False):
-pdfs = {}
-images = {}
-files = os.listdir(os.path.abspath(input_dir))
-dirs = list(filter(lambda x: os.path.isdir(os.path.join(os.path.abspath(input_dir), x)), files))

-# Scan root input dir
-if not skip_pdf:
-pdfs_ = list(filter(lambda x: x.endswith(".pdf"), files))
-pdfs = {".".join(k.split(".")[:-1]): os.path.join(os.path.abspath(input_dir), k) for k in pdfs_}
 if not skip_images:
-print("sdf")
+for file in filter(lambda x: x.endswith(".tif") or x.endswith(".tiff"), files):
-if list(filter(lambda x: x.endswith(".%s" % (image_suffix)), files)):
+images.append({"path": os.path.join(input_dir, file), "output_dir": os.path.join(output_dir, file.rsplit(".", 1)[0])})
-images["input_root"] = os.path.abspath(input_dir)
+if not skip_pdf:
+for file in filter(lambda x: x.endswith(".pdf"), files):
-# Scan subdirectories for inputs if available
+pdf.append({"path": os.path.join(input_dir, file), "output_dir": os.path.join(output_dir, file.rsplit(".", 1)[0])})
-for d in dirs:
+for subdir in filter(lambda x: os.path.isdir(os.path.join(input_dir, x)), files):
-strip_files_of_spaces(os.path.join(input_dir, d), os.getcwd())
+subdir_files = os.listdir(os.path.join(input_dir, subdir))
-files = os.listdir(os.path.join(input_dir, d))

-if not skip_pdf:
-for p in list(filter(lambda x: x.endswith(".pdf"), files)):
-pdfs[".".join(p.split(".")[:-1])] = os.path.join(os.path.abspath(input_dir), d, p)
 if not skip_images:
-if sorted(list(filter(lambda x: x.endswith(".%s" % (image_suffix)), files))):
+for file in filter(lambda x: x.endswith(".tif") or x.endswith(".tiff"), subdir_files):
-images[d] = os.path.join(os.path.abspath(input_dir), d)
+images.append({"path": os.path.join(input_dir, subdir, file), "output_dir": os.path.join(output_dir, subdir, file.rsplit(".", 1)[0])})
+if not skip_pdf:
+for file in filter(lambda x: x.endswith(".pdf"), subdir_files):
+pdf.append({"path": os.path.join(input_dir, subdir, file), "output_dir": os.path.join(output_dir, subdir, file.rsplit(".", 1)[0])})
+return {"pdf": pdf, "images": images}



+def normalize_input_filenames(path):
+###
+# Normalize input filenames and directories to avoid bugs and also for better usage and readability.
+###
+for file in os.listdir(path):
+file_with_path = os.path.join(path, file)
+if os.path.isdir(file_with_path):
+normalize_input_filenames(file_with_path)
+new_file_with_path = os.path.join(path, unicodedata.normalize("NFKD", file.decode("utf-8")).encode("ascii", "ignore").replace(" ", "_"))
+os.rename(file_with_path, new_file_with_path)

-return {"pdf": pdfs, "images": images}


 def main():
 args = parse_arguments()

-# DICT{DICT}
+normalize_input_filenames(args.input_dir)
-# 'pdf' :: {name: path}
+jobs = analyze_jobs(args.input_dir, args.output_dir, skip_pdf=args.skip_pdf, skip_images=args.skip_images)
-# 'images' :: {name: path}
+wflow = OCRWorkflow(jobs, args.input_dir, args.output_dir, args.lang, args.pdf, args.intermediate, args.nCores, args.mem)
-current_path = os.getcwd()
+retval = wflow.run(mode="local", nCores=args.nCores, memMb=args.mem)
-strip_files_of_spaces(args.input_dir, current_path)
-jobs = analyze_jobs(args.input_dir, image_suffix=args.suffix, skip_pdf=args.skip_pdf, skip_images=args.skip_images)

-wflow = OCRWorkflow(jobs, args.input_dir, args.output_dir, args.suffix, args.lang, args.pdf, args.intermediate,
-args.skip_bin,
-args.nCores, args.mem,
-args.dry_run)
-retval = wflow.run(mode="local", nCores=args.nCores, memMb=args.mem, isDryRun=args.dry_run)
 sys.exit(retval)



 if __name__ == "__main__":
 main()
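After the rewrite, ocr_pyflow discovers (multipage) TIFF and PDF inputs in the input directory and its immediate subdirectories, normalizes file names, and drives the split, ocropus-nlbin, tesseract, pdftk and parse_hocr tasks as a pyFlow task graph on the local machine. A hedged usage sketch; the paths are placeholders and only options visible in this diff are shown (the language and PDF-output options exist in the parser but sit outside the changed hunks):

ocr_pyflow -i /files/input -o /files/output --cores 8 --keep-intermediate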
parse_hocr
@@ -1,4 +1,4 @@
-#!/usr/bin/env python3.6
+#!/usr/bin/env python3.5
 # coding=utf-8

 import xml.etree.ElementTree as ET
@@ -6,7 +6,7 @@ from xml.sax.saxutils import escape
 import os
 import sys

-input_files = sorted(os.listdir(sys.argv[1]))
+input_files = filter(lambda x: x.endswith(".hocr"), sorted(os.listdir(sys.argv[1])))
 output_file = open(sys.argv[2], "w")

 output_file.write('<?xml version="1.0" encoding="UTF-8"?>\n' +
@@ -37,6 +37,7 @@ for input_file in input_files:
 if not first_word_in_line:
 output_file.write('<lb/>\n')
 output_file.write(' </p>\n')
+
 output_file.write(' </body>\n' +
 ' </text>\n' +
 '</TEI>')
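parse_hocr is invoked by the workflow's hocr_to_teip5 task and can also be run on its own: the first argument is a directory of .hocr files, the second the path of the TEI P5 XML file to write. A sketch with placeholder paths ("book" is a hypothetical job name):

parse_hocr /files/output/book/hocr_files /files/output/book/book.xml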