fix table desc bugs, add positions to chunks (#91)
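Every chunk cut from a PDF now carries its source coordinates. `PdfParser.crop(..., need_position=True)` returns the cropped image together with a list of `(page, left, right, top, bottom)` boxes, and a new `add_positions` helper in `rag.nlp` writes them into the chunk as `page_num_int`, `top_int` and `position_int`. Table extraction (`_extract_table_figure(..., True)`) returns positions as well, so `tokenize_table` now consumes `((img, rows), poss)` pairs and also handles the case where `rows` is a plain description string (the table-desc fix). On the retrieval side, `Dealer.search` can sort keyword-less listings by page and vertical position, and list fields containing nested lists are serialized correctly. The task executor registers a `"general"` alias for the naive parser and rolls back partially indexed chunks when a bulk insert fails.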
@@ -13,7 +13,7 @@
 import copy
 import re
 from rag.nlp import bullets_category, is_english, tokenize, remove_contents_table, \
-    hierarchical_merge, make_colon_as_title, naive_merge, random_choices, tokenize_table
+    hierarchical_merge, make_colon_as_title, naive_merge, random_choices, tokenize_table, add_positions
 from rag.nlp import huqie
 from deepdoc.parser import PdfParser, DocxParser

@@ -21,6 +21,7 @@ from deepdoc.parser import PdfParser, DocxParser
 class Pdf(PdfParser):
     def __call__(self, filename, binary=None, from_page=0,
                  to_page=100000, zoomin=3, callback=None):
+        callback(msg="OCR is running...")
         self.__images__(
             filename if not binary else binary,
             zoomin,

@@ -40,11 +41,11 @@ class Pdf(PdfParser):
         self._filter_forpages()
         self._merge_with_same_bullet()
         callback(0.75, "Text merging finished.")
-        tbls = self._extract_table_figure(True, zoomin, False)
+        tbls = self._extract_table_figure(True, zoomin, False, True)

         callback(0.8, "Text extraction finished")

-        return [(b["text"] + self._line_tag(b, zoomin), b.get("layoutno","")) for b in self.boxes], tbls
+        return [(b["text"] + self._line_tag(b, zoomin), b.get("layoutno","")) for b in self.boxes], tbls, tbl_poss


 def chunk(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", callback=None, **kwargs):

@@ -69,7 +70,7 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", callback=None, **kwargs):
         callback(0.8, "Finish parsing.")
     elif re.search(r"\.pdf$", filename, re.IGNORECASE):
         pdf_parser = Pdf()
-        sections,tbls = pdf_parser(filename if not binary else binary,
+        sections, tbls = pdf_parser(filename if not binary else binary,
                                    from_page=from_page, to_page=to_page, callback=callback)
     elif re.search(r"\.txt$", filename, re.IGNORECASE):
         callback(0.1, "Start to parse.")

@@ -105,7 +106,8 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", callback=None, **kwargs):
         d = copy.deepcopy(doc)
         ck = "\n".join(ck)
         if pdf_parser:
-            d["image"] = pdf_parser.crop(ck)
+            d["image"], poss = pdf_parser.crop(ck, need_position=True)
+            add_positions(d, poss)
             ck = pdf_parser.remove_tag(ck)
         tokenize(d, ck, eng)
         res.append(d)
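The same two-line pattern (`crop(..., need_position=True)` followed by `add_positions(d, poss)`) recurs in each of the PDF chunkers changed below.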
@@ -15,7 +15,7 @@ import re
 from io import BytesIO
 from docx import Document
 from rag.nlp import bullets_category, is_english, tokenize, remove_contents_table, hierarchical_merge, \
-    make_colon_as_title
+    make_colon_as_title, add_positions
 from rag.nlp import huqie
 from deepdoc.parser import PdfParser, DocxParser
 from rag.settings import cron_logger

@@ -49,6 +49,7 @@ class Docx(DocxParser):
 class Pdf(PdfParser):
     def __call__(self, filename, binary=None, from_page=0,
                  to_page=100000, zoomin=3, callback=None):
+        callback(msg="OCR is running...")
         self.__images__(
             filename if not binary else binary,
             zoomin,

@@ -122,7 +123,8 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", callback=None, **kwargs):
         ck = "\n".join(ck)
         d = copy.deepcopy(doc)
         if pdf_parser:
-            d["image"] = pdf_parser.crop(ck)
+            d["image"], poss = pdf_parser.crop(ck, need_position=True)
+            add_positions(d, poss)
             ck = pdf_parser.remove_tag(ck)
         tokenize(d, ck, eng)
         res.append(d)

@@ -2,7 +2,7 @@ import copy
 import re

 from api.db import ParserType
-from rag.nlp import huqie, tokenize, tokenize_table
+from rag.nlp import huqie, tokenize, tokenize_table, add_positions
 from deepdoc.parser import PdfParser
 from rag.utils import num_tokens_from_string

@@ -14,6 +14,7 @@ class Pdf(PdfParser):

     def __call__(self, filename, binary=None, from_page=0,
                  to_page=100000, zoomin=3, callback=None):
+        callback(msg="OCR is running...")
         self.__images__(
             filename if not binary else binary,
             zoomin,

@@ -32,7 +33,7 @@ class Pdf(PdfParser):
         self._concat_downward(concat_between_pages=False)
         self._filter_forpages()
         callback(0.77, "Text merging finished")
-        tbls = self._extract_table_figure(True, zoomin, False)
+        tbls = self._extract_table_figure(True, zoomin, False, True)

         # clean mess
         for b in self.boxes:

@@ -91,7 +92,8 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", callback=None, **kwargs):
             d = copy.deepcopy(doc)
             ck = "\n".join(chunk)
             tokenize(d, pdf_parser.remove_tag(ck), pdf_parser.is_english)
-            d["image"] = pdf_parser.crop(ck)
+            d["image"], poss = pdf_parser.crop(ck, need_position=True)
+            add_positions(d, poss)
             res.append(d)
             chunk = []
             tk_cnt = 0

@@ -13,7 +13,7 @@
 import copy
 import re
 from rag.app import laws
-from rag.nlp import huqie, is_english, tokenize, naive_merge, tokenize_table
+from rag.nlp import huqie, is_english, tokenize, naive_merge, tokenize_table, add_positions
 from deepdoc.parser import PdfParser
 from rag.settings import cron_logger

@@ -21,6 +21,7 @@ from rag.settings import cron_logger
 class Pdf(PdfParser):
     def __call__(self, filename, binary=None, from_page=0,
                  to_page=100000, zoomin=3, callback=None):
+        callback(msg="OCR is running...")
         self.__images__(
             filename if not binary else binary,
             zoomin,

@@ -39,7 +40,7 @@ class Pdf(PdfParser):
         self._concat_downward(concat_between_pages=False)
         self._filter_forpages()
         callback(0.77, "Text merging finished")
-        tbls = self._extract_table_figure(True, zoomin, False)
+        tbls = self._extract_table_figure(True, zoomin, False, True)

         cron_logger.info("paddle layouts:".format((timer() - start) / (self.total_page + 0.1)))
         #self._naive_vertical_merge()

@@ -95,11 +96,12 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", callback=None, **kwargs):

     # wrap up to es documents
     for ck in cks:
-        if len(ck.strip()) == 0:continue
+        print("--", ck)
+        if not ck:continue
         d = copy.deepcopy(doc)
         if pdf_parser:
-            d["image"] = pdf_parser.crop(ck)
+            d["image"], poss = pdf_parser.crop(ck, need_position=True)
+            add_positions(d, poss)
             ck = pdf_parser.remove_tag(ck)
         tokenize(d, ck, eng)
         res.append(d)

@@ -15,7 +15,7 @@ import re
 from collections import Counter

 from api.db import ParserType
-from rag.nlp import huqie, tokenize, tokenize_table
+from rag.nlp import huqie, tokenize, tokenize_table, add_positions
 from deepdoc.parser import PdfParser
 import numpy as np
 from rag.utils import num_tokens_from_string

@@ -28,6 +28,7 @@ class Pdf(PdfParser):

     def __call__(self, filename, binary=None, from_page=0,
                  to_page=100000, zoomin=3, callback=None):
+        callback(msg="OCR is running...")
         self.__images__(
             filename if not binary else binary,
             zoomin,

@@ -47,7 +48,7 @@ class Pdf(PdfParser):
         self._concat_downward(concat_between_pages=False)
         self._filter_forpages()
         callback(0.75, "Text merging finished.")
-        tbls = self._extract_table_figure(True, zoomin, False)
+        tbls = self._extract_table_figure(True, zoomin, False, True)

         # clean mess
         if column_width < self.page_images[0].size[0] / zoomin / 2:

@@ -165,7 +166,8 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", callback=None, **kwargs):
         txt = pdf_parser.remove_tag(paper["abstract"])
         d["important_kwd"] = ["abstract", "总结", "概括", "summary", "summarize"]
         d["important_tks"] = " ".join(d["important_kwd"])
-        d["image"] = pdf_parser.crop(paper["abstract"])
+        d["image"], poss = pdf_parser.crop(paper["abstract"], need_position=True)
+        add_positions(d, poss)
         tokenize(d, txt, eng)
         res.append(d)

@@ -198,8 +200,9 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", callback=None, **kwargs):
     for p in proj:
         d = copy.deepcopy(doc)
         txt += "\n" + pdf_parser.remove_tag(p)
-        d["image"] = pdf_parser.crop(p)
-        tokenize(d, txt)
+        d["image"], poss = pdf_parser.crop(p, need_position=True)
+        add_positions(d, poss)
+        tokenize(d, txt, eng)
         res.append(d)

     i = 0

@@ -210,7 +213,8 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", callback=None, **kwargs):
             d = copy.deepcopy(doc)
             ck = "\n".join(chunk)
             tokenize(d, pdf_parser.remove_tag(ck), pdf_parser.is_english)
-            d["image"] = pdf_parser.crop(ck)
+            d["image"], poss = pdf_parser.crop(ck, need_position=True)
+            add_positions(d, poss)
             res.append(d)
             chunk = []
             tk_cnt = 0

@@ -48,6 +48,7 @@ class Pdf(PdfParser):
         return False

     def __call__(self, filename, binary=None, from_page=0, to_page=100000, zoomin=3, callback=None):
+        callback(msg="OCR is running...")
         self.__images__(filename if not binary else binary, zoomin, from_page, to_page)
         callback(0.8, "Page {}~{}: OCR finished".format(from_page, min(to_page, self.total_page)))
         assert len(self.boxes) == len(self.page_images), "{} vs. {}".format(len(self.boxes), len(self.page_images))

@@ -94,9 +95,10 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, callback=None, **kwargs):
         return res
     elif re.search(r"\.pdf$", filename, re.IGNORECASE):
         pdf_parser = Pdf()
-        for txt,img in pdf_parser(filename if not binary else binary, from_page=from_page, to_page=to_page, callback=callback):
+        for pn, (txt,img) in enumerate(pdf_parser(filename if not binary else binary, from_page=from_page, to_page=to_page, callback=callback)):
             d = copy.deepcopy(doc)
             d["image"] = img
+            d["page_num_obj"] = [pn+1]
             tokenize(d, txt, pdf_parser.is_english)
             res.append(d)
         return res
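Slides are chunked one per page, so this parser records only a 1-based page number (`page_num_obj`) rather than full box coordinates.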
@@ -83,17 +83,39 @@ def tokenize(d, t, eng):
 def tokenize_table(tbls, doc, eng, batch_size=10):
     res = []
     # add tables
-    for img, rows in tbls:
+    for (img, rows), poss in tbls:
+        if not rows:continue
+        if isinstance(rows, str):
+            d = copy.deepcopy(doc)
+            r = re.sub(r"<[^<>]{,12}>", "", rows)
+            tokenize(d, r, eng)
+            d["content_with_weight"] = rows
+            d["image"] = img
+            add_positions(d, poss)
+            res.append(d)
+            continue
         de = "; " if eng else "； "
         for i in range(0, len(rows), batch_size):
             d = copy.deepcopy(doc)
             r = de.join(rows[i:i + batch_size])
             tokenize(d, r, eng)
             d["image"] = img
+            add_positions(d, poss)
             res.append(d)
     return res


+def add_positions(d, poss):
+    if not poss:return
+    d["page_num_int"] = []
+    d["position_int"] = []
+    d["top_int"] = []
+    for pn, left, right, top, bottom in poss:
+        d["page_num_int"].append(pn+1)
+        d["top_int"].append(top)
+        d["position_int"].append((pn+1, left, right, top, bottom))
+
+
 def remove_contents_table(sections, eng=False):
     i = 0
     while i < len(sections):
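For reference, a minimal sketch of what the new helper produces, assuming `rag.nlp` is importable and that `poss` carries the `(page, left, right, top, bottom)` tuples which `crop(..., need_position=True)` and `_extract_table_figure(..., True)` now yield (the tuple layout is read off the loop above; the coordinate values are invented):

```python
from rag.nlp import add_positions

d = {}
poss = [(0, 72, 520, 90, 140),   # (page, left, right, top, bottom), page 0-based
        (1, 72, 520, 60, 110)]
add_positions(d, poss)

assert d["page_num_int"] == [1, 2]   # pages stored 1-based for ES
assert d["top_int"] == [90, 60]      # top coordinate per box, used for sorting
assert d["position_int"] == [(1, 72, 520, 90, 140),
                             (2, 72, 520, 60, 110)]
```

These are exactly the fields the `Dealer` sort below relies on.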
@@ -68,17 +68,25 @@ class Dealer:
         pg = int(req.get("page", 1)) - 1
         ps = int(req.get("size", 1000))
         src = req.get("fields", ["docnm_kwd", "content_ltks", "kb_id", "img_id",
-                                 "image_id", "doc_id", "q_512_vec", "q_768_vec",
+                                 "image_id", "doc_id", "q_512_vec", "q_768_vec", "position_int",
                                  "q_1024_vec", "q_1536_vec", "available_int", "content_with_weight"])

         s = s.query(bqry)[pg * ps:(pg + 1) * ps]
         s = s.highlight("content_ltks")
         s = s.highlight("title_ltks")
         if not qst:
-            s = s.sort(
-                {"create_time": {"order": "desc", "unmapped_type": "date"}},
-                {"create_timestamp_flt": {"order": "desc", "unmapped_type": "float"}}
-            )
+            if not req.get("sort"):
+                s = s.sort(
+                    {"create_time": {"order": "desc", "unmapped_type": "date"}},
+                    {"create_timestamp_flt": {"order": "desc", "unmapped_type": "float"}}
+                )
+            else:
+                s = s.sort(
+                    {"page_num_int": {"order": "asc", "unmapped_type": "float"}},
+                    {"top_int": {"order": "asc", "unmapped_type": "float"}},
+                    {"create_time": {"order": "desc", "unmapped_type": "date"}},
+                    {"create_timestamp_flt": {"order": "desc", "unmapped_type": "float"}}
+                )

         if qst:
             s = s.highlight_options(
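With this change a keyword-less listing can opt into reading order: a truthy `sort` in the request orders results by `page_num_int`, then `top_int` (both ascending), ahead of the recency tiebreakers, and `unmapped_type` keeps the sort valid for chunks indexed before these fields existed.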
@@ -169,7 +177,7 @@ class Dealer:
         m = {n: d.get(n) for n in flds if d.get(n) is not None}
         for n, v in m.items():
             if isinstance(v, type([])):
-                m[n] = "\t".join([str(vv) for vv in v])
+                m[n] = "\t".join([str(vv) if not isinstance(vv, list) else "\t".join([str(vvv) for vvv in vv]) for vv in v])
                 continue
             if not isinstance(v, type("")):
                 m[n] = str(m[n])
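This serialization tweak is needed because `position_int` holds a list of 5-tuples per chunk; nested lists are now flattened into their own tab-joined strings instead of being `str()`-ed wholesale.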
@@ -48,6 +48,7 @@ from api.utils.file_utils import get_project_base_directory
 BATCH_SIZE = 64

 FACTORY = {
+    "general": naive,
     ParserType.NAIVE.value: naive,
     ParserType.PAPER.value: paper,
     ParserType.BOOK.value: book,

@@ -228,6 +229,8 @@ def main(comm, mod):
         es_r = ELASTICSEARCH.bulk(cks, search.index_name(r["tenant_id"]))
         if es_r:
             callback(-1, "Index failure!")
+            ELASTICSEARCH.deleteByQuery(
+                Q("match", doc_id=r["doc_id"]), idxnm=search.index_name(r["tenant_id"]))
             cron_logger.error(str(es_r))
         else:
             if TaskService.do_cancel(r["id"]):
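On a failed bulk index, the chunks already written for the document are now removed with a delete-by-query before the error is logged, so a retry starts from a clean slate.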