From 9da671b951eb21498ed0b84a2be49545f9326967 Mon Sep 17 00:00:00 2001
From: KevinHuSh <kevinhu.sh@gmail.com>
Date: Tue, 19 Mar 2024 12:26:04 +0800
Subject: [PATCH] refine manual parser (#131)

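Split OCR into separate detect and recognize passes so that boxes coming
back without text can be re-recognized from their cropped region, and drop
boxes that stay empty. Rework the manual parser: choose chunk pivots from
the most frequent title level (title_frequency) and merge sections between
pivots instead of concatenating downward. Also fix the "flag-enbedding"
model-name typo, guard SQL retrieval against a missing table result, raise
the manual chunk token threshold from 128 to 256, and pin the OCR and
recognizer ONNX sessions to CPU for now.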
---
 README.md                    |  2 +-
 api/apps/conversation_app.py |  2 +
 api/apps/llm_app.py          |  2 +-
 api/db/init_data.py          |  4 +-
 api/settings.py              |  4 +-
 deepdoc/parser/pdf_parser.py |  6 ++-
 deepdoc/vision/ocr.py        | 34 +++++++++++++-
 deepdoc/vision/recognizer.py |  2 +-
 rag/app/manual.py            | 89 ++++++++++++++++++++++++------------
 rag/app/naive.py             |  5 +-
 rag/nlp/__init__.py          | 38 ++++++++++++---
 rag/nlp/query.py             | 12 ++---
 rag/nlp/search.py            |  1 +
 13 files changed, 147 insertions(+), 54 deletions(-)

diff --git a/README.md b/README.md
index b59a4ad..d720f75 100644
--- a/README.md
+++ b/README.md
@@ -50,7 +50,7 @@ platform to empower your business with AI.
 
 # Release Notification
 **Star us on GitHub, and be notified of new releases instantly!**
-![star-us](https://github.com/langgenius/dify/assets/100913391/95f37259-7370-4456-a9f0-0bc01ef8642f)
+![star-us](https://github.com/infiniflow/ragflow/assets/12318111/2c2fbb5e-c403-496f-a1fd-64ba0fdbf74f)
 
 # Installation
 ## System Requirements
diff --git a/api/apps/conversation_app.py b/api/apps/conversation_app.py
index 9ee201f..23db038 100644
--- a/api/apps/conversation_app.py
+++ b/api/apps/conversation_app.py
@@ -274,6 +274,8 @@ def use_sql(question, field_map, tenant_id, chat_mdl):
         return retrievaler.sql_retrieval(sql, format="json"), sql
 
     tbl, sql = get_table()
+    if tbl is None:
+        return None, None
     if tbl.get("error") and tried_times <= 2:
         user_promt = """
         表名:{};
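
With this guard, use_sql() can now return (None, None) when get_table() fails,
so its caller has to handle that pair. A minimal caller-side sketch (the
fallback function is hypothetical, not part of this patch):

    ans, sql = use_sql(question, field_map, tenant_id, chat_mdl)
    if ans is None:
        # SQL route unavailable; fall back to ordinary retrieval instead.
        ans = normal_retrieval(question)  # hypothetical fallback
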
diff --git a/api/apps/llm_app.py b/api/apps/llm_app.py
index d6a7b81..6ed3bc3 100644
--- a/api/apps/llm_app.py
+++ b/api/apps/llm_app.py
@@ -107,7 +107,7 @@ def list():
         llms = LLMService.get_all()
         llms = [m.to_dict() for m in llms if m.status == StatusEnum.VALID.value]
         for m in llms:
-            m["available"] = m["fid"] in facts
+            m["available"] = m["fid"] in facts or m["llm_name"].lower() == "flag-embedding"
 
         res = {}
         for m in llms:
diff --git a/api/db/init_data.py b/api/db/init_data.py
index 5696cd8..a930fb4 100644
--- a/api/db/init_data.py
+++ b/api/db/init_data.py
@@ -227,7 +227,7 @@ def init_llm_factory():
             "model_type": LLMType.CHAT.value
         }, {
             "fid": factory_infos[3]["name"],
-            "llm_name": "flag-enbedding",
+            "llm_name": "flag-embedding",
             "tags": "TEXT EMBEDDING,",
             "max_tokens": 128 * 1000,
             "model_type": LLMType.EMBEDDING.value
@@ -241,7 +241,7 @@ def init_llm_factory():
             "model_type": LLMType.CHAT.value
         }, {
             "fid": factory_infos[4]["name"],
-            "llm_name": "flag-enbedding",
+            "llm_name": "flag-embedding",
             "tags": "TEXT EMBEDDING,",
             "max_tokens": 128 * 1000,
             "model_type": LLMType.EMBEDDING.value
diff --git a/api/settings.py b/api/settings.py
index ee3445c..b2fe8d8 100644
--- a/api/settings.py
+++ b/api/settings.py
@@ -72,13 +72,13 @@ default_llm = {
     },
     "Local": {
         "chat_model": "qwen-14B-chat",
-        "embedding_model": "flag-enbedding",
+        "embedding_model": "flag-embedding",
         "image2text_model": "",
         "asr_model": "",
     },
     "Moonshot": {
         "chat_model": "moonshot-v1-8k",
-        "embedding_model": "flag-enbedding",
+        "embedding_model": "",
         "image2text_model": "",
         "asr_model": "",
     }
diff --git a/deepdoc/parser/pdf_parser.py b/deepdoc/parser/pdf_parser.py
index fdfe6a2..8aaa7dc 100644
--- a/deepdoc/parser/pdf_parser.py
+++ b/deepdoc/parser/pdf_parser.py
@@ -247,7 +247,7 @@ class HuParser:
                 b["SP"] = ii
 
     def __ocr(self, pagenum, img, chars, ZM=3):
-        bxs = self.ocr(np.array(img))
+        bxs = self.ocr.detect(np.array(img))
         if not bxs:
             self.boxes.append([])
             return
@@ -278,8 +278,10 @@ class HuParser:
 
         for b in bxs:
             if not b["text"]:
-                b["text"] = b["txt"]
+                left, right, top, bott = b["x0"]*ZM, b["x1"]*ZM, b["top"]*ZM, b["bottom"]*ZM
+                b["text"] = self.ocr.recognize(np.array(img), np.array([[left, top], [right, top], [right, bott], [left, bott]], dtype=np.float32))
             del b["txt"]
+        bxs = [b for b in bxs if b["text"]]
         if self.mean_height[-1] == 0:
             self.mean_height[-1] = np.median([b["bottom"] - b["top"]
                                               for b in bxs])
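
The __ocr() change above splits OCR into two passes: detect all text quads
first, then run recognition only for boxes whose text came back empty, and
finally drop boxes that stay empty. A minimal sketch of that flow (ocr_page
is illustrative only; it assumes a PIL image and the OCR.detect/OCR.recognize
helpers added in deepdoc/vision/ocr.py below):

    import numpy as np

    def ocr_page(ocr, img):
        results = []
        for box, (text, _score) in ocr.detect(np.array(img)):
            if not text:
                # Second pass: recognize the cropped quad only.
                text = ocr.recognize(np.array(img),
                                     np.array(box, dtype=np.float32))
            if text:  # boxes that still have no text are dropped
                results.append((box, text))
        return results
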
diff --git a/deepdoc/vision/ocr.py b/deepdoc/vision/ocr.py
index 5025219..e41653d 100644
--- a/deepdoc/vision/ocr.py
+++ b/deepdoc/vision/ocr.py
@@ -69,7 +69,7 @@ def load_model(model_dir, nm):
     options.execution_mode = ort.ExecutionMode.ORT_SEQUENTIAL
     options.intra_op_num_threads = 2
     options.inter_op_num_threads = 2
-    if ort.get_device() == "GPU":
+    if False and ort.get_device() == "GPU":
         sess = ort.InferenceSession(model_file_path, options=options, providers=['CUDAExecutionProvider'])
     else:
         sess = ort.InferenceSession(model_file_path, options=options, providers=['CPUExecutionProvider'])
@@ -366,7 +366,7 @@ class TextDetector(object):
                 'keep_keys': ['image', 'shape']
             }
         }]
-        postprocess_params = {"name": "DBPostProcess", "thresh": 0.3, "box_thresh": 0.6, "max_candidates": 1000,
+        postprocess_params = {"name": "DBPostProcess", "thresh": 0.3, "box_thresh": 0.5, "max_candidates": 1000,
                               "unclip_ratio": 1.5, "use_dilation": False, "score_mode": "fast", "box_type": "quad"}
 
         self.postprocess_op = build_post_process(postprocess_params)
@@ -534,6 +534,34 @@ class OCR(object):
                     break
         return _boxes
 
+    def detect(self, img):
+        time_dict = {'det': 0, 'rec': 0, 'cls': 0, 'all': 0}
+
+        if img is None:
+            return []
+
+        start = time.time()
+        dt_boxes, elapse = self.text_detector(img)
+        time_dict['det'] = elapse
+
+        if dt_boxes is None:
+            end = time.time()
+            time_dict['all'] = end - start
+            return []
+        else:
+            cron_logger.debug("dt_boxes num : {}, elapsed : {}".format(
+                len(dt_boxes), elapse))
+
+        return list(zip(self.sorted_boxes(dt_boxes), [("", 0) for _ in range(len(dt_boxes))]))
+
+    def recognize(self, ori_im, box):
+        img_crop = self.get_rotate_crop_image(ori_im, box)
+
+        rec_res, elapse = self.text_recognizer([img_crop])
+        text, score = rec_res[0]
+        if score < self.drop_score: return ""
+        return text
+
     def __call__(self, img, cls=True):
         time_dict = {'det': 0, 'rec': 0, 'cls': 0, 'all': 0}
 
@@ -562,6 +590,7 @@ class OCR(object):
             img_crop_list.append(img_crop)
 
         rec_res, elapse = self.text_recognizer(img_crop_list)
+
         time_dict['rec'] = elapse
         cron_logger.debug("rec_res num  : {}, elapsed : {}".format(
             len(rec_res), elapse))
@@ -575,6 +604,7 @@ class OCR(object):
         end = time.time()
         time_dict['all'] = end - start
 
+
         #for bno in range(len(img_crop_list)):
         #    print(f"{bno}, {rec_res[bno]}")
 
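
This file and deepdoc/vision/recognizer.py both gate CUDA behind
`if False and ort.get_device() == "GPU"`, which hard-disables GPU inference
while keeping the branch in place. If this is meant as a temporary kill
switch, an explicit toggle states the intent; a sketch (the OCR_USE_GPU
environment variable is invented here, not part of the patch):

    import os
    import onnxruntime as ort

    use_gpu = os.environ.get("OCR_USE_GPU") == "1" and ort.get_device() == "GPU"
    providers = ["CUDAExecutionProvider"] if use_gpu else ["CPUExecutionProvider"]
    sess = ort.InferenceSession(model_file_path, options=options, providers=providers)
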
diff --git a/deepdoc/vision/recognizer.py b/deepdoc/vision/recognizer.py
index 33adff2..1de3cd5 100644
--- a/deepdoc/vision/recognizer.py
+++ b/deepdoc/vision/recognizer.py
@@ -41,7 +41,7 @@ class Recognizer(object):
         if not os.path.exists(model_file_path):
             raise ValueError("not find model file path {}".format(
                 model_file_path))
-        if ort.get_device() == "GPU":
+        if False and ort.get_device() == "GPU":
             options = ort.SessionOptions()
             options.enable_cpu_mem_arena = False
             self.ort_sess = ort.InferenceSession(model_file_path, options=options, providers=[('CUDAExecutionProvider')])
diff --git a/rag/app/manual.py b/rag/app/manual.py
index 6effc3d..018a3b7 100644
--- a/rag/app/manual.py
+++ b/rag/app/manual.py
@@ -2,7 +2,7 @@ import copy
 import re
 
 from api.db import ParserType
-from rag.nlp import huqie, tokenize, tokenize_table, add_positions
+from rag.nlp import huqie, tokenize, tokenize_table, add_positions, bullets_category, title_frequency
 from deepdoc.parser import PdfParser
 from rag.utils import num_tokens_from_string
 
@@ -14,6 +14,8 @@ class Pdf(PdfParser):
 
     def __call__(self, filename, binary=None, from_page=0,
                  to_page=100000, zoomin=3, callback=None):
+        from timeit import default_timer as timer
+        start = timer()
         callback(msg="OCR is  running...")
         self.__images__(
             filename if not binary else binary,
@@ -23,19 +25,38 @@ class Pdf(PdfParser):
             callback
         )
         callback(msg="OCR finished.")
+        #for bb in self.boxes:
+        #    for b in bb:
+        #        print(b)
+        print("OCR:", timer()-start)
+
+        def get_position(bx):
+            poss = []
+            pn = bx["page_number"]
+            top = bx["top"] - self.page_cum_height[pn - 1]
+            bott = bx["bottom"] - self.page_cum_height[pn - 1]
+            poss.append((pn, bx["x0"], bx["x1"], top, min(bott, self.page_images[pn-1].size[1]/zoomin)))
+            while bott * zoomin > self.page_images[pn - 1].size[1]:
+                bott -= self.page_images[pn- 1].size[1] / zoomin
+                top = 0
+                pn += 1
+                poss.append((pn, bx["x0"], bx["x1"], top, min(bott, self.page_images[pn - 1].size[1] / zoomin)))
+            return poss
+
+        def tag(pn, left, right, top, bottom):
+            return "@@{}\t{:.1f}\t{:.1f}\t{:.1f}\t{:.1f}##" \
+                .format(pn, left, right, top, bottom)
 
-        from timeit import default_timer as timer
-        start = timer()
         self._layouts_rec(zoomin)
         callback(0.65, "Layout analysis finished.")
         print("paddle layouts:", timer() - start)
         self._table_transformer_job(zoomin)
         callback(0.67, "Table analysis finished.")
         self._text_merge()
-        self._concat_downward(concat_between_pages=False)
+        tbls = self._extract_table_figure(True, zoomin, True, True)
+        self._naive_vertical_merge()
         self._filter_forpages()
         callback(0.68, "Text merging finished")
-        tbls = self._extract_table_figure(True, zoomin, True, True)
 
         # clean mess
         for b in self.boxes:
@@ -44,25 +65,33 @@ class Pdf(PdfParser):
         # merge chunks with the same bullets
         self._merge_with_same_bullet()
 
-        # merge title with decent chunk
-        i = 0
-        while i + 1 < len(self.boxes):
-            b = self.boxes[i]
-            if b.get("layoutno","").find("title") < 0:
-                i += 1
-                continue
-            b_ = self.boxes[i + 1]
-            b_["text"] = b["text"] + "\n" + b_["text"]
-            b_["x0"] = min(b["x0"], b_["x0"])
-            b_["x1"] = max(b["x1"], b_["x1"])
-            b_["top"] = b["top"]
-            self.boxes.pop(i)
-
-        callback(0.8, "Parsing finished")
-        for b in self.boxes: print(b["text"], b.get("layoutno"))
-
-        print(tbls)
-        return [b["text"] + self._line_tag(b, zoomin) for b in self.boxes], tbls
+        # set pivot using the most frequent type of title,
+        # then merge between 2 pivot
+        bull = bullets_category([b["text"] for b in self.boxes])
+        most_level, levels = title_frequency(bull, [(b["text"], b.get("layoutno","")) for b in self.boxes])
+        assert len(self.boxes) == len(levels)
+        sec_ids = []
+        sid = 0
+        for i, lvl in enumerate(levels):
+            if lvl <= most_level: sid += 1
+            sec_ids.append(sid)
+            #print(lvl, self.boxes[i]["text"], most_level)
+
+        sections = [(b["text"], sec_ids[i], get_position(b)) for i, b in enumerate(self.boxes)]
+        for (img, rows), poss in tbls:
+            sections.append((rows[0], -1, [(p[0]+1, p[1], p[2], p[3], p[4]) for p in poss]))
+
+        chunks = []
+        last_sid = -2
+        for txt, sec_id, poss in sorted(sections, key=lambda x: (x[-1][0][0], x[-1][0][3], x[-1][0][1])):
+            poss = "\t".join([tag(*pos) for pos in poss])
+            if sec_id == last_sid or sec_id == -1:
+                if chunks:
+                    chunks[-1] += "\n" + txt + poss
+                    continue
+            chunks.append(txt + poss)
+            if sec_id > -1: last_sid = sec_id
+        return chunks
 
 
 def chunk(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", callback=None, **kwargs):
@@ -73,7 +102,7 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", ca
 
     if re.search(r"\.pdf$", filename, re.IGNORECASE):
         pdf_parser = Pdf()
-        cks, tbls = pdf_parser(filename if not binary else binary,
+        cks = pdf_parser(filename if not binary else binary,
                            from_page=from_page, to_page=to_page, callback=callback)
     else: raise NotImplementedError("file type not supported yet(pdf supported)")
     doc = {
@@ -84,16 +113,15 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", ca
     # is it English
     eng = lang.lower() == "english"#pdf_parser.is_english
 
-    res = tokenize_table(tbls, doc, eng)
-
     i = 0
     chunk = []
     tk_cnt = 0
+    res = []
     def add_chunk():
         nonlocal chunk, res, doc, pdf_parser, tk_cnt
         d = copy.deepcopy(doc)
         ck = "\n".join(chunk)
-        tokenize(d, pdf_parser.remove_tag(ck), pdf_parser.is_english)
+        tokenize(d, pdf_parser.remove_tag(ck), eng)
         d["image"], poss = pdf_parser.crop(ck, need_position=True)
         add_positions(d, poss)
         res.append(d)
@@ -101,7 +129,7 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", ca
         tk_cnt = 0
 
     while i < len(cks):
-        if tk_cnt > 128: add_chunk()
+        if tk_cnt > 256: add_chunk()
         txt = cks[i]
         txt_ = pdf_parser.remove_tag(txt)
         i += 1
@@ -109,6 +137,7 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", ca
         chunk.append(txt)
         tk_cnt += cnt
     if chunk: add_chunk()
+
     for i, d in enumerate(res):
         print(d)
         # d["image"].save(f"./logs/{i}.jpg")
@@ -117,6 +146,6 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", ca
 
 if __name__ == "__main__":
     import sys
-    def dummy(a, b):
+    def dummy(prog=None, msg=""):
         pass
     chunk(sys.argv[1], callback=dummy)
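
The rewritten Pdf.__call__ picks the most frequent title level as the pivot
level: each box at or above it opens a new section id, everything up to the
next pivot is merged into one chunk, and tables (sec_id -1) are folded into
the preceding chunk. A toy walk-through of the section-id pass (the level
values are made up):

    levels = [0, 5, 5, 1, 5, 0, 5]   # 0 and 1 are title levels, 5 is body text
    most_level = 0                   # the most frequent title level

    sec_ids, sid = [], 0
    for lvl in levels:
        if lvl <= most_level:        # pivot: a new section starts here
            sid += 1
        sec_ids.append(sid)
    print(sec_ids)                   # -> [1, 1, 1, 1, 1, 2, 2]
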
diff --git a/rag/app/naive.py b/rag/app/naive.py
index 82ecfca..cc48f5f 100644
--- a/rag/app/naive.py
+++ b/rag/app/naive.py
@@ -100,7 +100,10 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", ca
         print("--", ck)
         d = copy.deepcopy(doc)
         if pdf_parser:
-            d["image"], poss = pdf_parser.crop(ck, need_position=True)
+            try:
+                d["image"], poss = pdf_parser.crop(ck, need_position=True)
+            except Exception:
+                continue
             add_positions(d, poss)
             ck = pdf_parser.remove_tag(ck)
         tokenize(d, ck, eng)
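
The new try/except silently drops any chunk whose position crop fails. If
that loss should at least be visible in the logs, a variant (the logger is
assumed to be available in this module):

    try:
        d["image"], poss = pdf_parser.crop(ck, need_position=True)
    except Exception as e:
        cron_logger.warning("crop failed, dropping chunk: %s", e)
        continue
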
diff --git a/rag/nlp/__init__.py b/rag/nlp/__init__.py
index b027ac7..95e8308 100644
--- a/rag/nlp/__init__.py
+++ b/rag/nlp/__init__.py
@@ -1,4 +1,6 @@
 import random
+from collections import Counter
+
 from rag.utils import num_tokens_from_string
 from . import huqie
 from nltk import word_tokenize
@@ -175,6 +177,36 @@ def make_colon_as_title(sections):
         i += 1
 
 
+def title_frequency(bull, sections):
+    bullets_size = len(BULLET_PATTERN[bull])
+    levels = [bullets_size+1 for _ in range(len(sections))]
+    if not sections or bull < 0:
+        return bullets_size+1, levels
+
+    for i, (txt, layout) in enumerate(sections):
+        for j, p in enumerate(BULLET_PATTERN[bull]):
+            if re.match(p, txt.strip()):
+                levels[i] = j
+                break
+        else:
+            if re.search(r"(title|head)", layout) and not not_title(txt.split("@")[0]):
+                levels[i] = bullets_size
+    most_level = bullets_size+1
+    for l, c in sorted(Counter(levels).items(), key=lambda x:x[1]*-1):
+        if l <= bullets_size:
+            most_level = l
+            break
+    return most_level, levels
+
+
+def not_title(txt):
+    if re.match(r"第[零一二三四五六七八九十百0-9]+条", txt):
+        return False
+    if len(txt.split(" ")) > 12 or (txt.find(" ") < 0 and len(txt) >= 32):
+        return True
+    return re.search(r"[,;,。;!!]", txt)
+
+
 def hierarchical_merge(bull, sections, depth):
     if not sections or bull < 0:
         return []
@@ -185,12 +217,6 @@ def hierarchical_merge(bull, sections, depth):
     bullets_size = len(BULLET_PATTERN[bull])
     levels = [[] for _ in range(bullets_size + 2)]
 
-    def not_title(txt):
-        if re.match(r"第[零一二三四五六七八九十百0-9]+条", txt):
-            return False
-        if len(txt.split(" ")) > 12 or (txt.find(" ") < 0 and len(txt) >= 32):
-            return True
-        return re.search(r"[,;,。;!!]", txt)
 
     for i, (txt, layout) in enumerate(sections):
         for j, p in enumerate(BULLET_PATTERN[bull]):
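
title_frequency() scores each section text against the bullet patterns of
the detected category, falls back to the layout label for unmatched titles,
and returns the most common title level plus the per-section levels. A usage
sketch (the section texts are made up):

    from rag.nlp import bullets_category, title_frequency

    sections = [("第一章 总则", "title"),        # "Chapter 1: General"
                ("本章规定了适用范围。", "text"),  # a body sentence
                ("第二章 安装", "title"),        # "Chapter 2: Installation"
                ("安装步骤如下。", "text")]       # another body sentence
    bull = bullets_category([txt for txt, _ in sections])
    most_level, levels = title_frequency(bull, sections)
    # Sections with level <= most_level become chunk pivots in rag/app/manual.py.
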
diff --git a/rag/nlp/query.py b/rag/nlp/query.py
index 7486547..3d944b3 100644
--- a/rag/nlp/query.py
+++ b/rag/nlp/query.py
@@ -38,7 +38,7 @@ class EsQueryer:
             "",
             txt)
         return re.sub(
-            r"(what|who|how|which|where|why|(is|are|were|was) there) (is|are|were|was)*", "", txt, re.IGNORECASE)
+            r"(what|who|how|which|where|why|(is|are|were|was) there) (is|are|were|was|to)*", "", txt, re.IGNORECASE)
 
     def question(self, txt, tbl="qa", min_match="60%"):
         txt = re.sub(
@@ -50,16 +50,16 @@ class EsQueryer:
         txt = EsQueryer.rmWWW(txt)
 
         if not self.isChinese(txt):
-            tks = txt.split(" ")
-            q = []
+            tks = [t for t in txt.split(" ") if t.strip()]
+            q = list(tks)
             for i in range(1, len(tks)):
-                q.append("\"%s %s\"~2" % (tks[i - 1], tks[i]))
+                q.append("\"%s %s\"^2" % (tks[i - 1], tks[i]))
             if not q:
                 q.append(txt)
             return Q("bool",
                      must=Q("query_string", fields=self.flds,
                             type="best_fields", query=" OR ".join(q),
-                            boost=1, minimum_should_match="60%")
+                            boost=1, minimum_should_match=min_match)
                      ), txt.split(" ")
 
         def needQieqie(tk):
@@ -147,7 +147,7 @@ class EsQueryer:
         atks = toDict(atks)
         btkss = [toDict(tks) for tks in btkss]
         tksim = [self.similarity(atks, btks) for btks in btkss]
-        return np.array(sims[0]) * vtweight + np.array(tksim) * tkweight, sims[0], tksim
+        return np.array(sims[0]) * vtweight + np.array(tksim) * tkweight, tksim, sims[0]
 
     def similarity(self, qtwt, dtwt):
         if isinstance(dtwt, type("")):
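
The last hunk swaps the two trailing return values, so callers now receive
(combined, token_sims, vector_sims). A sketch of the blend with made-up
numbers:

    import numpy as np

    vector_sims = np.array([0.82, 0.40])   # embedding cosine similarities
    token_sims = np.array([0.60, 0.75])    # term-overlap similarities
    tkweight, vtweight = 0.3, 0.7

    combined = vector_sims * vtweight + token_sims * tkweight
    # returned as (combined, token_sims, vector_sims)
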
diff --git a/rag/nlp/search.py b/rag/nlp/search.py
index 422d23f..6b99165 100644
--- a/rag/nlp/search.py
+++ b/rag/nlp/search.py
@@ -119,6 +119,7 @@ class Dealer:
             s["knn"]["filter"] = bqry.to_dict()
             s["knn"]["similarity"] = 0.17
             res = self.es.search(s, idxnm=idxnm, timeout="600s", src=src)
+            es_logger.info("【Q】: {}".format(json.dumps(s)))
 
         kwds = set([])
         for k in keywords:
-- 
GitLab