diff --git a/api/apps/chunk_app.py b/api/apps/chunk_app.py
index 247627af5ab099d9e559b96a5850e8bad629c144..0458160b28505b38fe53733b284284d37ba79de6 100644
--- a/api/apps/chunk_app.py
+++ b/api/apps/chunk_app.py
@@ -79,7 +79,7 @@ def list():
         return get_json_result(data=res)
     except Exception as e:
         if str(e).find("not_found") > 0:
-            return get_json_result(data=False, retmsg=f'Index not found!',
+            return get_json_result(data=False, retmsg='No chunk found!',
                                    retcode=RetCode.DATA_ERROR)
         return server_error_response(e)
 
@@ -262,6 +262,6 @@ def retrieval_test():
         return get_json_result(data=ranks)
     except Exception as e:
         if str(e).find("not_found") > 0:
-            return get_json_result(data=False, retmsg=f'Index not found!',
+            return get_json_result(data=False, retmsg='No chunk found! Please check the chunk status.',
                                    retcode=RetCode.DATA_ERROR)
         return server_error_response(e)
diff --git a/api/db/services/knowledgebase_service.py b/api/db/services/knowledgebase_service.py
index ce34b72a334617ce0ae7be777c5e23947465c98e..3146cab688e34d338637e453f4041a045de8dd1f 100644
--- a/api/db/services/knowledgebase_service.py
+++ b/api/db/services/knowledgebase_service.py
@@ -44,7 +44,8 @@ class KnowledgebaseService(CommonService):
     def get_detail(cls, kb_id):
         fields = [
             cls.model.id,
-            Tenant.embd_id,
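+            # use the knowledge base's own embedding model id instead of the tenant's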
+            cls.model.embd_id,
             cls.model.avatar,
             cls.model.name,
             cls.model.language,
diff --git a/rag/app/manual.py b/rag/app/manual.py
index 284e3d6e59cb367374b8a0f3b0476b314074c35e..bc8e0f17c4ca870cc4e5b581a6a4d961fbf4c7b1 100644
--- a/rag/app/manual.py
+++ b/rag/app/manual.py
@@ -85,7 +85,7 @@ def chunk(filename, binary=None, from_page=0, to_page=100000,
             for t, lvl in pdf_parser.outlines:
                 tks = set([t[i] + t[i + 1] for i in range(len(t) - 1)])
                 tks_ = set([txt[i] + txt[i + 1]
-                           for i in range(min(len(t), len(txt) - 1))])
+                            for i in range(min(len(t), len(txt) - 1))])
                 if len(set(tks & tks_)) / max([len(tks), len(tks_), 1]) > 0.8:
                     levels.append(lvl)
                     break
@@ -109,7 +109,7 @@ def chunk(filename, binary=None, from_page=0, to_page=100000,
     sections = [(txt, sec_ids[i], poss)
                 for i, (txt, _, poss) in enumerate(sections)]
     for (img, rows), poss in tbls:
-        if not rows:continue
+        if not rows: continue
         sections.append((rows if isinstance(rows, str) else rows[0], -1,
                          [(p[0] + 1 - from_page, p[1], p[2], p[3], p[4]) for p in poss]))
 
@@ -125,7 +125,8 @@ def chunk(filename, binary=None, from_page=0, to_page=100000,
     for txt, sec_id, poss in sorted(sections, key=lambda x: (
             x[-1][0][0], x[-1][0][3], x[-1][0][1])):
         poss = "\t".join([tag(*pos) for pos in poss])
-        if tk_cnt < 2048 and (sec_id == last_sid or sec_id == -1):
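+        # merge while the current chunk is tiny (<32 tokens), or under 1024 tokens and still in the same section (sec_id == -1 marks table rows)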
+        if tk_cnt < 32 or (tk_cnt < 1024 and (sec_id == last_sid or sec_id == -1)):
             if chunks:
                 chunks[-1] += "\n" + txt + poss
                 tk_cnt += num_tokens_from_string(txt)
@@ -143,6 +143,9 @@ def chunk(filename, binary=None, from_page=0, to_page=100000,
 if __name__ == "__main__":
     import sys
 
+
     def dummy(prog=None, msg=""):
         pass
+
+
     chunk(sys.argv[1], callback=dummy)
diff --git a/rag/app/qa.py b/rag/app/qa.py
index 1b42f1a8df92bf9f922078784c756a83abcf5180..a4d49cd74088949d6203d12b1dd2b4d15b1613c6 100644
--- a/rag/app/qa.py
+++ b/rag/app/qa.py
@@ -133,7 +133,7 @@ def chunk(filename, binary=None, lang="Chinese", callback=None, **kwargs):
         return res
 
     raise NotImplementedError(
-        "file type not supported yet(pptx, pdf supported)")
+        "Excel and csv(txt) format files are supported.")
 
 
 if __name__ == "__main__":
diff --git a/rag/nlp/query.py b/rag/nlp/query.py
index 6a829a1d3c92dbd93152c4441318a942c3372d48..26753d72107e1bddd86d3c5926f55f46928e3d0c 100644
--- a/rag/nlp/query.py
+++ b/rag/nlp/query.py
@@ -73,7 +73,8 @@ class EsQueryer:
             return True
 
         qs, keywords = [], []
-        for tt in self.tw.split(txt):  # .split(" "):
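+        # only the first 256 tokens of the question are used to build the query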
+        for tt in self.tw.split(txt)[:256]:  # .split(" "):
             if not tt:
                 continue
             twts = self.tw.weights([tt])