diff --git a/README.md b/README.md
index 0325cbecbc40a7f6c1c5e2bc75ea4edf82987a33..c55847d55223d0b9df4ec303edf59c21737f8fc4 100644
--- a/README.md
+++ b/README.md
@@ -20,7 +20,7 @@
 <img height="21" src="https://img.shields.io/badge/License-Apache--2.0-ffffff?style=flat-square&labelColor=d4eaf7&color=7d09f1" alt="license">
 </a>
 </p>
 
-[RagFlow](http://demo.ragflow.io) is a knowledge management platform built on custom-build document understanding engine and LLM, with reasoned and well-founded answers to your question. Clone this repository, you can deploy your own knowledge management platform to empower your business with AI.
+[RagFlow](https://demo.ragflow.io) is a knowledge management platform built on a custom-built document understanding engine and LLMs, giving reasoned and well-founded answers to your questions. Clone this repository and deploy your own knowledge management platform to empower your business with AI.
 
 <div align="center" style="margin-top:20px;margin-bottom:20px;">
@@ -56,12 +56,12 @@
 Then, you need to check the following command:
 ```bash
-121:/ragflow# sysctl vm.max_map_count
+$ sysctl vm.max_map_count
 vm.max_map_count = 262144
 ```
 
 If **vm.max_map_count** is not greater than 65535:
 ```bash
-121:/ragflow# sudo sysctl -w vm.max_map_count=262144
+$ sudo sysctl -w vm.max_map_count=262144
 ```
 
 Note that this change is reset after a system reboot. To render your change permanent, add or update the following line in **/etc/sysctl.conf**:
@@ -126,6 +126,7 @@ Open your browser, enter the IP address of your server, _**Hallelujah**_ again!
 <div align="center" style="margin-top:20px;margin-bottom:20px;">
 <img src="https://github.com/infiniflow/ragflow/assets/12318111/d6ac5664-c237-4200-a7c2-a4a00691b485" width="1000"/>
 </div>
+
 ## 🔧 Configurations
 
 If you need to change the default setting of the system when you deploy it. There several ways to configure it.
diff --git a/api/apps/llm_app.py b/api/apps/llm_app.py
index e8b3dcdd2f0b326ddce6e195f5412be6d8ba73dc..25bcca25942fb8769618a2193d7968bfb62e2790 100644
--- a/api/apps/llm_app.py
+++ b/api/apps/llm_app.py
@@ -45,7 +45,7 @@ def set_api_key():
     for llm in LLMService.query(fid=factory):
         if llm.model_type == LLMType.EMBEDDING.value:
             mdl = EmbeddingModel[factory](
-                req["api_key"], llm.llm_name)
+                req["api_key"], llm.llm_name, base_url=req.get("base_url"))
             try:
                 arr, tc = mdl.encode(["Test if the api key is available"])
                 if len(arr[0]) == 0 or tc == 0:
@@ -54,7 +54,7 @@ def set_api_key():
                 msg += f"\nFail to access embedding model({llm.llm_name}) using this api key." + str(e)
         elif not chat_passed and llm.model_type == LLMType.CHAT.value:
             mdl = ChatModel[factory](
-                req["api_key"], llm.llm_name)
+                req["api_key"], llm.llm_name, base_url=req.get("base_url"))
             try:
                 m, tc = mdl.chat(None, [{"role": "user", "content": "Hello! How are you doing!"}], {
                     "temperature": 0.9})
@@ -83,7 +83,9 @@ def set_api_key():
             llm_factory=factory,
             llm_name=llm.llm_name,
             model_type=llm.model_type,
-            api_key=req["api_key"])
+            api_key=req["api_key"],
+            api_base=req.get("base_url", "")
+        )
 
     return get_json_result(data=True)
 
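The `llm_app.py` hunks thread an optional `base_url` from the request into each model constructor and persist it as `api_base`, probing the credentials with a tiny request before saving. A minimal sketch of that validate-then-save pattern, assuming the `EmbeddingModel` registry exported by `rag.llm` and treating the factory key as caller-supplied:

```python
from rag.llm import EmbeddingModel

# Sketch only: fail on a bad key or an unreachable gateway at configuration
# time rather than mid-pipeline. Mirrors the checks in the hunks above.
def probe_embedding(factory: str, api_key: str, llm_name: str,
                    base_url: str | None = None) -> None:
    mdl = EmbeddingModel[factory](api_key, llm_name, base_url=base_url)
    arr, tc = mdl.encode(["Test if the api key is available"])
    if len(arr[0]) == 0 or tc == 0:
        raise ValueError(f"Fail to access embedding model({llm_name})")
```
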
diff --git a/api/db/services/llm_service.py b/api/db/services/llm_service.py
index f4e2a41c1cde9fed8b696e2026bd4636a7f50dd4..83eb82f162331d45b0bdbb5cf183d449dc79c108 100644
--- a/api/db/services/llm_service.py
+++ b/api/db/services/llm_service.py
@@ -84,19 +84,21 @@ class TenantLLMService(CommonService):
             if model_config["llm_factory"] not in EmbeddingModel:
                 return
             return EmbeddingModel[model_config["llm_factory"]](
-                model_config["api_key"], model_config["llm_name"])
+                model_config["api_key"], model_config["llm_name"], base_url=model_config["api_base"])
 
         if llm_type == LLMType.IMAGE2TEXT.value:
             if model_config["llm_factory"] not in CvModel:
                 return
             return CvModel[model_config["llm_factory"]](
-                model_config["api_key"], model_config["llm_name"], lang)
+                model_config["api_key"], model_config["llm_name"], lang,
+                base_url=model_config["api_base"]
+            )
 
         if llm_type == LLMType.CHAT.value:
             if model_config["llm_factory"] not in ChatModel:
                 return
             return ChatModel[model_config["llm_factory"]](
-                model_config["api_key"], model_config["llm_name"])
+                model_config["api_key"], model_config["llm_name"], base_url=model_config["api_base"])
 
     @classmethod
     @DB.connection_context()
diff --git a/deepdoc/vision/recognizer.py b/deepdoc/vision/recognizer.py
index ad8b9ba24fa1310fac7827d7a746b580dc552587..67e096ef2def315e1b51a1891e741491cbee89d2 100644
--- a/deepdoc/vision/recognizer.py
+++ b/deepdoc/vision/recognizer.py
@@ -43,6 +43,8 @@ class Recognizer(object):
         if not os.path.exists(model_file_path):
             model_dir = snapshot_download(repo_id="InfiniFlow/deepdoc")
             model_file_path = os.path.join(model_dir, task_name + ".onnx")
+        else:
+            model_file_path = os.path.join(model_dir, task_name + ".onnx")
 
         if not os.path.exists(model_file_path):
             raise ValueError("not find model file path {}".format(
diff --git a/rag/llm/chat_model.py b/rag/llm/chat_model.py
index e44af5360c7513759f9a69dd756462d42449d291..af3bbac33f76c13aece7f15f9189b3aedea1e241 100644
--- a/rag/llm/chat_model.py
+++ b/rag/llm/chat_model.py
@@ -31,8 +31,9 @@ class Base(ABC):
 
 
 class GptTurbo(Base):
-    def __init__(self, key, model_name="gpt-3.5-turbo"):
-        self.client = OpenAI(api_key=key)
+    def __init__(self, key, model_name="gpt-3.5-turbo", base_url="https://api.openai.com/v1"):
+        if not base_url: base_url = "https://api.openai.com/v1"
+        self.client = OpenAI(api_key=key, base_url=base_url)
         self.model_name = model_name
 
     def chat(self, system, history, gen_conf):
@@ -53,9 +54,10 @@ class GptTurbo(Base):
 
 
 class MoonshotChat(GptTurbo):
-    def __init__(self, key, model_name="moonshot-v1-8k"):
+    def __init__(self, key, model_name="moonshot-v1-8k", base_url="https://api.moonshot.cn/v1"):
+        if not base_url: base_url = "https://api.moonshot.cn/v1"
         self.client = OpenAI(
-            api_key=key, base_url="https://api.moonshot.cn/v1",)
+            api_key=key, base_url=base_url)
         self.model_name = model_name
 
     def chat(self, system, history, gen_conf):
@@ -76,7 +78,7 @@ class MoonshotChat(GptTurbo):
 
 
 class QWenChat(Base):
-    def __init__(self, key, model_name=Generation.Models.qwen_turbo):
+    def __init__(self, key, model_name=Generation.Models.qwen_turbo, **kwargs):
         import dashscope
         dashscope.api_key = key
         self.model_name = model_name
@@ -105,7 +107,7 @@ class QWenChat(Base):
 
 
 class ZhipuChat(Base):
-    def __init__(self, key, model_name="glm-3-turbo"):
+    def __init__(self, key, model_name="glm-3-turbo", **kwargs):
         self.client = ZhipuAI(api_key=key)
         self.model_name = model_name
 
@@ -154,7 +156,7 @@ class LocalLLM(Base):
 
         return do_rpc
 
-    def __init__(self, key, model_name="glm-3-turbo"):
+    def __init__(self, *args, **kwargs):
         self.client = LocalLLM.RPCProxy("127.0.0.1", 7860)
 
     def chat(self, system, history, gen_conf):
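Each OpenAI-compatible constructor repeats the same guard because callers hand over the stored `api_base` even when it is an empty string, so the keyword default alone never fires. A hedged sketch of the idiom with a hypothetical helper name:

```python
from openai import OpenAI

OPENAI_DEFAULT = "https://api.openai.com/v1"  # provider default used in the patch

def make_client(api_key: str, base_url: str | None = None) -> OpenAI:
    # "" and None both mean "no override", so `or` re-applies the default;
    # this is the one-expression form of the in-body guard above.
    return OpenAI(api_key=api_key, base_url=base_url or OPENAI_DEFAULT)
```

Providers whose SDKs have a fixed endpoint (`QWenChat`, `ZhipuChat`) instead absorb the unused keyword via `**kwargs`, which keeps the construction call in `llm_service.py` uniform across factories.
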
def __init__(self, key, model_name="glm-3-turbo", **kwargs): self.client = ZhipuAI(api_key=key) self.model_name = model_name @@ -154,7 +156,7 @@ class LocalLLM(Base): return do_rpc - def __init__(self, key, model_name="glm-3-turbo"): + def __init__(self, **kwargs): self.client = LocalLLM.RPCProxy("127.0.0.1", 7860) def chat(self, system, history, gen_conf): diff --git a/rag/llm/cv_model.py b/rag/llm/cv_model.py index cb89509acbbb8759cbbe38b53c7f7679036a0978..61b942cdef72aec227d36f2fdad22889d081f5a6 100644 --- a/rag/llm/cv_model.py +++ b/rag/llm/cv_model.py @@ -67,8 +67,9 @@ class Base(ABC): class GptV4(Base): - def __init__(self, key, model_name="gpt-4-vision-preview", lang="Chinese"): - self.client = OpenAI(api_key=key) + def __init__(self, key, model_name="gpt-4-vision-preview", lang="Chinese", base_url="https://api.openai.com/v1"): + if not base_url: base_url="https://api.openai.com/v1" + self.client = OpenAI(api_key=key, base_url=base_url) self.model_name = model_name self.lang = lang @@ -84,7 +85,7 @@ class GptV4(Base): class QWenCV(Base): - def __init__(self, key, model_name="qwen-vl-chat-v1", lang="Chinese"): + def __init__(self, key, model_name="qwen-vl-chat-v1", lang="Chinese", **kwargs): import dashscope dashscope.api_key = key self.model_name = model_name @@ -123,7 +124,7 @@ class QWenCV(Base): class Zhipu4V(Base): - def __init__(self, key, model_name="glm-4v", lang="Chinese"): + def __init__(self, key, model_name="glm-4v", lang="Chinese", **kwargs): self.client = ZhipuAI(api_key=key) self.model_name = model_name self.lang = lang @@ -140,7 +141,7 @@ class Zhipu4V(Base): class LocalCV(Base): - def __init__(self, key, model_name="glm-4v", lang="Chinese"): + def __init__(self, key, model_name="glm-4v", lang="Chinese", **kwargs): pass def describe(self, image, max_tokens=1024): diff --git a/rag/llm/embedding_model.py b/rag/llm/embedding_model.py index 169a4df836f8328eedab6cc72c743030d0985798..06a573d5a47e0f707b4cabb6852bb770656be385 100644 --- a/rag/llm/embedding_model.py +++ b/rag/llm/embedding_model.py @@ -51,7 +51,7 @@ class Base(ABC): class HuEmbedding(Base): - def __init__(self, key="", model_name=""): + def __init__(self, **kwargs): """ If you have trouble downloading HuggingFace models, -_^ this might help!! 
@@ -81,8 +81,9 @@ class HuEmbedding(Base):
 
 
 class OpenAIEmbed(Base):
-    def __init__(self, key, model_name="text-embedding-ada-002"):
-        self.client = OpenAI(api_key=key)
+    def __init__(self, key, model_name="text-embedding-ada-002", base_url="https://api.openai.com/v1"):
+        if not base_url: base_url = "https://api.openai.com/v1"
+        self.client = OpenAI(api_key=key, base_url=base_url)
         self.model_name = model_name
 
     def encode(self, texts: list, batch_size=32):
@@ -98,7 +99,7 @@ class OpenAIEmbed(Base):
 
 
 class QWenEmbed(Base):
-    def __init__(self, key, model_name="text_embedding_v2"):
+    def __init__(self, key, model_name="text_embedding_v2", **kwargs):
         dashscope.api_key = key
         self.model_name = model_name
 
@@ -131,7 +132,7 @@ class QWenEmbed(Base):
 
 
 class ZhipuEmbed(Base):
-    def __init__(self, key, model_name="embedding-2"):
+    def __init__(self, key, model_name="embedding-2", **kwargs):
         self.client = ZhipuAI(api_key=key)
         self.model_name = model_name
 
diff --git a/rag/svr/task_executor.py b/rag/svr/task_executor.py
index 517d8a2fe39a74ef5d256a0ed4c3334150650973..79186487d76efa0dc7e4fe00ecd163d7ea29bb52 100644
--- a/rag/svr/task_executor.py
+++ b/rag/svr/task_executor.py
@@ -280,4 +280,5 @@ if __name__ == "__main__":
     from mpi4py import MPI
 
     comm = MPI.COMM_WORLD
-    main(int(sys.argv[2]), int(sys.argv[1]))
+    while True:
+        main(int(sys.argv[2]), int(sys.argv[1]))
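The executor used to run one pass of `main` and exit; wrapping the call in `while True:` turns it into a long-lived polling worker. A hedged variant with basic crash isolation, assuming it sits where the patched block does so `main` is in scope (the backoff and logging are assumptions, not part of the patch):

```python
import sys
import time
import traceback

while True:
    try:
        main(int(sys.argv[2]), int(sys.argv[1]))  # one polling pass
    except Exception:
        traceback.print_exc()  # keep the worker alive on task errors
        time.sleep(1)          # back off instead of hot-looping on failure
```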