From e876f58b4c98d3ed8b31c3a736b7adc7ea3b9f5e Mon Sep 17 00:00:00 2001
From: KevinHuSh <kevinhu.sh@gmail.com>
Date: Fri, 29 Mar 2024 14:38:15 +0800
Subject: [PATCH] refine readme (#170)

---
 README.md                      | 24 ++++++++++++------------
 api/db/services/llm_service.py |  4 ++--
 rag/llm/embedding_model.py     |  2 +-
 3 files changed, 15 insertions(+), 15 deletions(-)

diff --git a/README.md b/README.md
index 0d898e8..7bf4382 100644
--- a/README.md
+++ b/README.md
@@ -47,19 +47,19 @@
 ## 🤺RagFlow vs. other RAG applications
 
-| Feature | RagFlow | Langchain-Chatchat | Dify.AI | Assistants API | QAnythig | LangChain |
-|---------|:---------:|:----------------:|:-----------:|:-----------:|:-----------:|:-----------:|
-| **Well-Founded Answer** | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
-| **Trackable Chunking** | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
-| **Chunking Method** | Rich Variety | Naive | Naive | Naive | Naive | Naive |
-| **Table Structure Recognition** | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
+| Feature | RagFlow | Langchain-Chatchat | Assistants API | QAnything | LangChain |
+|---------|:---------:|:----------------:|:-----------:|:-----------:|:-----------:|
+| **Well-Founded Answer** | :white_check_mark: | :x: | :x: | :x: | :x: |
+| **Trackable Chunking** | :white_check_mark: | :x: | :x: | :x: | :x: |
+| **Chunking Method** | Rich Variety | Naive | Naive | Naive | Naive |
+| **Table Structure Recognition** | :white_check_mark: | :x: | :x: | :x: | :x: |
 | **Structured Data Lookup** | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
-| **Programming Approach** | API-oriented | API-oriented | API-oriented | API-oriented | API-oriented | Python Code-oriented |
-| **RAG Engine** | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :x: | :x: |
-| **Prompt IDE** | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :x: | :x: |
-| **Supported LLMs** | Rich Variety | Rich Variety | Rich Variety | OpenAI-only | QwenLLM | Rich Variety |
-| **Local Deployment** | :white_check_mark: | :white_check_mark: | :white_check_mark: | :x: | :x: | :x: |
-| **Ecosystem Strategy** | Open Source | Open Source | Open Source | Close Source | Open Source | Open Source |
+| **Programming Approach** | API-oriented | API-oriented | API-oriented | API-oriented | Python Code-oriented |
+| **RAG Engine** | :white_check_mark: | :white_check_mark: | :white_check_mark: | :x: | :x: |
+| **Prompt IDE** | :white_check_mark: | :white_check_mark: | :white_check_mark: | :x: | :x: |
+| **Supported LLMs** | Rich Variety | Rich Variety | OpenAI-only | QwenLLM | Rich Variety |
+| **Local Deployment** | :white_check_mark: | :white_check_mark: | :x: | :x: | :x: |
+| **Ecosystem Strategy** | Open Source | Open Source | Closed Source | Open Source | Open Source |
 
 ## 🔎 System Architecture

diff --git a/api/db/services/llm_service.py b/api/db/services/llm_service.py
index 83eb82f..0e9e774 100644
--- a/api/db/services/llm_service.py
+++ b/api/db/services/llm_service.py
@@ -84,7 +84,7 @@ class TenantLLMService(CommonService):
             if model_config["llm_factory"] not in EmbeddingModel:
                 return
             return EmbeddingModel[model_config["llm_factory"]](
-                model_config["api_key"], model_config["llm_name"], model_config["api_base"])
+                model_config["api_key"], model_config["llm_name"], base_url=model_config["api_base"])
 
         if llm_type == LLMType.IMAGE2TEXT.value:
             if model_config["llm_factory"] not in CvModel:
                 return
             return CvModel[model_config["llm_factory"]](
@@ -98,7 +98,7 @@ class TenantLLMService(CommonService):
model_config["llm_factory"] not in ChatModel: return return ChatModel[model_config["llm_factory"]]( - model_config["api_key"], model_config["llm_name"], model_config["api_base"]) + model_config["api_key"], model_config["llm_name"], base_url=model_config["api_base"]) @classmethod @DB.connection_context() diff --git a/rag/llm/embedding_model.py b/rag/llm/embedding_model.py index 06a573d..6ee3a58 100644 --- a/rag/llm/embedding_model.py +++ b/rag/llm/embedding_model.py @@ -51,7 +51,7 @@ class Base(ABC): class HuEmbedding(Base): - def __init__(self, **kwargs): + def __init__(self, *args, **kwargs): """ If you have trouble downloading HuggingFace models, -_^ this might help!! -- GitLab