From 8a74594cf551352c37c363abc008a31ef572d35a Mon Sep 17 00:00:00 2001 From: Zncl2222 <3002shinning@gmail.com> Date: Tue, 29 Oct 2024 22:52:34 +0800 Subject: [PATCH 1/3] feat: add support to use custom_prompt in memory.add function --- mem0/memory/graph_memory.py | 5 +++-- mem0/memory/main.py | 17 ++++++++++------- 2 files changed, 13 insertions(+), 9 deletions(-) diff --git a/mem0/memory/graph_memory.py b/mem0/memory/graph_memory.py index 09ad55cfe9..5ca7dca5ff 100644 --- a/mem0/memory/graph_memory.py +++ b/mem0/memory/graph_memory.py @@ -48,7 +48,7 @@ def __init__(self, config): self.user_id = None self.threshold = 0.7 - def add(self, data, filters): + def add(self, data, filters, graph_prompt): """ Adds data to the graph. @@ -60,7 +60,8 @@ def add(self, data, filters): # retrieve the search results search_output = self._search(data, filters) - if self.config.graph_store.custom_prompt: + custom_prompt = graph_prompt if graph_prompt else self.config.graph_store.custom_prompt + if custom_prompt: messages = [ { "role": "system", diff --git a/mem0/memory/main.py b/mem0/memory/main.py index a0d0fbfe3b..6c492c42ff 100644 --- a/mem0/memory/main.py +++ b/mem0/memory/main.py @@ -67,6 +67,7 @@ def add( metadata=None, filters=None, prompt=None, + graph_prompt=None, ): """ Create a new memory. @@ -79,6 +80,7 @@ def add( metadata (dict, optional): Metadata to store with the memory. Defaults to None. filters (dict, optional): Filters to apply to the search. Defaults to None. prompt (str, optional): Prompt to use for memory deduction. Defaults to None. + prompt (str, optional): Prompt to use for graph memory deduction. Defaults to None. Returns: dict: A dictionary containing the result of the memory addition operation. 
@@ -111,8 +113,8 @@ def add( messages = [{"role": "user", "content": messages}] with concurrent.futures.ThreadPoolExecutor() as executor: - future1 = executor.submit(self._add_to_vector_store, messages, metadata, filters) - future2 = executor.submit(self._add_to_graph, messages, filters) + future1 = executor.submit(self._add_to_vector_store, messages, metadata, filters, prompt) + future2 = executor.submit(self._add_to_graph, messages, filters, graph_prompt) concurrent.futures.wait([future1, future2]) @@ -134,11 +136,12 @@ def add( ) return {"message": "ok"} - def _add_to_vector_store(self, messages, metadata, filters): + def _add_to_vector_store(self, messages, metadata, filters, prompt): parsed_messages = parse_messages(messages) - if self.custom_prompt: - system_prompt = self.custom_prompt + custom_prompt = prompt if prompt else self.custom_prompt + if custom_prompt: + system_prompt = custom_prompt user_prompt = f"Input: {parsed_messages}" else: system_prompt, user_prompt = get_fact_retrieval_messages(parsed_messages) @@ -230,7 +233,7 @@ def _add_to_vector_store(self, messages, metadata, filters): return returned_memories - def _add_to_graph(self, messages, filters): + def _add_to_graph(self, messages, filters, graph_prompt): added_entities = [] if self.api_version == "v1.1" and self.enable_graph: if filters["user_id"]: @@ -242,7 +245,7 @@ def _add_to_graph(self, messages, filters): else: self.graph.user_id = "USER" data = "\n".join([msg["content"] for msg in messages if "content" in msg and msg["role"] != "system"]) - added_entities = self.graph.add(data, filters) + added_entities = self.graph.add(data, filters, graph_prompt) return added_entities From cd354861d7977f51cec7c35ad4137a5e4f55cce4 Mon Sep 17 00:00:00 2001 From: Zncl2222 <3002shinning@gmail.com> Date: Wed, 30 Oct 2024 20:56:59 +0800 Subject: [PATCH 2/3] test: add test cases to add prompt in memory.add --- tests/test_main.py | 23 ++++++++++++++++++----- 1 file changed, 18 insertions(+), 5 deletions(-) 
diff --git a/tests/test_main.py b/tests/test_main.py index a311f854b2..1ca4eba4bd 100644 --- a/tests/test_main.py +++ b/tests/test_main.py @@ -32,14 +32,27 @@ def memory_instance(): return Memory(config) -@pytest.mark.parametrize("version, enable_graph", [("v1.0", False), ("v1.1", True)]) -def test_add(memory_instance, version, enable_graph): +@pytest.mark.parametrize( + "version, enable_graph, custom_prompt", + [ + ("v1.0", False, None), + ("v1.1", True, None), + ("v1.0", False, "CustomPrompt"), + ("v1.1", True, "CustomPrompt"), + ] +) +def test_add(memory_instance, version, enable_graph, custom_prompt): memory_instance.config.version = version memory_instance.enable_graph = enable_graph memory_instance._add_to_vector_store = Mock(return_value=[{"memory": "Test memory", "event": "ADD"}]) memory_instance._add_to_graph = Mock(return_value=[]) - result = memory_instance.add(messages=[{"role": "user", "content": "Test message"}], user_id="test_user") + result = memory_instance.add( + messages=[{"role": "user", "content": "Test message"}], + user_id="test_user", + prompt=custom_prompt, + graph_prompt=custom_prompt + ) assert "results" in result assert result["results"] == [{"memory": "Test memory", "event": "ADD"}] @@ -47,12 +60,12 @@ def test_add(memory_instance, version, enable_graph): assert result["relations"] == [] memory_instance._add_to_vector_store.assert_called_once_with( - [{"role": "user", "content": "Test message"}], {"user_id": "test_user"}, {"user_id": "test_user"} + [{"role": "user", "content": "Test message"}], {"user_id": "test_user"}, {"user_id": "test_user"}, custom_prompt ) # Remove the conditional assertion for _add_to_graph memory_instance._add_to_graph.assert_called_once_with( - [{"role": "user", "content": "Test message"}], {"user_id": "test_user"} + [{"role": "user", "content": "Test message"}], {"user_id": "test_user"}, custom_prompt ) From e2debe3a3e1126ed3769a60449a2920093be919e Mon Sep 17 00:00:00 2001 From: Zncl2222 
<3002shinning@gmail.com> Date: Thu, 31 Oct 2024 11:07:25 +0800 Subject: [PATCH 3/3] docs: add documentation for prompt parameter in memory.add function --- docs/features/custom-prompts.mdx | 17 +++++++++++++++++ docs/open-source/graph_memory/features.mdx | 6 ++++++ mem0/memory/main.py | 2 +- 3 files changed, 24 insertions(+), 1 deletion(-) diff --git a/docs/features/custom-prompts.mdx b/docs/features/custom-prompts.mdx index ff6fe0e67f..7df131f099 100644 --- a/docs/features/custom-prompts.mdx +++ b/docs/features/custom-prompts.mdx @@ -107,3 +107,20 @@ m.add("I like going to hikes", user_id="alice") } ``` + + +## Customizing Prompts per Memory Addition + +In addition to setting a default prompt in the configuration, you can also override prompts for individual memory entries by using the `prompt` and `graph_prompt` parameters in `m.add()`. This allows you to tailor specific entries without changing the overall configuration. + +For example, to add a memory with a custom prompt: + +```python Code +m.add("Yesterday, I ordered a laptop, the order id is 12345", user_id="alice", prompt=custom_prompt) +``` + +You can also use `graph_prompt` to customize the prompt specifically for graph memory entries: + +```python Code +m.add("Yesterday, I ordered a laptop, the order id is 12345", user_id="alice", graph_prompt=graph_prompt) +``` diff --git a/docs/open-source/graph_memory/features.mdx b/docs/open-source/graph_memory/features.mdx index dd2cc3e352..908f6eac1a 100644 --- a/docs/open-source/graph_memory/features.mdx +++ b/docs/open-source/graph_memory/features.mdx @@ -33,6 +33,12 @@ config = { m = Memory.from_config(config_dict=config) ``` +You can also **override prompts** for individual memory additions by using the `graph_prompt` parameter in `m.add()`. + +```python Code +m.add("Yesterday, I ordered a laptop, the order id is 12345", user_id="alice", graph_prompt=graph_prompt) +``` + If you want to use a managed version of Mem0, please check out [Mem0](https://mem0.dev/pd). 
If you have any questions, please feel free to reach out to us using one of the following methods: diff --git a/mem0/memory/main.py b/mem0/memory/main.py index 6c492c42ff..df0f6e9cf2 100644 --- a/mem0/memory/main.py +++ b/mem0/memory/main.py @@ -80,7 +80,7 @@ def add( metadata (dict, optional): Metadata to store with the memory. Defaults to None. filters (dict, optional): Filters to apply to the search. Defaults to None. prompt (str, optional): Prompt to use for memory deduction. Defaults to None. - prompt (str, optional): Prompt to use for graph memory deduction. Defaults to None. + graph_prompt (str, optional): Prompt to use for graph memory deduction. Defaults to None. Returns: dict: A dictionary containing the result of the memory addition operation.