# feedback.py
import concurrent.futures
import difflib
import json
import re

from datetime import datetime
from typing import TYPE_CHECKING, Any, Literal

from tenacity import retry, stop_after_attempt, wait_random_exponential

from memos.configs.memory import MemFeedbackConfig
from memos.context.context import ContextThreadPoolExecutor
from memos.dependency import require_python_package
from memos.embedders.factory import EmbedderFactory, OllamaEmbedder
from memos.graph_dbs.factory import GraphStoreFactory, PolarDBGraphDB
from memos.llms.factory import AzureLLM, LLMFactory, OllamaLLM, OpenAILLM
from memos.log import get_logger
from memos.mem_feedback.base import BaseMemFeedback
from memos.mem_feedback.utils import (
    extract_bracket_content,
    extract_square_brackets_content,
    general_split_into_chunks,
    make_mem_item,
    should_keep_update,
    split_into_chunks,
)
from memos.mem_reader.factory import MemReaderFactory
from memos.mem_reader.read_multi_modal import detect_lang
from memos.memories.textual.item import TextualMemoryItem
from memos.memories.textual.tree_text_memory.organize.manager import (
    MemoryManager,
    extract_working_binding_ids,
)
from memos.memories.textual.tree_text_memory.retrieve.retrieve_utils import StopwordManager


if TYPE_CHECKING:
    from memos.memories.textual.tree_text_memory.retrieve.searcher import Searcher

from memos.templates.mem_feedback_prompts import (
    FEEDBACK_ANSWER_PROMPT,
    FEEDBACK_ANSWER_PROMPT_ZH,
    FEEDBACK_JUDGEMENT_PROMPT,
    FEEDBACK_JUDGEMENT_PROMPT_ZH,
    KEYWORDS_REPLACE,
    KEYWORDS_REPLACE_ZH,
    OPERATION_UPDATE_JUDGEMENT,
    OPERATION_UPDATE_JUDGEMENT_ZH,
    UPDATE_FORMER_MEMORIES,
    UPDATE_FORMER_MEMORIES_ZH,
)
from memos.types import MessageDict

FEEDBACK_PROMPT_DICT = {
    "if_kw_replace": {"en": KEYWORDS_REPLACE, "zh": KEYWORDS_REPLACE_ZH},
    "judge": {"en": FEEDBACK_JUDGEMENT_PROMPT, "zh": FEEDBACK_JUDGEMENT_PROMPT_ZH},
    "compare": {"en": UPDATE_FORMER_MEMORIES, "zh": UPDATE_FORMER_MEMORIES_ZH},
    "compare_judge": {"en": OPERATION_UPDATE_JUDGEMENT, "zh": OPERATION_UPDATE_JUDGEMENT_ZH},
    "generation": {"en": FEEDBACK_ANSWER_PROMPT, "zh": FEEDBACK_ANSWER_PROMPT_ZH},
}
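
# Note: every feedback prompt ships as an English/Chinese pair; detect_lang() on
# the feedback text picks the variant at call time, so each LLM step below is
# language-routed through this dict instead of hard-coding a single prompt.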

logger = get_logger(__name__)


class MemFeedback(BaseMemFeedback):
    def __init__(self, config: MemFeedbackConfig):
        """
        Initialize the MemFeedback with configuration.

        Args:
            config: Configuration object for the MemFeedback
        """
        self.config = config
        self.llm: OpenAILLM | OllamaLLM | AzureLLM = LLMFactory.from_config(config.extractor_llm)
        self.embedder: OllamaEmbedder = EmbedderFactory.from_config(config.embedder)
        self.graph_store: PolarDBGraphDB = GraphStoreFactory.from_config(config.graph_db)
        # Pass graph_store to mem_reader for recall operations (deduplication, conflict detection)
        self.mem_reader = MemReaderFactory.from_config(config.mem_reader, graph_db=self.graph_store)
        self.is_reorganize = config.reorganize
        self.memory_manager: MemoryManager = MemoryManager(
            self.graph_store,
            self.embedder,
            self.llm,
            memory_size=config.memory_size
            or {
                "WorkingMemory": 20,
                "LongTermMemory": 1500,
                "UserMemory": 480,
            },
            is_reorganize=self.is_reorganize,
        )
        self.stopword_manager = StopwordManager
        # Quoted annotation: Searcher is only imported under TYPE_CHECKING.
        self.searcher: "Searcher" = None
        self.reranker = None
        self.pref_feedback: bool = False
        self.DB_IDX_READY = False

    @require_python_package(
        import_name="jieba",
        install_command="pip install jieba",
        install_link="https://github.com/fxsjy/jieba",
    )
    def _tokenize_chinese(self, text):
        """Tokenize Chinese text with jieba and filter out stopwords."""
        import jieba

        tokens = jieba.lcut(text)
        tokens = [token.strip() for token in tokens if token.strip()]
        return self.stopword_manager.filter_words(tokens)
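
    # Note: the two helpers below wrap flaky I/O in tenacity retries with
    # randomized exponential backoff; _embed_once gives up after 4 attempts,
    # _retry_db_operation after 3. Exhausted retries re-raise to the caller.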
    @retry(stop=stop_after_attempt(4), wait=wait_random_exponential(multiplier=1, max=10))
    def _embed_once(self, texts):
        return self.embedder.embed(texts)

    @retry(stop=stop_after_attempt(3), wait=wait_random_exponential(multiplier=1, min=4, max=10))
    def _retry_db_operation(self, operation):
        try:
            return operation()
        except Exception as e:
            logger.error(
                f"[0107 Feedback Core: _retry_db_operation] DB operation failed: {e}", exc_info=True
            )
            raise

    def _batch_embed(self, texts: list[str], embed_bs: int = 5):
        results = []
        dim = self.embedder.config.embedding_dims
        for i in range(0, len(texts), embed_bs):
            batch = texts[i : i + embed_bs]
            try:
                results.extend(self._embed_once(batch))
            except Exception as e:
                logger.error(
                    f"[0107 Feedback Core: _batch_embed] Embedding batch failed; covering {len(batch)} entries with zero vectors: {e}"
                )
                results.extend([[0.0] * dim for _ in range(len(batch))])
        return results
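
    # Usage sketch (hypothetical values): _batch_embed(["a", ..., "k"], embed_bs=5)
    # issues ceil(11 / 5) = 3 calls to _embed_once; if one batch exhausts its
    # retries, that batch contributes zero vectors of length
    # embedder.config.embedding_dims, so the output always aligns 1:1 with the input.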

    def _pure_add(self, user_name: str, feedback_content: str, feedback_time: str, info: dict):
        """
        Directly add new memories extracted from the feedback content.
        """
        scene_data = [[{"role": "user", "content": feedback_content, "chat_time": feedback_time}]]
        memories = self.mem_reader.get_memory(scene_data, type="chat", info=info)
        to_add_memories = [item for scene in memories for item in scene]
        added_ids = self._retry_db_operation(
            lambda: self.memory_manager.add(to_add_memories, user_name=user_name, use_batch=False)
        )
        logger.info(
            f"[0107 Feedback Core: _pure_add] Pure added {len(added_ids)} memories for user {user_name}."
        )
        return {
            "record": {
                "add": [
                    {
                        "id": _id,
                        "text": added_mem.memory,
                        "source_doc_id": (
                            added_mem.metadata.file_ids[0]
                            if hasattr(added_mem.metadata, "file_ids")
                            and isinstance(added_mem.metadata.file_ids, list)
                            and added_mem.metadata.file_ids
                            else None
                        ),
                    }
                    for _id, added_mem in zip(added_ids, to_add_memories, strict=False)
                ],
                "update": [],
            }
        }
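
    # All feedback entry points below return the same record shape:
    #   {"record": {"add": [{"id", "text", "source_doc_id"}, ...],
    #               "update": [{"id", "text", "source_doc_id",
    #                           "archived_id", "origin_memory"}, ...]}}
    # so callers can merge results from the different feedback paths uniformly.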

    def _keyword_replace_judgement(self, feedback_content: str) -> dict | None:
        """
        Determine whether the feedback is a keyword replacement request.
        """
        lang = detect_lang(feedback_content)
        template = FEEDBACK_PROMPT_DICT["if_kw_replace"][lang]
        prompt = template.format(
            user_feedback=feedback_content,
        )
        judge_res = self._get_llm_response(prompt, load_type="bracket")
        if judge_res:
            return judge_res
        else:
            logger.warning(
                "[0107 Feedback Core: _keyword_replace_judgement] keyword replace judgement failed, return {}"
            )
            return {}

    def _feedback_judgement(
        self, chat_history: list[MessageDict], feedback_content: str, feedback_time: str = ""
    ) -> list | dict:
        """
        Generate a judgement for a given feedback.
        """
        lang = detect_lang(feedback_content)
        template = FEEDBACK_PROMPT_DICT["judge"][lang]
        chat_history_lis = [f"""{msg["role"]}: {msg["content"]}""" for msg in chat_history[-4:]]
        chat_history_str = "\n".join(chat_history_lis)
        prompt = template.format(
            chat_history=chat_history_str,
            user_feedback=feedback_content,
            feedback_time=feedback_time,
        )
        judge_res = self._get_llm_response(prompt, load_type="square_bracket")
        if judge_res:
            return judge_res
        else:
            logger.warning(
                "[0107 Feedback Core: _feedback_judgement] feedback judgement failed, return []"
            )
            return []

    def _single_add_operation(
        self,
        old_memory_item: TextualMemoryItem | None,
        new_memory_item: TextualMemoryItem,
        user_id: str,
        user_name: str,
        async_mode: str = "sync",
    ) -> dict:
        """
        Individual addition operations
        """
        if old_memory_item:
            to_add_memory = old_memory_item.model_copy(deep=True)
            to_add_memory.metadata.key = new_memory_item.metadata.key
            to_add_memory.metadata.tags = new_memory_item.metadata.tags
            to_add_memory.memory = new_memory_item.memory
            to_add_memory.metadata.embedding = new_memory_item.metadata.embedding
            to_add_memory.metadata.user_id = new_memory_item.metadata.user_id
        else:
            to_add_memory = new_memory_item.model_copy(deep=True)
        if to_add_memory.metadata.memory_type == "PreferenceMemory":
            to_add_memory.metadata.preference = new_memory_item.memory
        to_add_memory.metadata.created_at = to_add_memory.metadata.updated_at = (
            datetime.now().isoformat()
        )
        to_add_memory.metadata.background = new_memory_item.metadata.background
        added_ids = self._retry_db_operation(
            lambda: self.memory_manager.add([to_add_memory], user_name=user_name, use_batch=False)
        )
        logger.info(f"[Memory Feedback ADD] memory id: {added_ids!s}")
        return {
            "id": added_ids[0],
            "text": to_add_memory.memory,
            "source_doc_id": (
                to_add_memory.metadata.file_ids[0]
                if hasattr(to_add_memory.metadata, "file_ids")
                and isinstance(to_add_memory.metadata.file_ids, list)
                and to_add_memory.metadata.file_ids
                else None
            ),
        }
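
    # Design note (as read from the code): when an old item is supplied, the add
    # keeps the old item's lineage metadata and only carries over the new key,
    # tags, text, embedding, and user_id, presumably so the replacement stays
    # attached to the original memory's provenance. `async_mode` is accepted for
    # interface symmetry but is not consulted in this method.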

    def _single_update_operation(
        self,
        old_memory_item: TextualMemoryItem,
        new_memory_item: TextualMemoryItem,
        user_id: str,
        user_name: str,
        async_mode: str = "sync",
        operation: dict | None = None,
    ) -> dict:
        """
        Individual update operations
        """
        memory_type = old_memory_item.metadata.memory_type
        source_doc_id = (
            old_memory_item.metadata.file_ids[0]
            if hasattr(old_memory_item.metadata, "file_ids")
            and isinstance(old_memory_item.metadata.file_ids, list)
            and old_memory_item.metadata.file_ids
            else None
        )
        if operation and "text" in operation and operation["text"]:
            new_memory_item.memory = operation["text"]
            new_memory_item.metadata.embedding = self._batch_embed([operation["text"]])[0]
        if memory_type == "WorkingMemory":
            fields = {
                "memory": new_memory_item.memory,
                "key": new_memory_item.metadata.key,
                "tags": new_memory_item.metadata.tags,
                "embedding": new_memory_item.metadata.embedding,
                "background": new_memory_item.metadata.background,
                "covered_history": old_memory_item.id,
            }
            self.graph_store.update_node(old_memory_item.id, fields=fields, user_name=user_name)
            item_id = old_memory_item.id
        else:
            done = self._single_add_operation(
                old_memory_item, new_memory_item, user_id, user_name, async_mode
            )
            item_id = done.get("id")
            self.graph_store.update_node(
                item_id, {"covered_history": old_memory_item.id}, user_name=user_name
            )
            self.graph_store.update_node(
                old_memory_item.id, {"status": "archived"}, user_name=user_name
            )
        logger.info(
            f"[Memory Feedback UPDATE] New Add:{item_id} | Set archived:{old_memory_item.id} | memory_type: {memory_type}"
        )
        return {
            "id": item_id,
            "text": new_memory_item.memory,
            "source_doc_id": source_doc_id,
            "archived_id": old_memory_item.id,
            "origin_memory": old_memory_item.memory,
        }
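
    # Update semantics: WorkingMemory nodes are mutated in place (the node id is
    # stable), while every other memory type is re-added as a new node that
    # records `covered_history`, and the old node is archived rather than
    # deleted. "archived_id" in the returned record therefore points at the
    # superseded node in both paths.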

    def _del_working_binding(self, user_name, mem_items: list[TextualMemoryItem]) -> None:
        """Delete working memory bindings"""
        bindings_to_delete = extract_working_binding_ids(mem_items)
        logger.info(
            f"[Memory Feedback UPDATE] Extracted {len(bindings_to_delete)} working_binding ids to cleanup: {list(bindings_to_delete)}"
        )
        delete_ids = []
        if bindings_to_delete:
            delete_ids = list(bindings_to_delete)
        for mid in delete_ids:
            try:
                self.graph_store.delete_node(mid, user_name=user_name)
                logger.info(
                    f"[0107 Feedback Core:_del_working_binding] Deleted raw/working mem_id: {mid} for user_name: {user_name}"
                )
            except Exception as e:
                logger.warning(
                    f"[0107 Feedback Core:_del_working_binding] TreeTextMemory.delete_hard: failed to delete {mid}: {e}"
                )
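
    # Pipeline overview for semantics_feedback (as implemented below):
    #   1. recall candidate memories (reuse caller-provided ones, else retrieve
    #      by the last Q/A turn and by the feedback memory itself),
    #   2. filter candidates by agent/app info,
    #   3. chunk candidates and ask the LLM for ADD/UPDATE/NONE operations in
    #      parallel,
    #   4. normalize ids and drop unsafe updates (standard_operations +
    #      filter_fault_update),
    #   5. apply the surviving operations concurrently and clean up
    #      working-memory bindings for archived items.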

    def semantics_feedback(
        self,
        user_id: str,
        user_name: str,
        memory_item: TextualMemoryItem,
        current_memories: list[TextualMemoryItem],
        history_str: str,
        chat_history_list: list,
        info: dict,
    ):
        """Modify memory at the semantic level"""
        lang = detect_lang("".join(memory_item.memory))
        template = FEEDBACK_PROMPT_DICT["compare"][lang]
        if not current_memories:
            # retrieve
            last_user_index = max(i for i, d in enumerate(chat_history_list) if d["role"] == "user")
            last_qa = " ".join([item["content"] for item in chat_history_list[last_user_index:]])
            supplementary_retrieved = self._retrieve(last_qa, info=info, user_name=user_name)
            feedback_retrieved = self._retrieve(memory_item.memory, info=info, user_name=user_name)
            ids = []
            for item in feedback_retrieved + supplementary_retrieved:
                if item.id not in ids:
                    ids.append(item.id)
                    current_memories.append(item)
        include_keys = ["agent_id", "app_id"]
        current_memories = [
            item for item in current_memories if self._info_comparison(item, info, include_keys)
        ]
        operations = []
        if not current_memories:
            operations = [{"operation": "ADD"}]
            logger.warning(
                "[Feedback Core]: No relevant memory was recalled, so the feedback is added directly."
            )
        else:
            memory_chunks = split_into_chunks(current_memories, max_tokens_per_chunk=500)
            all_operations = []
            now_time = datetime.now().isoformat()
            with ContextThreadPoolExecutor(max_workers=10) as executor:
                future_to_chunk_idx = {}
                for chunk in memory_chunks:
                    chunk_list = []
                    for item in chunk:
                        if item.metadata.memory_type == "PreferenceMemory":
                            chunk_list.append(f"{item.id}: {item.metadata.preference}")
                        else:
                            chunk_list.append(f"{item.id}: {item.memory}")
                    current_memories_str = "\n".join(chunk_list)
                    prompt = template.format(
                        now_time=now_time,
                        current_memories=current_memories_str,
                        new_facts=memory_item.memory,
                        chat_history=history_str,
                    )
                    future = executor.submit(self._get_llm_response, prompt, load_type="bracket")
                    future_to_chunk_idx[future] = chunk
                for future in concurrent.futures.as_completed(future_to_chunk_idx):
                    try:
                        chunk_operations = future.result()
                        if (
                            chunk_operations
                            and "operations" in chunk_operations
                            and isinstance(chunk_operations["operations"], list)
                        ):
                            all_operations.extend(chunk_operations["operations"])
                    except Exception as e:
                        logger.error(
                            f"[0107 Feedback Core: semantics_feedback] Operation failed: {e}"
                        )
            standard_operations = self.standard_operations(all_operations, current_memories)
            operations = self.filter_fault_update(standard_operations)
        logger.info(f"[Feedback Core Operations]: {operations!s}")
        if not operations:
            return {"record": {"add": [], "update": []}}
        add_results = []
        update_results = []
        id_to_item = {item.id: item for item in current_memories}
        with ContextThreadPoolExecutor(max_workers=10) as executor:
            future_to_op = {}
            for op in operations:
                event_type = op.get("operation", "").lower()
                if event_type == "add":
                    future = executor.submit(
                        self._single_add_operation,
                        None,
                        memory_item,
                        user_id,
                        user_name,
                    )
                    future_to_op[future] = ("add", op)
                elif event_type == "update":
                    future = executor.submit(
                        self._single_update_operation,
                        id_to_item[op["id"]],
                        memory_item,
                        user_id,
                        user_name,
                        operation=op,
                    )
                    future_to_op[future] = ("update", op)
            for future in concurrent.futures.as_completed(future_to_op):
                result_type, original_op = future_to_op[future]
                try:
                    result = future.result()
                    if result_type == "add" and result:
                        add_results.append(result)
                    elif result_type == "update" and result:
                        update_results.append(result)
                except Exception as e:
                    logger.error(
                        f"[0107 Feedback Core: semantics_feedback] Operation failed for {original_op}: {e}",
                        exc_info=True,
                    )
        if update_results:
            updated_ids = [item["archived_id"] for item in update_results]
            # Map archived ids back to their memory items so working bindings can be extracted.
            archived_items = [id_to_item[_id] for _id in updated_ids if _id in id_to_item]
            self._del_working_binding(user_name, archived_items)
        return {"record": {"add": add_results, "update": update_results}}

    def _feedback_memory(
        self, user_id: str, user_name: str, feedback_memories: list[TextualMemoryItem], **kwargs
    ) -> dict:
        retrieved_memory_ids = kwargs.get("retrieved_memory_ids") or []
        chat_history = kwargs.get("chat_history", [])
        feedback_content = kwargs.get("feedback_content", "")
        info = kwargs.get("info", {})
        chat_history_lis = [f"""{msg["role"]}: {msg["content"]}""" for msg in chat_history[-4:]]
        history_str = "\n".join(chat_history_lis) + f"\nuser feedback: \n{feedback_content}"
        retrieved_memories = [
            self.graph_store.get_node(_id, user_name=user_name) for _id in retrieved_memory_ids
        ]
        filtered_ids = [
            item["id"] for item in retrieved_memories if "mode:fast" in item["metadata"]["tags"]
        ]
        if filtered_ids:
            logger.warning(
                f"[0107 Feedback Core: _feedback_memory] Since the tags mode is fast, no modifications are made to the following memories: {filtered_ids}."
            )
        current_memories = [
            TextualMemoryItem(**item)
            for item in retrieved_memories
            if "mode:fast" not in item["metadata"]["tags"]
        ]
        with ContextThreadPoolExecutor(max_workers=3) as ex:
            futures = {
                ex.submit(
                    self.semantics_feedback,
                    user_id,
                    user_name,
                    mem,
                    current_memories,
                    history_str,
                    chat_history,
                    info,
                ): i
                for i, mem in enumerate(feedback_memories)
            }
            results = [None] * len(futures)
            for fut in concurrent.futures.as_completed(futures):
                i = futures[fut]
                try:
                    node = fut.result()
                    if node:
                        results[i] = node
                except Exception as e:
                    logger.error(
                        f"[0107 Feedback Core: _feedback_memory] Error processing memory index {i}: {e}",
                        exc_info=True,
                    )
        mem_res = [r for r in results if r]
        return {
            "record": {
                "add": [element for item in mem_res for element in item["record"]["add"]],
                "update": [element for item in mem_res for element in item["record"]["update"]],
            }
        }

    def _info_comparison(self, memory: TextualMemoryItem, _info: dict, include_keys: list) -> bool:
        """Filter the relevant memory items based on info"""
        if not _info and not memory.metadata.info:
            return True
        record = []
        for key in include_keys:
            info_v = _info.get(key)
            mem_v = memory.metadata.info.get(key, None) if memory.metadata.info else None
            record.append(info_v == mem_v)
        return all(record)
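
    # Worked example (hypothetical values): with include_keys = ["agent_id", "app_id"],
    # _info_comparison(mem, {"agent_id": "a1"}, include_keys) is True only when
    # mem.metadata.info has agent_id == "a1" and no app_id (a missing key on
    # either side compares as None), so memories scoped to another agent or app
    # are excluded from feedback edits.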

    def _retrieve(self, query: str, info=None, top_k=20, user_name=None):
        """Retrieve memory items"""

        def check_has_edges(mem_item: TextualMemoryItem) -> tuple[TextualMemoryItem, bool]:
            """Return the memory item and whether it has no edges."""
            edges = self.searcher.graph_store.get_edges(mem_item.id, user_name=user_name)
            return (mem_item, len(edges) == 0)

        logger.info(f"[feedback _retrieve] query: {query}, user_name: {user_name}")
        text_mems = self.searcher.search(
            query=query,
            top_k=top_k,
            info=info,
            memory_type="AllSummaryMemory",
            user_name=user_name,
            full_recall=True,
        )
        text_mems = [item[0] for item in text_mems if float(item[1]) > 0.01]
        if self.pref_feedback:
            pref_mems = self.searcher.search(
                query=query,
                top_k=top_k,
                info=info,
                memory_type="PreferenceMemory",
                user_name=user_name,
                include_preference_memory=True,
                full_recall=True,
            )
            pref_mems = [item[0] for item in pref_mems if float(item[1]) > 0.01]
            text_mems.extend(pref_mems)
        # Memory with edges is not modified by feedback
        retrieved_mems = []
        with ContextThreadPoolExecutor(max_workers=10) as executor:
            futures = {executor.submit(check_has_edges, item): item for item in text_mems}
            for future in concurrent.futures.as_completed(futures):
                try:
                    mem_item, has_no_edges = future.result()
                    if has_no_edges:
                        retrieved_mems.append(mem_item)
                except Exception as e:
                    logger.error(f"[0107 Feedback Core: _retrieve] Error checking edges: {e}")
        if len(retrieved_mems) < len(text_mems):
            logger.info(
                f"[0107 Feedback Core: _retrieve] {len(text_mems) - len(retrieved_mems)} "
                f"text memories are not modified by feedback due to edges."
            )
        return retrieved_mems

    def _vec_query(self, new_memories_embedding: list[float], user_name=None):
        """Vector retrieval query"""
        retrieved_ids = []
        retrieved_ids.extend(
            self.graph_store.search_by_embedding(
                new_memories_embedding,
                scope="UserMemory",
                user_name=user_name,
                top_k=10,
                threshold=0.2,
            )
        )
        retrieved_ids.extend(
            self.graph_store.search_by_embedding(
                new_memories_embedding,
                scope="LongTermMemory",
                user_name=user_name,
                top_k=10,
                threshold=0.2,
            )
        )
        current_memories = [
            self.graph_store.get_node(item["id"], user_name=user_name) for item in retrieved_ids
        ]
        if not retrieved_ids:
            logger.info(
                f"[0107 Feedback Core: _vec_query] No similar memories found for embedding query for user {user_name}."
            )
        filtered_ids = [
            item["id"] for item in current_memories if "mode:fast" in item["metadata"]["tags"]
        ]
        if filtered_ids:
            logger.warning(
                f"[0107 Feedback Core: _vec_query] Since the tags mode is fast, no modifications are made to the following memories: {filtered_ids}."
            )
        return [
            TextualMemoryItem(**item)
            for item in current_memories
            if "mode:fast" not in item["metadata"]["tags"]
        ]
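
    # Parsing strategy for _get_llm_response: strip code fences and control
    # characters, try strict json.loads first, then fall back to the
    # bracket-extraction helpers keyed by `load_type` ("bracket" for a JSON
    # object, "square_bracket" for a JSON array). Any remaining failure is
    # logged with the raw response and surfaces as None.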

    def _get_llm_response(
        self,
        prompt: str,
        dsl: bool = True,
        load_type: Literal["bracket", "square_bracket"] | None = None,
    ) -> dict | list | str | None:
        messages = [{"role": "user", "content": prompt}]
        response_text = ""
        try:
            response_text = self.llm.generate(messages, temperature=0.3, timeout=60)
            if not dsl:
                return response_text
            try:
                response_text = response_text.replace("```", "").replace("json", "")
                cleaned_text = re.sub(r"[\x00-\x08\x0B\x0C\x0E-\x1F\x7F]", "", response_text)
                response_json = json.loads(cleaned_text)
                return response_json
            except (json.JSONDecodeError, ValueError) as e:
                if load_type == "bracket":
                    response_json = extract_bracket_content(response_text)
                    return response_json
                elif load_type == "square_bracket":
                    response_json = extract_square_brackets_content(response_text)
                    return response_json
                else:
                    logger.error(
                        f"[Feedback Core LLM Error] Exception during chat generation: {e} | response_text: {response_text}"
                    )
                    return None
        except Exception as e:
            logger.error(
                f"[Feedback Core LLM Error] Exception during chat generation: {e} | response_text: {response_text}"
            )
            return None

    def filter_fault_update(self, operations: list[dict]):
        """Ask the LLM to re-judge UPDATE operations so that spurious overwrites
        caused by model randomness are filtered out before they reach the store."""
        updated_operations = [item for item in operations if item["operation"] == "UPDATE"]
        if len(updated_operations) < 5:
            return operations
        lang = detect_lang("".join(updated_operations[0]["text"]))
        template = FEEDBACK_PROMPT_DICT["compare_judge"][lang]
        all_judge = []
        operations_chunks = general_split_into_chunks(updated_operations)
        with ContextThreadPoolExecutor(max_workers=10) as executor:
            future_to_chunk_idx = {}
            for chunk in operations_chunks:
                raw_operations_str = {"operations": chunk}
                prompt = template.format(raw_operations=str(raw_operations_str))
                future = executor.submit(self._get_llm_response, prompt, load_type="bracket")
                future_to_chunk_idx[future] = chunk
            for future in concurrent.futures.as_completed(future_to_chunk_idx):
                try:
                    judge_res = future.result()
                    if (
                        judge_res
                        and "operations_judgement" in judge_res
                        and isinstance(judge_res["operations_judgement"], list)
                    ):
                        all_judge.extend(judge_res["operations_judgement"])
                except Exception as e:
                    logger.error(f"[0107 Feedback Core: filter_fault_update] Judgement failed: {e}")
        logger.info(f"[0107 Feedback Core: filter_fault_update] LLM judgement: {all_judge}")
        id2op = {item["id"]: item for item in updated_operations}
        valid_updates = []
        for judge in all_judge:
            valid_update = None
            if judge["judgement"] == "UPDATE_APPROVED":
                valid_update = id2op.get(judge["id"], None)
            if valid_update:
                valid_updates.append(valid_update)
        logger.info(
            f"[0107 Feedback Core: filter_fault_update] {len(updated_operations)} -> {len(valid_updates)}"
        )
        return valid_updates + [item for item in operations if item["operation"] != "UPDATE"]
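
    # Id de-hallucination in standard_operations proceeds from cheapest to most
    # permissive match: exact id, case-insensitive id, then difflib
    # get_close_matches with cutoff=0.8 (e.g. an id the LLM echoed with one
    # transposed character still resolves); anything that fails all three is
    # dropped rather than guessed.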

    def standard_operations(self, operations, current_memories):
        """
        Regularize the operation design
        1. Map the id to the correct original memory id
        2. If there is an update, skip the add operations
        3. If the modified text is too long, skip the update
        """
        right_ids = [item.id for item in current_memories]
        right_lower_map = {x.lower(): x for x in right_ids}

        def correct_item(data):
            try:
                assert "operation" in data
                if data.get("operation", "").lower() == "add":
                    return data
                if data.get("operation", "").lower() == "none":
                    return None
                assert (
                    "id" in data
                    and "text" in data
                    and "old_memory" in data
                    and data["operation"].lower() == "update"
                ), "Invalid operation item"
                if not should_keep_update(data["text"], data["old_memory"]):
                    logger.warning(
                        f"[0107 Feedback Core: correct_item] Due to the excessive proportion of changes, skip update: {data}"
                    )
                    return None
                # id dehallucination
                original_id = data["id"]
                if original_id in right_ids:
                    return data
                lower_id = original_id.lower()
                if lower_id in right_lower_map:
                    data["id"] = right_lower_map[lower_id]
                    return data
                matches = difflib.get_close_matches(original_id, right_ids, n=1, cutoff=0.8)
                if matches:
                    data["id"] = matches[0]
                    return data
            except Exception:
                logger.error(
                    f"[0107 Feedback Core: standard_operations] Error processing operation item: {data}",
                    exc_info=True,
                )
            return None

        dehallu_res = [correct_item(item) for item in operations]
        dehalluded_operations = [item for item in dehallu_res if item]
        logger.info(f"[0107 Feedback Core: dehalluded_operations] {dehalluded_operations}")
        # Deduplicate ADD operations that carry identical text
        add_texts = []
        llm_operations = []
        for item in dehalluded_operations:
            if item["operation"].lower() == "add" and "text" in item and item["text"]:
                if item["text"] in add_texts:
                    continue
                llm_operations.append(item)
                add_texts.append(item["text"])
            elif item["operation"].lower() == "update":
                llm_operations.append(item)
        logger.info(
            f"[0107 Feedback Core: deduplicate add] {len(dehalluded_operations)} -> {len(llm_operations)} memories"
        )
        # Update takes precedence over add
        has_update = any(item.get("operation").lower() == "update" for item in llm_operations)
        if has_update:
            filtered_items = [
                item for item in llm_operations if item.get("operation").lower() == "add"
            ]
            update_items = [
                item for item in llm_operations if item.get("operation").lower() != "add"
            ]
            if filtered_items:
                logger.info(
                    f"[0107 Feedback Core: standard_operations] Because there are update operations, skip add: {filtered_items}"
                )
            return update_items
        else:
            return llm_operations

    def _generate_answer(
        self, chat_history: list[MessageDict], feedback_content: str, corrected_answer: bool
    ) -> str:
        """
        Answer generation to facilitate concurrent submission.
        """
        if not corrected_answer or feedback_content.strip() == "":
            return ""
        lang = detect_lang(feedback_content)
        template = FEEDBACK_PROMPT_DICT["generation"][lang]
        chat_history_str = "\n".join(
            [f"{item['role']}: {item['content']}" for item in chat_history]
        )
        chat_history_str = chat_history_str if chat_history_str else "none"
        prompt = template.format(chat_history=chat_history_str, question=feedback_content)
        return self._get_llm_response(prompt, dsl=False)

    def _doc_filter(self, doc_scope: str, memories: list[TextualMemoryItem]):
        """
        Filter the memories based on filename
        """
        filename2_memid = {}
        filename_mems = []
        for item in memories:
            for file_info in item.metadata.sources:
                if file_info.type == "file":
                    file_dict = file_info.original_part
                    filename = file_dict["file"]["filename"]
                    if filename not in filename2_memid:
                        filename2_memid[filename] = []
                        filename_mems.append(make_mem_item(filename))
                    filename2_memid[filename].append(item.id)
        rerank_res = self.reranker.rerank(doc_scope, filename_mems, top_k=100)
        inscope_docs = [item[0].memory for item in rerank_res if item[1] > 0.95]
        inscope_ids = [
            memid for inscope_file in inscope_docs for memid in filename2_memid[inscope_file]
        ]
        logger.info(
            f"[0107 Feedback Core: _doc_filter] These docs are in scope: {inscope_docs}, related memids: {inscope_ids}"
        )
        filter_memories = [mem for mem in memories if mem.id in inscope_ids]
        return filter_memories
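
    # Retrieval strategy for keyword replacement (as implemented below): when the
    # DB keyword index is ready (DB_IDX_READY), query TF-IDF keywords first
    # (AND-joined tokens, jieba-tokenized for Chinese) and fall back to full-text
    # search on an empty hit set; otherwise fall back to a plain SQL LIKE scan
    # on %original_word%.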

    def process_keyword_replace(
        self, user_id: str, user_name: str, kwp_judge: dict | None = None, info: dict | None = None
    ):
        """
        Memory keyword replace process
        """
        info = info or {}
        doc_scope = kwp_judge.get("doc_scope", "NONE")
        original_word = kwp_judge.get("original")
        target_word = kwp_judge.get("target")
        include_keys = ["agent_id", "app_id"]
        mem_info = {key: info[key] for key in info if key in include_keys}
        filter_dict = {f"info.{key}": info[key] for key in mem_info}
        if self.DB_IDX_READY:
            # retrieve
            lang = detect_lang(original_word)
            queries = (
                self._tokenize_chinese(original_word) if lang == "zh" else original_word.split()
            )
            must_part = f"{' & '.join(queries)}" if len(queries) > 1 else queries[0]
            retrieved_ids = self.graph_store.search_by_keywords_tfidf(
                [must_part], user_name=user_name, filter=filter_dict
            )
            if len(retrieved_ids) < 1:
                retrieved_ids = self.graph_store.search_by_fulltext(
                    queries, top_k=100, user_name=user_name, filter=filter_dict
                )
        else:
            retrieved_ids = self.graph_store.search_by_keywords_like(
                f"%{original_word}%", user_name=user_name, filter=filter_dict
            )
        mem_data = [
            self.graph_store.get_node(item["id"], user_name=user_name) for item in retrieved_ids
        ]
        retrieved_memories = [TextualMemoryItem(**item) for item in mem_data]
        retrieved_memories = [
            item
            for item in retrieved_memories
            if self._info_comparison(item, mem_info, include_keys)
        ]
        if doc_scope != "NONE":
            retrieved_memories = self._doc_filter(doc_scope, retrieved_memories)
        logger.info(
            f"[0107 Feedback Core: process_keyword_replace] Keywords recalled memory for user {user_name}: {len(retrieved_ids)} memories | After filtering: {len(retrieved_memories)} memories."
        )
        if not retrieved_memories:
            return {"record": {"add": [], "update": []}}
        # replace keywords
        pick_index = []
        update_memories = []
        for i, old_mem in enumerate(retrieved_memories):
            if original_word in old_mem.memory:
                mem = old_mem.model_copy(deep=True)
                mem.memory = mem.memory.replace(original_word, target_word)
                if original_word in mem.metadata.tags:
                    mem.metadata.tags.remove(original_word)
                if target_word not in mem.metadata.tags:
                    mem.metadata.tags.append(target_word)
                pick_index.append(i)
                update_memories.append(mem)
        update_memories_embed = self._batch_embed([mem.memory for mem in update_memories])
        for _i, embed in zip(range(len(update_memories)), update_memories_embed, strict=False):
            update_memories[_i].metadata.embedding = embed
        update_results = []
        with ContextThreadPoolExecutor(max_workers=10) as executor:
            future_to_info = {}
            for new_mem, old_idx in zip(update_memories, pick_index, strict=False):
                old_mem = retrieved_memories[old_idx]
                future = executor.submit(
                    self._single_update_operation,
                    old_mem,
                    new_mem,
                    user_id,
                    user_name,
                )
                future_to_info[future] = old_mem.id
            for future in concurrent.futures.as_completed(future_to_info):
                try:
                    result = future.result()
                    update_results.append(result)
                except Exception as e:
                    mem_id = future_to_info[future]
                    logger.error(
                        f"[Feedback Core DB] Exception during update operation for memory {mem_id}: {e}"
                    )
        return {"record": {"add": [], "update": update_results}}

    def process_feedback_core(
        self,
        user_id: str,
        user_name: str,
        chat_history: list[MessageDict],
        feedback_content: str,
        info: dict | None = None,
        **kwargs,
    ) -> dict:
        """
        Core feedback processing: judgement, memory extraction, addition/update. Returns the record.
        """

        def check_validity(item):
            return (
                "validity" in item
                and item["validity"].lower() == "true"
                and "corrected_info" in item
                and item["corrected_info"].strip()
                and "key" in item
                and "tags" in item
            )

        if feedback_content.strip() == "":
            return {"record": {"add": [], "update": []}}
        try:
            feedback_time = kwargs.get("feedback_time") or datetime.now().isoformat()
            session_id = kwargs.get("session_id")
            if not info:
                info = {"user_id": user_id, "user_name": user_name, "session_id": session_id}
            else:
                info.update({"user_id": user_id, "user_name": user_name, "session_id": session_id})
            logger.info(
                f"[0107 Feedback Core: process_feedback_core] Starting memory feedback process for user {user_name}"