From 80641993b8b462fa1d9a9a85da5544eff6bede9c Mon Sep 17 00:00:00 2001 From: Max P Date: Tue, 24 Feb 2026 11:51:14 -0500 Subject: [PATCH] feat(ai): persist user preferences across chat sessions --- Tasks.md | 2 + .../ai/ai-agent.chat.helpers.spec.ts | 44 ++++- .../app/endpoints/ai/ai-agent.chat.helpers.ts | 178 +++++++++++++++++- .../endpoints/ai/ai-agent.chat.interfaces.ts | 7 + .../src/app/endpoints/ai/ai.service.spec.ts | 87 +++++++++ apps/api/src/app/endpoints/ai/ai.service.ts | 49 ++++- tasks/lessons.md | 4 + tasks/tasks.md | 19 ++ 8 files changed, 381 insertions(+), 9 deletions(-) diff --git a/Tasks.md b/Tasks.md index e847d5426..3cebb6d7c 100644 --- a/Tasks.md +++ b/Tasks.md @@ -18,6 +18,7 @@ Last updated: 2026-02-24 | T-010 | Chat history persistence and simple direct-query handling | Complete | `apps/client/src/app/pages/portfolio/analysis/ai-chat-panel/ai-chat-panel.component.spec.ts`, `apps/api/src/app/endpoints/ai/ai-agent.utils.spec.ts`, `apps/api/src/app/endpoints/ai/ai.service.spec.ts` | Local implementation | | T-011 | Per-LLM LangSmith invocation tracing + production tracing env enablement | Complete | `apps/api/src/app/endpoints/ai/ai-observability.service.spec.ts`, `apps/api/src/app/endpoints/ai/ai.service.spec.ts`, `apps/api/src/app/endpoints/ai/ai-performance.spec.ts`, `apps/api/src/app/endpoints/ai/evals/mvp-eval.runner.spec.ts`, `apps/api/src/app/endpoints/ai/evals/ai-quality-eval.spec.ts` | Local implementation + Railway variable update | | T-012 | LangChain wrapper enforcement for provider calls + arithmetic direct-response correction | Complete | `apps/api/src/app/endpoints/ai/ai.service.spec.ts`, `apps/api/src/app/endpoints/ai/ai-agent.utils.spec.ts`, `npm run test:ai` | Local implementation | +| T-013 | Cross-session user preference memory (persisted by user, independent of chat session) | Complete | `apps/api/src/app/endpoints/ai/ai-agent.chat.helpers.spec.ts`, `apps/api/src/app/endpoints/ai/ai.service.spec.ts`, `npm run test:ai`, `npx 
nx run api:lint` | Local implementation | ## Notes @@ -39,3 +40,4 @@ Last updated: 2026-02-24 - Chat persistence + simple direct-query handling (2026-02-24): client chat panel now restores/persists session + bounded message history via localStorage and policy no-tool prompts now return assistant capability guidance for queries like "Who are you?". - Per-LLM LangSmith invocation tracing (2026-02-24): each provider call now records an explicit LangSmith `llm` run (provider/model/query/session/response metadata), and production Railway env now has tracing variables enabled. - Direct arithmetic no-tool behavior fix (2026-02-24): simple arithmetic prompts now return computed answers (for example `2+2 = 4`) instead of generic capability guidance. +- Cross-session preference memory (2026-02-24): AI now persists explicit user response-style preferences in Redis by `userId`, recalls them across different `sessionId`s, and applies them to later AI responses. diff --git a/apps/api/src/app/endpoints/ai/ai-agent.chat.helpers.spec.ts b/apps/api/src/app/endpoints/ai/ai-agent.chat.helpers.spec.ts index a7d6c9ae3..4918da56d 100644 --- a/apps/api/src/app/endpoints/ai/ai-agent.chat.helpers.spec.ts +++ b/apps/api/src/app/endpoints/ai/ai-agent.chat.helpers.spec.ts @@ -1,6 +1,12 @@ import { DataSource } from '@prisma/client'; -import { buildAnswer } from './ai-agent.chat.helpers'; +import { + buildAnswer, + createPreferenceSummaryResponse, + getUserPreferences, + isPreferenceRecallQuery, + resolvePreferenceUpdate +} from './ai-agent.chat.helpers'; describe('AiAgentChatHelpers', () => { const originalLlmTimeout = process.env.AI_AGENT_LLM_TIMEOUT_IN_MS; @@ -66,4 +72,40 @@ describe('AiAgentChatHelpers', () => { expect(answer).toBe(generatedText); }); + + it('parses and persists concise response-style preference updates', () => { + const result = resolvePreferenceUpdate({ + query: 'Remember to keep responses concise.', + userPreferences: {} + }); + + expect(result.shouldPersist).toBe(true); 
+ expect(result.userPreferences.responseStyle).toBe('concise'); + expect(result.acknowledgement).toContain('Saved preference'); + }); + + it('recognizes preference recall queries and renders deterministic summary', () => { + expect(isPreferenceRecallQuery('What do you remember about me?')).toBe(true); + expect( + createPreferenceSummaryResponse({ + userPreferences: { + responseStyle: 'concise', + updatedAt: '2026-02-24T10:00:00.000Z' + } + }) + ).toContain('response style: concise'); + }); + + it('returns empty preferences for malformed user preference payload', async () => { + const redisCacheService = { + get: jest.fn().mockResolvedValue('{bad-json') + }; + + const result = await getUserPreferences({ + redisCacheService: redisCacheService as never, + userId: 'user-1' + }); + + expect(result).toEqual({}); + }); }); diff --git a/apps/api/src/app/endpoints/ai/ai-agent.chat.helpers.ts b/apps/api/src/app/endpoints/ai/ai-agent.chat.helpers.ts index f4c4f4b28..a2cc10625 100644 --- a/apps/api/src/app/endpoints/ai/ai-agent.chat.helpers.ts +++ b/apps/api/src/app/endpoints/ai/ai-agent.chat.helpers.ts @@ -7,6 +7,7 @@ import ms from 'ms'; import { AiAgentMemoryState, + AiAgentUserPreferenceState, MarketDataLookupResult, PortfolioAnalysisResult, RebalancePlanResult, @@ -19,7 +20,16 @@ import { } from './ai-agent.utils'; const AI_AGENT_MEMORY_TTL = ms('24 hours'); +const AI_AGENT_USER_PREFERENCES_TTL = ms('90 days'); const DEFAULT_LLM_TIMEOUT_IN_MS = 3_500; +const CLEAR_PREFERENCES_PATTERN = + /\b(?:clear|forget|reset)\s+(?:all\s+)?(?:my\s+)?(?:saved\s+)?preferences?\b/i; +const CONCISE_RESPONSE_STYLE_PATTERN = + /\b(?:(?:concise|brief|short)\s+(?:answers?|responses?|replies?)|(?:answers?|responses?|replies?)\s+(?:concise|brief|short)|(?:answer|reply)\s+(?:briefly|concisely)|keep (?:the )?(?:answers?|responses?|replies?) 
(?:short|brief|concise))\b/i; +const DETAILED_RESPONSE_STYLE_PATTERN = + /\b(?:(?:detailed|verbose|longer)\s+(?:answers?|responses?|replies?)|(?:answers?|responses?|replies?)\s+(?:detailed|verbose|longer)|(?:answer|reply)\s+(?:in detail|verbosely)|(?:more|extra)\s+detail)\b/i; +const PREFERENCE_RECALL_PATTERN = + /\b(?:what do you remember about me|show (?:my )?preferences?|what are my preferences?|which preferences (?:do|did) you (?:remember|save))\b/i; export const AI_AGENT_MEMORY_MAX_TURNS = 10; @@ -31,6 +41,122 @@ function getLlmTimeoutInMs() { : DEFAULT_LLM_TIMEOUT_IN_MS; } +function sanitizeUserPreferences( + preferences?: AiAgentUserPreferenceState +): AiAgentUserPreferenceState { + if (!preferences || typeof preferences !== 'object') { + return {}; + } + + return { + responseStyle: + preferences.responseStyle === 'concise' || preferences.responseStyle === 'detailed' + ? preferences.responseStyle + : undefined, + updatedAt: + typeof preferences.updatedAt === 'string' ? preferences.updatedAt : undefined + }; +} + +function hasStoredPreferences(preferences: AiAgentUserPreferenceState) { + return Boolean(preferences.responseStyle); +} + +function getResponseInstruction({ + userPreferences +}: { + userPreferences?: AiAgentUserPreferenceState; +}) { + if (userPreferences?.responseStyle === 'concise') { + return `User preference: keep the response concise in 1-3 short sentences and avoid speculation.`; + } + + if (userPreferences?.responseStyle === 'detailed') { + return `User preference: provide a detailed structured response with clear steps and avoid speculation.`; + } + + return `Write a concise response with actionable insight and avoid speculation.`; +} + +export function isPreferenceRecallQuery(query: string) { + return PREFERENCE_RECALL_PATTERN.test(query.trim().toLowerCase()); +} + +export function createPreferenceSummaryResponse({ + userPreferences +}: { + userPreferences: AiAgentUserPreferenceState; +}) { + if (!hasStoredPreferences(userPreferences)) { 
+ return 'I have no saved cross-session preferences yet.'; + } + + const sections: string[] = ['Saved cross-session preferences:']; + + if (userPreferences.responseStyle) { + sections.push(`- response style: ${userPreferences.responseStyle}`); + } + + return sections.join('\n'); +} + +export function resolvePreferenceUpdate({ + query, + userPreferences +}: { + query: string; + userPreferences: AiAgentUserPreferenceState; +}): { + acknowledgement?: string; + shouldPersist: boolean; + userPreferences: AiAgentUserPreferenceState; +} { + const normalizedPreferences = sanitizeUserPreferences(userPreferences); + const normalizedQuery = query.trim(); + + if (CLEAR_PREFERENCES_PATTERN.test(normalizedQuery)) { + return { + acknowledgement: hasStoredPreferences(normalizedPreferences) + ? 'Cleared your saved cross-session preferences.' + : 'No saved cross-session preferences were found.', + shouldPersist: hasStoredPreferences(normalizedPreferences), + userPreferences: {} + }; + } + + const wantsConcise = CONCISE_RESPONSE_STYLE_PATTERN.test(normalizedQuery); + const wantsDetailed = DETAILED_RESPONSE_STYLE_PATTERN.test(normalizedQuery); + + if (wantsConcise === wantsDetailed) { + return { + shouldPersist: false, + userPreferences: normalizedPreferences + }; + } + + const responseStyle: AiAgentUserPreferenceState['responseStyle'] = wantsConcise + ? 
'concise' + : 'detailed'; + + if (normalizedPreferences.responseStyle === responseStyle) { + return { + acknowledgement: `Preference already saved: response style is ${responseStyle}.`, + shouldPersist: false, + userPreferences: normalizedPreferences + }; + } + + return { + acknowledgement: `Saved preference: I will keep responses ${responseStyle} across sessions.`, + shouldPersist: true, + userPreferences: { + ...normalizedPreferences, + responseStyle, + updatedAt: new Date().toISOString() + } + }; +} + export async function buildAnswer({ generateText, languageCode, @@ -41,6 +167,7 @@ export async function buildAnswer({ rebalancePlan, riskAssessment, stressTest, + userPreferences, userCurrency }: { generateText: ({ @@ -58,6 +185,7 @@ export async function buildAnswer({ rebalancePlan?: RebalancePlanResult; riskAssessment?: RiskAssessmentResult; stressTest?: StressTestResult; + userPreferences?: AiAgentUserPreferenceState; userCurrency: string; }) { const fallbackSections: string[] = []; @@ -169,7 +297,9 @@ export async function buildAnswer({ ); } - const fallbackAnswer = fallbackSections.join('\n'); + const fallbackAnswer = userPreferences?.responseStyle === 'concise' + ? 
fallbackSections.slice(0, 2).join('\n') + : fallbackSections.join('\n'); const llmPrompt = [ `You are a neutral financial assistant.`, `User currency: ${userCurrency}`, @@ -177,7 +307,7 @@ `Query: ${query}`, `Context summary:`, fallbackAnswer, - `Write a concise response with actionable insight and avoid speculation.` + getResponseInstruction({ userPreferences }) ].join('\n'); const llmTimeoutInMs = getLlmTimeoutInMs(); const abortController = new AbortController(); @@ -255,6 +385,30 @@ } } +export async function getUserPreferences({ + redisCacheService, + userId +}: { + redisCacheService: RedisCacheService; + userId: string; +}): Promise<AiAgentUserPreferenceState> { + const rawPreferences = await redisCacheService.get( + getUserPreferencesKey({ userId }) + ); + + if (!rawPreferences) { + return {}; + } + + try { + const parsed = JSON.parse(rawPreferences) as AiAgentUserPreferenceState; + + return sanitizeUserPreferences(parsed); + } catch { + return {}; + } +} + export function getMemoryKey({ sessionId, userId }: { sessionId: string; userId: string; }) { return `ai-agent-memory-${userId}-${sessionId}`; } +export function getUserPreferencesKey({ userId }: { userId: string }) { + return `ai-agent-preferences-${userId}`; +} + export function resolveSymbols({ portfolioAnalysis, query, @@ -434,3 +592,19 @@ export async function setMemory({ AI_AGENT_MEMORY_TTL ); } + +export async function setUserPreferences({ + redisCacheService, + userId, + userPreferences +}: { + redisCacheService: RedisCacheService; + userId: string; + userPreferences: AiAgentUserPreferenceState; +}) { + await redisCacheService.set( + getUserPreferencesKey({ userId }), + JSON.stringify(sanitizeUserPreferences(userPreferences)), + AI_AGENT_USER_PREFERENCES_TTL + ); +} diff --git a/apps/api/src/app/endpoints/ai/ai-agent.chat.interfaces.ts b/apps/api/src/app/endpoints/ai/ai-agent.chat.interfaces.ts index 4b264bd59..f66ca11e7 100644 --- 
a/apps/api/src/app/endpoints/ai/ai-agent.chat.interfaces.ts +++ b/apps/api/src/app/endpoints/ai/ai-agent.chat.interfaces.ts @@ -11,6 +11,13 @@ export interface AiAgentMemoryState { }[]; } +export type AiAgentResponseStylePreference = 'concise' | 'detailed'; + +export interface AiAgentUserPreferenceState { + responseStyle?: AiAgentResponseStylePreference; + updatedAt?: string; +} + export interface PortfolioAnalysisResult { allocationSum: number; holdings: { diff --git a/apps/api/src/app/endpoints/ai/ai.service.spec.ts b/apps/api/src/app/endpoints/ai/ai.service.spec.ts index a889bf61c..a92a0702d 100644 --- a/apps/api/src/app/endpoints/ai/ai.service.spec.ts +++ b/apps/api/src/app/endpoints/ai/ai.service.spec.ts @@ -291,6 +291,93 @@ describe('AiService', () => { expect(generateTextSpy).not.toHaveBeenCalled(); }); + it('persists and recalls cross-session user preferences for the same user', async () => { + const redisStore = new Map(); + redisCacheService.get.mockImplementation(async (key: string) => { + return redisStore.get(key); + }); + redisCacheService.set.mockImplementation( + async (key: string, value: string) => { + redisStore.set(key, value); + } + ); + + const savePreferenceResult = await subject.chat({ + languageCode: 'en', + query: 'Remember to keep responses concise.', + sessionId: 'session-pref-1', + userCurrency: 'USD', + userId: 'user-pref' + }); + + expect(savePreferenceResult.answer).toContain('Saved preference'); + expect(redisStore.get('ai-agent-preferences-user-pref')).toContain('concise'); + + const recallPreferenceResult = await subject.chat({ + languageCode: 'en', + query: 'What do you remember about me?', + sessionId: 'session-pref-2', + userCurrency: 'USD', + userId: 'user-pref' + }); + + expect(recallPreferenceResult.answer).toContain( + 'Saved cross-session preferences' + ); + expect(recallPreferenceResult.answer).toContain('response style: concise'); + }); + + it('applies persisted response-style preferences to LLM prompt generation', async 
() => { + const redisStore = new Map(); + redisCacheService.get.mockImplementation(async (key: string) => { + return redisStore.get(key); + }); + redisCacheService.set.mockImplementation( + async (key: string, value: string) => { + redisStore.set(key, value); + } + ); + portfolioService.getDetails.mockResolvedValue({ + holdings: { + AAPL: { + allocationInPercentage: 1, + dataSource: DataSource.YAHOO, + symbol: 'AAPL', + valueInBaseCurrency: 1000 + } + } + }); + const generateTextSpy = jest.spyOn(subject, 'generateText'); + generateTextSpy.mockResolvedValue({ + text: 'Portfolio concentration is high.' + } as never); + + await subject.chat({ + languageCode: 'en', + query: 'Keep responses concise.', + sessionId: 'session-pref-tools-1', + userCurrency: 'USD', + userId: 'user-pref-tools' + }); + + const result = await subject.chat({ + languageCode: 'en', + query: 'Show my portfolio allocation', + sessionId: 'session-pref-tools-2', + userCurrency: 'USD', + userId: 'user-pref-tools' + }); + + expect(result.answer.length).toBeGreaterThan(0); + expect(generateTextSpy).toHaveBeenCalledWith( + expect.objectContaining({ + prompt: expect.stringContaining( + 'User preference: keep the response concise in 1-3 short sentences and avoid speculation.' 
+ ) + }) + ); + }); + it('runs rebalance and stress test tools for portfolio scenario prompts', async () => { portfolioService.getDetails.mockResolvedValue({ holdings: { diff --git a/apps/api/src/app/endpoints/ai/ai.service.ts b/apps/api/src/app/endpoints/ai/ai.service.ts index bfed8d377..4380fb645 100644 --- a/apps/api/src/app/endpoints/ai/ai.service.ts +++ b/apps/api/src/app/endpoints/ai/ai.service.ts @@ -20,12 +20,17 @@ import { import { AI_AGENT_MEMORY_MAX_TURNS, buildAnswer, + createPreferenceSummaryResponse, getMemory, + getUserPreferences, + isPreferenceRecallQuery, + resolvePreferenceUpdate, resolveSymbols, runMarketDataLookup, runPortfolioAnalysis, runRiskAssessment, - setMemory + setMemory, + setUserPreferences } from './ai-agent.chat.helpers'; import { addVerificationChecks } from './ai-agent.verification.helpers'; import { @@ -247,11 +252,17 @@ export class AiService { try { const memoryReadStartedAt = Date.now(); - const memory = await getMemory({ - redisCacheService: this.redisCacheService, - sessionId: resolvedSessionId, - userId - }); + const [memory, userPreferences] = await Promise.all([ + getMemory({ + redisCacheService: this.redisCacheService, + sessionId: resolvedSessionId, + userId + }), + getUserPreferences({ + redisCacheService: this.redisCacheService, + userId + }) + ]); memoryReadInMs = Date.now() - memoryReadStartedAt; const plannedTools = determineToolPlan({ @@ -262,6 +273,11 @@ export class AiService { plannedTools, query: normalizedQuery }); + const preferenceUpdate = resolvePreferenceUpdate({ + query: normalizedQuery, + userPreferences + }); + const effectiveUserPreferences = preferenceUpdate.userPreferences; const toolCalls: AiAgentToolCall[] = []; const citations: AiAgentChatResponse['citations'] = []; const verification: AiAgentChatResponse['verification'] = []; @@ -438,6 +454,19 @@ export class AiService { query: normalizedQuery }); + if ( + policyDecision.route === 'direct' && + policyDecision.blockReason === 'no_tool_query' + ) 
{ + if (isPreferenceRecallQuery(normalizedQuery)) { + answer = createPreferenceSummaryResponse({ + userPreferences: effectiveUserPreferences + }); + } else if (preferenceUpdate.acknowledgement) { + answer = preferenceUpdate.acknowledgement; + } + } + if (policyDecision.route === 'tools') { const llmGenerationStartedAt = Date.now(); answer = await buildAnswer({ @@ -458,6 +487,7 @@ export class AiService { rebalancePlan, riskAssessment, stressTest, + userPreferences: effectiveUserPreferences, userCurrency }); llmGenerationInMs = Date.now() - llmGenerationStartedAt; @@ -525,6 +555,13 @@ export class AiService { sessionId: resolvedSessionId, userId }); + if (preferenceUpdate.shouldPersist) { + await setUserPreferences({ + redisCacheService: this.redisCacheService, + userId, + userPreferences: effectiveUserPreferences + }); + } memoryWriteInMs = Date.now() - memoryWriteStartedAt; const response: AiAgentChatResponse = { diff --git a/tasks/lessons.md b/tasks/lessons.md index 0b7243c73..53db3fc41 100644 --- a/tasks/lessons.md +++ b/tasks/lessons.md @@ -35,3 +35,7 @@ Updated: 2026-02-24 8. Context: Open-source submission strategy after publish constraints Mistake: Treated npm publication as the only completion path for contribution evidence Rule: When package publication is blocked, ship the tool in-repo and open upstream PRs in high-signal repositories to preserve external contribution progress. + +9. Context: Memory feature validation after chat/session persistence rollout + Mistake: Session-scoped memory shipped without an explicit user-scoped preference path for cross-session continuity. + Rule: When memory requirements mention user preferences, implement and test both session memory and user-level memory keyed independently from session IDs. 
diff --git a/tasks/tasks.md b/tasks/tasks.md index 055a6b54c..9b7db4e5a 100644 --- a/tasks/tasks.md +++ b/tasks/tasks.md @@ -216,6 +216,22 @@ Last updated: 2026-02-24 - [x] Add/update unit tests for arithmetic direct replies and provider tracing/fallback behavior. - [x] Run focused verification (`test:ai` and `api:lint`) and update tracker notes. +## Session Plan (2026-02-24, Cross-Session User Preference Memory) + +- [x] Add Redis-backed user preference storage keyed by `userId` (independent of `sessionId`). +- [x] Parse explicit preference update prompts and persist preference changes across sessions. +- [x] Apply persisted preference context to AI answer generation and direct-route responses where relevant. +- [x] Add/update AI unit tests to verify cross-session preference continuity and deterministic behavior. +- [x] Run focused verification (`test:ai`) and update tracker notes. + +## Session Plan (2026-02-24, Chat Details Popover UX) + +- [ ] Audit current AI chat response rendering and identify diagnostics shown inline. +- [ ] Move diagnostics (confidence, citations, verification, observability) behind an info-triggered popover per assistant message. +- [ ] Keep main assistant response focused on user-facing answer and retain feedback controls in primary view. +- [ ] Update chat panel tests to assert info-trigger behavior and diagnostics visibility expectations. +- [ ] Run focused frontend verification and update trackers (`Tasks.md`, `tasks/tasks.md`, `tasks/lessons.md`). + ## Verification Notes - `nx run api:lint` completed successfully (existing workspace warnings only). 
@@ -293,3 +309,6 @@ Last updated: 2026-02-24 - `npx jest apps/api/src/app/endpoints/ai/ai-agent.utils.spec.ts apps/api/src/app/endpoints/ai/ai.service.spec.ts apps/api/src/app/endpoints/ai/ai-observability.service.spec.ts --config apps/api/jest.config.ts` (36/36 tests passed) - `npm run test:ai` (9/9 suites passed, 49/49 tests) - `npx nx run api:lint --verbose` (passes with existing workspace warnings) +- Cross-session user preference memory verification (local, 2026-02-24): + - `npm run test:ai` (9/9 suites passed, 54/54 tests) + - `npx nx run api:lint` (passes with existing workspace warnings)