diff --git a/src/components/map-projects/AICandidatesAnalysis.jsx b/src/components/map-projects/AICandidatesAnalysis.jsx
index 28c55cd..1e9688b 100644
--- a/src/components/map-projects/AICandidatesAnalysis.jsx
+++ b/src/components/map-projects/AICandidatesAnalysis.jsx
@@ -12,6 +12,8 @@ import Tooltip from '@mui/material/Tooltip'
import Skeleton from '@mui/material/Skeleton'
import CloseIcon from '@mui/icons-material/Close'
import DataObjectIcon from '@mui/icons-material/DataObject';
+import ChevronLeftIcon from '@mui/icons-material/ChevronLeft';
+import ChevronRightIcon from '@mui/icons-material/ChevronRight';
import get from 'lodash/get'
import map from 'lodash/map'
@@ -20,9 +22,24 @@ import compact from 'lodash/compact'
import Comment from './Comment'
-const AICandidatesAnalysis = ({ analysis, onClose, sx, isCoreUser }) => {
+const AICandidatesAnalysis = ({ analysis: analysisProp, onClose, sx, isCoreUser }) => {
const { t } = useTranslation();
const [openDetails, setOpenDetails] = React.useState(false)
+ const [page, setPage] = React.useState(0)
+
+ const analysisArray = Array.isArray(analysisProp) ? analysisProp : (analysisProp ? [analysisProp] : [])
+ const total = analysisArray.length
+
+ // Jump to latest only when total grows (a new entry was appended);
+ // don't yank the user back when total holds steady on re-render.
+ const prevTotalRef = React.useRef(0)
+ React.useEffect(() => {
+ if(total > prevTotalRef.current)
+ setPage(total - 1)
+ prevTotalRef.current = total
+ }, [total])
+
+ const analysis = analysisArray[page]
let output = analysis?.output || analysis
const getRecommendationTitle = () => {
@@ -112,13 +129,13 @@ const AICandidatesAnalysis = ({ analysis, onClose, sx, isCoreUser }) => {
{
- analysis?.prompt_template_uri &&
+ analysis?.output_locale &&
- URI:
+ {t('map_project.output_locale')}:
- {analysis.prompt_template_uri}
+ {analysis.output_locale}
}
@@ -131,19 +148,29 @@ const AICandidatesAnalysis = ({ analysis, onClose, sx, isCoreUser }) => {
-
- <>
- {
- isCoreUser &&
-
-
- setOpenDetails(!openDetails)}>
-
-
-
-
- }
- >
+
+ {
+ total > 1 &&
+
+ setPage(p => Math.max(0, p - 1))} disabled={page === 0}>
+
+
+ {page + 1}/{total}
+ setPage(p => Math.min(total - 1, p + 1))} disabled={page === total - 1}>
+
+
+
+ }
+ {
+ isCoreUser &&
+
+
+ setOpenDetails(!openDetails)}>
+
+
+
+
+ }
}
diff --git a/src/components/map-projects/Candidates.jsx b/src/components/map-projects/Candidates.jsx
index 5bd4736..8a84e8e 100644
--- a/src/components/map-projects/Candidates.jsx
+++ b/src/components/map-projects/Candidates.jsx
@@ -382,7 +382,8 @@ const Candidates = ({rowIndex, alert, setAlert, rowState, conceptCache, targetCa
// v2 concept_key passthrough, then canonical_reference.code (PR2a shim),
// then the legacy concept_id/id. The resolved code is matched against
// ConceptDefinition.reference.code in Concept.jsx for highlighting.
- const primary = analysis?.output?.primary_candidate || analysis?.primary_candidate
+ const latestAnalysis = Array.isArray(analysis) ? analysis[analysis.length - 1] : analysis
+ const primary = latestAnalysis?.output?.primary_candidate || latestAnalysis?.primary_candidate
const AIRecommendedCandidateId = resolveAICandidateID(primary, conceptCache)
// Quality (score-grouped) view shows ONLY target-repo concepts. Bridge
diff --git a/src/components/map-projects/MapProject.jsx b/src/components/map-projects/MapProject.jsx
index 85ee7c5..3959414 100644
--- a/src/components/map-projects/MapProject.jsx
+++ b/src/components/map-projects/MapProject.jsx
@@ -756,7 +756,8 @@ const MapProject = () => {
setProjectPromptTemplateKey(response.data?.prompt_template_key || '')
setPromptOutputLocale(response.data?.prompt_output_locale || null)
setUseLexicalVariants(Boolean(response.data?.use_lexical_variants))
- setAnalysis(response.data?.analysis || {})
+ const rawAnalysis = response.data?.analysis || {}
+ setAnalysis(Object.fromEntries(Object.entries(rawAnalysis).map(([k, v]) => [k, Array.isArray(v) ? v : [v]])))
setProject(response.data)
setConfigure(false)
})
@@ -2259,7 +2260,9 @@ const MapProject = () => {
const rowStateLabel = VIEWS[rowState].label
let concept = mapSelected[index]
let _repo = concept?.repo
- const aiRecommendation = get(analysis, index)?.output || get(analysis, index)
+ const rowAnalyses = get(analysis, index) || []
+ const latestAnalysis = Array.isArray(rowAnalyses) ? rowAnalyses[rowAnalyses.length - 1] : rowAnalyses
+ const aiRecommendation = latestAnalysis?.output || latestAnalysis
const aiCandidate = get(aiRecommendation, 'primary_candidate')
// v2 response: prefer concept_key (resolves via conceptCache for an
// unambiguous match), then canonical_reference.code (the PR2a shim);
@@ -3840,7 +3843,13 @@ const MapProject = () => {
})
)
}
- if(isNumber(__index) && repoVersion && !analysis[__index] && _candidates?.length > 0) {
+ // Auto-match (caller supplied resolvedPromptTemplate) fires once per row;
+ // user-initiated single-row clicks always append a new entry to the
+ // per-row analysis history.
+ const isAutoMatch = Boolean(resolvedPromptTemplate)
+ const existingAnalyses = analysis[__index] || []
+ const alreadyAnalyzed = isAutoMatch && existingAnalyses.length > 0
+ if(isNumber(__index) && repoVersion && !alreadyAnalyzed && _candidates?.length > 0) {
if(!promptTemplate?.key) {
setAlert({message: 'AI Assistant prompt template is not available', severity: 'error'})
markAlgo(__index, 'recommend', -3)
@@ -3935,7 +3944,15 @@ const MapProject = () => {
markAlgo(__index, 'recommend', 1)
log({created_at: timestamp, action: 'AIRecommendation', description: get(response.data, 'output.rationale') || get(response.data, 'rationale'), extras: {...response.data, model: selectedModel, prompt_template: promptTemplateRef, prompt_template_uri: promptTemplateRef?.uri}}, __index)
- setAnalysis(prev => ({...prev, [__index]: {...response.data, model: selectedModel?.id || AIModel, model_name: selectedModel?.name, prompt_template: promptTemplateRef, prompt_template_uri: promptTemplateRef?.uri, timestamp: timestamp, user: user.username || user.id}}))
+ const resolvedTemplate = response.data?.template || {}
+ const resolvedVersion = resolvedTemplate.version || promptTemplateRef?.version || null
+ const resolvedPromptRef = {
+ ...promptTemplateRef,
+ version: resolvedVersion,
+ uri: resolvedVersion && promptTemplateRef?.key ? `/prompts/${promptTemplateRef.key}/${resolvedVersion}/` : (promptTemplateRef?.uri || null)
+ }
+ const newEntry = {...response.data, model: selectedModel?.id || AIModel, model_name: selectedModel?.name, prompt_template: resolvedPromptRef, prompt_template_uri: resolvedPromptRef.uri, output_locale: promptOutputLocale || null, timestamp: timestamp, user: user.username || user.id}
+ setAnalysis(prev => ({...prev, [__index]: [...(prev[__index] || []), newEntry]}))
return true
} catch (err) {
markAlgo(__index, 'recommend', -2)
@@ -3946,7 +3963,7 @@ const MapProject = () => {
return false
}
} else {
- markAlgo(__index, 'recommend', analysis[__index] ? 1 : -3)
+ markAlgo(__index, 'recommend', analysis[__index]?.length > 0 ? 1 : -3)
}
return false
}
diff --git a/src/i18n/locales/en/translations.json b/src/i18n/locales/en/translations.json
index 4996ec9..03e8fab 100644
--- a/src/i18n/locales/en/translations.json
+++ b/src/i18n/locales/en/translations.json
@@ -557,6 +557,7 @@
"bridge_terminology_search": "Bridge terminology search<0>Premium</0>",
"bridge_terminology_search_description": "Include mappings in the <0>CIEL Interface Terminology</0> to identify additional high quality candidates. Only available for compatible target repositories and matching algorithms.",
"scispacy_loinc_search": "ScispaCy LOINC search<0>Premium</0>",
+ "ocl_ai_assistant": "OCL AI Assistant",
"ocl_ai_candidates_analysis": "OCL AI Assistant: Candidate Analysis",
"group_by_match_quality": "Match Quality",
"ocl_semantic_algorithm": "OCL Semantic Algorithm",
@@ -628,6 +629,7 @@
"create_similar_name": "Copy of {{name}}",
"set_ai_assistant_output_language": "Set AI Assistant output language",
"ai_assistant_output_locale": "Output locale",
+ "output_locale": "Output Locale",
"use_lexical_variants": "Use Lexical Variants",
"use_lexical_variants_description": "Expand $match search to include English spelling variants (e.g. color/colour, leukemia/leukaemia) when matching concept names."
},
diff --git a/src/i18n/locales/es/translations.json b/src/i18n/locales/es/translations.json
index 49f853b..604a4ee 100644
--- a/src/i18n/locales/es/translations.json
+++ b/src/i18n/locales/es/translations.json
@@ -528,6 +528,7 @@
"bridge_terminology_search": "Búsqueda de terminología Bridge<0>Premium</0>",
"bridge_terminology_search_description": "Incluir mapeos en la <0>Terminología de Interfaz CIEL</0> para identificar candidatos adicionales de alta calidad. Disponible solo para repositorios de destino y algoritmos de coincidencia compatibles.",
"scispacy_loinc_search": "Búsqueda ScispaCy LOINC<0>Premium</0>",
+ "ocl_ai_assistant": "OCL Asistente de IA",
"ocl_ai_candidates_analysis": "OCL AI Assistant: Análisis de Candidatos",
"group_by_match_quality": "Calidad de Coincidencia",
"ocl_semantic_algorithm": "Algoritmo Semántico OCL",
@@ -556,6 +557,7 @@
"ai_prompt_template_default_model": "Modelo predeterminado",
"set_ai_assistant_output_language": "Establecer el idioma de salida del asistente de IA",
"ai_assistant_output_locale": "Configuración regional de salida",
+ "output_locale": "Idioma de salida",
"reranker_configuration": "Configuración del reranker",
"reranker_configuration_description": "Elija el modelo de reranker utilizado para calcular las puntuaciones unificadas de este proyecto. El modelo predeterminado se selecciona automáticamente, o puede ingresar un nombre de modelo personalizado.",
"reranker_configuration_model": "Modelo de reranker",
diff --git a/src/i18n/locales/zh/translations.json b/src/i18n/locales/zh/translations.json
index 5169995..1d5d8f5 100644
--- a/src/i18n/locales/zh/translations.json
+++ b/src/i18n/locales/zh/translations.json
@@ -553,6 +553,7 @@
"bridge_terminology_search": "Bridge 术语搜索<0>高级版</0>",
"bridge_terminology_search_description": "包含 <0>CIEL 接口术语</0> 中的映射,以识别更多高质量候选项。仅适用于兼容的目标仓库和匹配算法。",
"scispacy_loinc_search": "ScispaCy LOINC 搜索<0>高级版</0>",
+ "ocl_ai_assistant": "OCL AI 助手",
"ocl_ai_candidates_analysis": "OCL AI 助手:候选分析",
"group_by_match_quality": "匹配质量",
"ocl_semantic_algorithm": "OCL 语义算法",
@@ -581,6 +582,7 @@
"ai_prompt_template_default_model": "默认模型",
"set_ai_assistant_output_language": "设置 AI 助手输出语言",
"ai_assistant_output_locale": "输出区域设置",
+ "output_locale": "输出语言",
"reranker_configuration": "重排序器配置",
"reranker_configuration_description": "为此项目选择用于计算统一分数的重排序器模型。默认模型会自动选中,您也可以输入自定义模型名称。",
"reranker_configuration_model": "重排序器模型",