diff --git a/client/src/App.tsx b/client/src/App.tsx
index cbbe62d..68d59f6 100644
--- a/client/src/App.tsx
+++ b/client/src/App.tsx
@@ -1,6 +1,8 @@
-import { BrowserRouter, Routes, Route } from 'react-router-dom';
+import { BrowserRouter, Routes, Route, Navigate } from 'react-router-dom';
 import { QueryClient, QueryClientProvider } from '@tanstack/react-query';
-import MainPage from './pages/MainPage';
+import LandingPage from './pages/LandingPage';
+import DemoPage from './pages/DemoPage';
+import ClassifyPage from './pages/ClassifyPage';
 import Login from './pages/Login';
 import Logout from './pages/Logout';
 import Callback from './pages/Callback';
@@ -13,10 +15,13 @@ function App() {
     <QueryClientProvider client={queryClient}>
       <BrowserRouter>
         <Routes>
-          <Route path="/" element={<MainPage />} />
+          <Route path="/" element={<LandingPage />} />
+          <Route path="/demo" element={<DemoPage />} />
+          <Route path="/classify" element={<ClassifyPage />} />
           <Route path="/login" element={<Login />} />
           <Route path="/logout" element={<Logout />} />
           <Route path="/callback" element={<Callback />} />
+          <Route path="*" element={<Navigate to="/" replace />} />
         </Routes>
       </BrowserRouter>
     </QueryClientProvider>
diff --git a/client/src/components/DemoInterface.tsx b/client/src/components/DemoInterface.tsx
new file mode 100644
index 0000000..50561ce
--- /dev/null
+++ b/client/src/components/DemoInterface.tsx
@@ -0,0 +1,368 @@
+import React, { useEffect, useState, useMemo } from 'react';
+import { Row, Col, Select, Divider, Spin, Card, List, Badge, Empty } from 'antd';
+import { LoadingOutlined, ExclamationCircleOutlined, PictureOutlined } from '@ant-design/icons';
+import { useInference } from '../hooks/useInference';
+import type { TokenInfo } from '../types/token';
+import type { TapisFile, InferenceModelMeta } from '../types/inference';
+import TapisImageViewer from './TapisImageViewer';
+import { getCuratedFileList } from '../utils/examples';
+
+interface DemoInterfaceProps {
+  models: InferenceModelMeta[];
+  tokenInfo: TokenInfo;
+  apiBasePath: string;
+}
+
+interface AggregatedResult {
+  label: string;
+  count: number;
+}
+
+const DemoInterface: React.FC<DemoInterfaceProps> = ({ models, tokenInfo, apiBasePath }) => {
+  const curatedFileList = getCuratedFileList(tokenInfo.tapisHost);
+
+  // only supporting clip models on first pass
+  const clipModels = useMemo(
+    () => models.filter((m) => m.name.toLowerCase().includes('clip')),
+    [models]
+  );
+
+  // Split into curated sets of 5
+  const curatedSets = useMemo(() => {
+    const sets: { value: string; label: string; files: TapisFile[] }[] = [];
+    for (let i = 0; i < curatedFileList.length; i += 5) {
+      const setNum = Math.floor(i / 5) + 1;
+      sets.push({
+        value: `set${setNum}`,
+        label: `Curated Set #${setNum}`,
+        files: curatedFileList.slice(i, i + 5),
+      });
+    }
+    return sets;
+  }, [curatedFileList]);
+
+  const [selectedSet, setSelectedSet] = useState<string | undefined>(undefined);
+  const [selectedModel, setSelectedModel] = useState<string | undefined>(undefined);
+  const [selectedSensitivity, setSelectedSensitivity] = useState<'high' | 'medium' | 'low'>(
+    'medium'
+  );
+  const [aggregatedResults, setAggregatedResults] = useState<AggregatedResult[]>([]);
+
+  const inferenceMutation = useInference(tokenInfo.token, apiBasePath);
+
+  // Get current files from selected set
+  const currentFiles = useMemo(() => {
+    if (!selectedSet) return [];
+    return curatedSets.find((s) => s.value === selectedSet)?.files || [];
+  }, [selectedSet, curatedSets]);
+
+  // Auto-select first model when models are loaded (but NOT auto-select set)
+  useEffect(() => {
+    if (clipModels && clipModels.length > 0 && !selectedModel) {
+      setSelectedModel(clipModels[0].name);
+    }
+  }, [clipModels, selectedModel]);
+
+  // Submit inference when:
+  // - User first selects both a model and image set
+  // - User changes model, set, or sensitivity after initial selection
+  useEffect(() => {
+    if (selectedModel && selectedSet && currentFiles.length > 0) {
+      inferenceMutation.mutate({
+        files: currentFiles,
+        model: selectedModel,
+        sensitivity: selectedSensitivity,
+      });
+    }
+    // excluding inferenceMutation from deps to avoid infinite loop
+    // eslint-disable-next-line react-hooks/exhaustive-deps
+  }, [selectedModel, selectedSet, selectedSensitivity, currentFiles]);
+
+  // Clear results when switching set or model
+  useEffect(() => {
+    setAggregatedResults([]);
+  }, [selectedSet, selectedModel]);
+
+  // Aggregate inference results
+  useEffect(() => {
+    if (inferenceMutation.isSuccess && inferenceMutation.data) {
+      const labelCounts: Record<string, number> = {};
+
+      // Access the nested array - use aggregated_results for CLIP
+      const response = inferenceMutation.data;
+      const results = response.aggregated_results || response.results || [];
+
+      results.forEach((fileResult) => {
+        fileResult.predictions?.forEach((prediction) => {
+          labelCounts[prediction.label] = (labelCounts[prediction.label] || 0) + 1;
+        });
+      });
+
+      const aggregated = Object.entries(labelCounts)
+        .map(([label, count]) => ({ label, count }))
+        .sort((a, b) => b.count - a.count);
+
+      setAggregatedResults(aggregated);
+    }
+  }, [inferenceMutation.isSuccess, inferenceMutation.data]);
+
+  const isReady = selectedModel && selectedSet;
+  const isLoading = inferenceMutation.isPending;
+  const isError = inferenceMutation.isError;
+
+  return (
+    <>
+      {/* Controls Section */}
+      <Divider orientation="left">Select a model</Divider>
+      <Row gutter={[16, 16]}>
+        <Col xs={24} md={16}>
+          <Select
+            style={{ width: '100%' }}
+            value={selectedModel}
+            onChange={(val) => setSelectedModel(val)}
+            options={clipModels.map((m) => ({ label: m.name, value: m.name }))}
+          />
+        </Col>
+        <Col xs={24} md={8}>
+          <Select
+            style={{ width: '100%' }}
+            value={selectedSensitivity}
+            onChange={(val) => setSelectedSensitivity(val)}
+            options={[
+              { label: 'High', value: 'high' } /* - More labels, may include noise */,
+              { label: 'Medium', value: 'medium' } /* default */,
+              { label: 'Low', value: 'low' } /* fewer labels, higher confidence? */,
+            ]}
+          />
+        </Col>
+      </Row>
+
+      <Divider orientation="left">Select a curated image set</Divider>
+      <Row gutter={[16, 16]}>
+        <Col span={24}>
+          <Select
+            style={{ width: '100%' }}
+            value={selectedSet}
+            onChange={(val) => setSelectedSet(val)}
+            options={curatedSets.map((s) => ({ label: s.label, value: s.value }))}
+          />
+        </Col>
+      </Row>