10 changes: 5 additions & 5 deletions lightspeed-stack.yaml
@@ -10,12 +10,12 @@ service:
 llama_stack:
   # Uses a remote llama-stack service
   # The instance would have already been started with a llama-stack-run.yaml file
-  use_as_library_client: false
+  # use_as_library_client: false
   # Alternative for "as library use"
-  # use_as_library_client: true
-  # library_client_config_path: <path-to-llama-stack-run.yaml-file>
-  url: http://llama-stack:8321
-  api_key: xyzzy
+  use_as_library_client: true
+  library_client_config_path: run.yaml
+  # url: http://llama-stack:8321
+  # api_key: xyzzy
 user_data_collection:
   feedback_enabled: true
   feedback_storage: "/tmp/data/feedback"
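
This flips the stack from talking to a remote llama-stack server to embedding llama-stack in-process via the library client, driven by run.yaml. As a rough sketch of how a consumer might branch on this setting — the import paths and class names below are assumptions based on the public llama-stack client libraries, not code from this repository:

import yaml

def make_client(config_path: str):
    # Hypothetical helper; mirrors the llama_stack section of lightspeed-stack.yaml
    with open(config_path, encoding="utf-8") as f:
        cfg = yaml.safe_load(f)["llama_stack"]

    if cfg.get("use_as_library_client"):
        # Run llama-stack in-process from the run.yaml distribution file
        # (the exact import path varies across llama-stack versions)
        from llama_stack.core.library_client import LlamaStackAsLibraryClient
        client = LlamaStackAsLibraryClient(cfg["library_client_config_path"])
        client.initialize()
        return client

    # Talk to an already-running llama-stack server over HTTP
    from llama_stack_client import LlamaStackClient
    return LlamaStackClient(base_url=cfg["url"], api_key=cfg.get("api_key"))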
223 changes: 129 additions & 94 deletions run.yaml
@@ -15,134 +15,169 @@ apis:
benchmarks: []
datasets: []
image_name: starter
# external_providers_dir: /opt/app-root/src/.llama/providers.d
external_providers_dir: ${env.EXTERNAL_PROVIDERS_DIR}

providers:
inference:
- provider_id: openai # This ID is a reference to 'providers.inference'
provider_type: remote::openai
config:
api_key: ${env.OPENAI_API_KEY}
allowed_models: ["${env.E2E_OPENAI_MODEL:=gpt-4o-mini}"]
- config: {}
provider_id: sentence-transformers
provider_type: inline::sentence-transformers
files:
- config:
metadata_store:
table_name: files_metadata
backend: sql_default
storage_dir: ~/.llama/storage/files
provider_id: meta-reference-files
provider_type: inline::localfs
safety:
- config:
excluded_categories: []
provider_id: llama-guard
provider_type: inline::llama-guard
scoring:
- provider_id: basic
provider_type: inline::basic
config: {}
- provider_id: llm-as-judge
provider_type: inline::llm-as-judge
config: {}
- provider_id: braintrust
provider_type: inline::braintrust
config:
openai_api_key: '********'
tool_runtime:
- config: {} # Enable the RAG tool
provider_id: rag-runtime
provider_type: inline::rag-runtime
vector_io:
- config: # Define the storage backend for RAG
persistence:
namespace: vector_io::faiss
backend: kv_default
provider_id: faiss
provider_type: inline::faiss
agents:
- config:
persistence:
agent_state:
namespace: agents_state
backend: kv_default
responses:
table_name: agents_responses
backend: sql_default
provider_id: meta-reference
provider_type: inline::meta-reference
batches:
- config:
kvstore:
namespace: batches_store
backend: kv_default
provider_id: reference
provider_type: inline::reference
datasetio:
- config:
kvstore:
namespace: huggingface_datasetio
backend: kv_default
provider_id: huggingface
provider_type: remote::huggingface
- config:
kvstore:
namespace: localfs_datasetio
backend: kv_default
provider_id: localfs
provider_type: inline::localfs
eval:
- config:
kvstore:
namespace: eval_store
backend: kv_default
provider_id: meta-reference
provider_type: inline::meta-reference
scoring_fns: []
server:
port: 8321
storage:
backends:
kv_default: # Define the storage backend type for RAG; here the registry and RAG are unified, i.e. information on registered resources (e.g. models, vector_stores) is saved together with the RAG chunks
kv_default:
type: kv_sqlite
db_path: ${env.KV_STORE_PATH:=~/.llama/storage/rag/kv_store.db}
sql_default:
type: sql_sqlite
db_path: ${env.SQL_STORE_PATH:=~/.llama/storage/sql_store.db}

stores:
metadata:
namespace: registry
backend: kv_default

inference:
table_name: inference_store
backend: sql_default
max_write_queue_size: 10000
num_writers: 4

conversations:
table_name: openai_conversations
backend: sql_default

prompts:
namespace: prompts
backend: kv_default

metadata_store:
type: sqlite
db_path: ~/.llama/storage/registry.db

inference_store:
type: sqlite
db_path: ~/.llama/storage/inference-store.db

conversations_store:
type: sqlite
db_path: ~/.llama/storage/conversations.db
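
The ${env.NAME:=default} references above (KV_STORE_PATH, SQL_STORE_PATH, and EXTERNAL_PROVIDERS_DIR earlier in the file) are resolved against the environment at startup. A minimal sketch of that substitution rule, assuming the semantics are "use the variable if set, otherwise the default after :="; this is an illustration, not llama-stack's actual resolver:

import os
import re

# Matches ${env.NAME} or ${env.NAME:=default}; illustration only.
_ENV_REF = re.compile(r"\$\{env\.([A-Za-z_][A-Za-z0-9_]*)(?::=([^}]*))?\}")

def expand_env(value: str) -> str:
    def repl(m: re.Match) -> str:
        name, default = m.group(1), m.group(2)
        resolved = os.environ.get(name, default)
        if resolved is None:
            raise KeyError(f"environment variable {name} is not set and has no default")
        return resolved
    return _ENV_REF.sub(repl, value)

# expand_env("${env.KV_STORE_PATH:=~/.llama/storage/rag/kv_store.db}")
# -> value of KV_STORE_PATH if set, else "~/.llama/storage/rag/kv_store.db"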

providers:
inference:
- provider_id: openai
provider_type: remote::openai
config:
api_key: ${env.OPENAI_API_KEY}
allowed_models:
- gpt-4o-mini

- provider_id: sentence-transformers
provider_type: inline::sentence-transformers
config:
allowed_models:
- ${env.EMBEDDING_MODEL_DIR}

files:
- provider_id: meta-reference-files
provider_type: inline::localfs
config:
storage_dir: ~/.llama/storage/files
metadata_store:
table_name: files_metadata
backend: sql_default

safety:
- provider_id: llama-guard
provider_type: inline::llama-guard
config:
excluded_categories: []

scoring:
- provider_id: basic
provider_type: inline::basic
config: {}

- provider_id: llm-as-judge
provider_type: inline::llm-as-judge
config: {}

tool_runtime:
- provider_id: rag-runtime
provider_type: inline::rag-runtime
config: {}

vector_io:
- provider_id: solr-vector
provider_type: remote::solr_vector_io
config:
solr_url: http://localhost:8983/solr
collection_name: portal-rag
vector_field: chunk_vector
content_field: chunk
embedding_dimension: 384
embedding_model: ${env.EMBEDDING_MODEL_DIR}
persistence:
namespace: portal-rag
backend: kv_default
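
The remote::solr_vector_io provider maps RAG chunks onto a Solr collection with a dense-vector field. A hedged sketch of the kind of KNN search such a provider could issue, using Solr's standard {!knn} query parser and the field names configured above; the provider's real implementation is external to this diff:

import json
import urllib.parse
import urllib.request

def solr_knn_search(query_vector, top_k=5,
                    solr_url="http://localhost:8983/solr",
                    collection="portal-rag",
                    vector_field="chunk_vector",
                    content_field="chunk"):
    """Query Solr's dense-vector KNN parser; defaults mirror the config above."""
    vector = "[" + ",".join(f"{v:.6f}" for v in query_vector) + "]"
    params = urllib.parse.urlencode({
        "q": f"{{!knn f={vector_field} topK={top_k}}}{vector}",
        "fl": f"id,{content_field},score",
        "wt": "json",
    })
    with urllib.request.urlopen(f"{solr_url}/{collection}/select?{params}") as resp:
        return json.load(resp)["response"]["docs"]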

agents:
- provider_id: meta-reference
provider_type: inline::meta-reference
config:
persistence:
agent_state:
namespace: agents_state
backend: kv_default
responses:
table_name: agents_responses
backend: sql_default

batches:
- provider_id: reference
provider_type: inline::reference
config:
kvstore:
namespace: batches_store
backend: kv_default

datasetio:
- provider_id: huggingface
provider_type: remote::huggingface
config:
kvstore:
namespace: huggingface_datasetio
backend: kv_default

- provider_id: localfs
provider_type: inline::localfs
config:
kvstore:
namespace: localfs_datasetio
backend: kv_default

registered_resources:
models: []
models:
- model_id: granite-embedding-30m
model_type: embedding
provider_id: sentence-transformers
provider_model_id: ${env.EMBEDDING_MODEL_DIR}
metadata:
embedding_dimension: 384
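
The dimension declared here has to match what the model actually emits, and it is repeated in the Solr vector_io config above. A quick sanity check with sentence-transformers, assuming EMBEDDING_MODEL_DIR points at a local granite-embedding-30m checkout:

import os
from sentence_transformers import SentenceTransformer  # pip install sentence-transformers

# EMBEDDING_MODEL_DIR is the same env var referenced in run.yaml
model = SentenceTransformer(os.environ["EMBEDDING_MODEL_DIR"])
vec = model.encode("sanity check")
assert vec.shape == (384,), f"expected 384 dims, got {vec.shape}"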

shields:
- shield_id: llama-guard
provider_id: llama-guard
provider_shield_id: openai/gpt-4o-mini
Comment on lines 162 to 165

⚠️ Potential issue | 🟠 Major

🧩 Analysis chain

🏁 Scripts executed (repository: lightspeed-core/lightspeed-stack):

rg -n "provider_shield_id"
sed -n '1,60p' dev-tools/test-configs/llama-stack-mcp-test.yaml


Invalid provider_shield_id for llama-guard

Lines 162-165: provider_shield_id is set to openai/gpt-4o-mini under the llama-guard provider; it must reference a llama-guard shield ID (e.g. llama-guard).

🤖 Prompt for AI Agents
In `@run.yaml` around lines 162-165, the `shields` entry sets shield_id: llama-guard but provider_shield_id: openai/gpt-4o-mini; change provider_shield_id to a valid llama-guard shield identifier (e.g. provider_shield_id: llama-guard) so the entry for the `llama-guard` provider references a llama-guard shield.

vector_stores: []
vector_stores:
- vector_store_id: portal-rag
provider_id: solr-vector
embedding_model: sentence-transformers/${env.EMBEDDING_MODEL_DIR}
embedding_dimension: 384
datasets: []
scoring_fns: []
benchmarks: []
tool_groups:
- toolgroup_id: builtin::rag # Register the RAG tool
provider_id: rag-runtime
vector_stores:
default_provider_id: faiss
default_embedding_model: # Define the default embedding model for RAG
provider_id: sentence-transformers
model_id: nomic-ai/nomic-embed-text-v1.5
vector_store_id: portal-rag
provider_id: solr-vector
embedding_model: sentence-transformers/${env.EMBEDDING_MODEL_DIR}
embedding_dimension: 384
Comment on lines +166 to +181

⚠️ Potential issue | 🟡 Minor

Remove duplicate vector_stores definition.

vector_stores is defined both under registered_resources (lines 166-170) and at the top level (lines 177-181) with identical content. This duplication could cause confusion and maintenance issues.

🔧 Remove duplicate block
   tool_groups:
   - toolgroup_id: builtin::rag # Register the RAG tool
     provider_id: rag-runtime
-vector_stores:
-  vector_store_id: portal-rag
-  provider_id: solr-vector
-  embedding_model: sentence-transformers/${env.EMBEDDING_MODEL_DIR}
-  embedding_dimension: 384
 safety:
   default_shield_id: llama-guard
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
 vector_stores:
 - vector_store_id: portal-rag
   provider_id: solr-vector
   embedding_model: sentence-transformers/${env.EMBEDDING_MODEL_DIR}
   embedding_dimension: 384
 datasets: []
 scoring_fns: []
 benchmarks: []
 tool_groups:
 - toolgroup_id: builtin::rag # Register the RAG tool
   provider_id: rag-runtime
-vector_stores:
-  default_provider_id: faiss
-  default_embedding_model: # Define the default embedding model for RAG
-    provider_id: sentence-transformers
-    model_id: nomic-ai/nomic-embed-text-v1.5
-  vector_store_id: portal-rag
-  provider_id: solr-vector
-  embedding_model: sentence-transformers/${env.EMBEDDING_MODEL_DIR}
-  embedding_dimension: 384
🤖 Prompt for AI Agents
In `@run.yaml` around lines 166-181, there are two identical vector_stores blocks (both defining vector_store_id: portal-rag, provider_id: solr-vector, embedding_model: sentence-transformers/${env.EMBEDDING_MODEL_DIR}, embedding_dimension: 384). Remove the duplicate so only one vector_stores definition remains, keeping vector_store_id: portal-rag in the intended scope, and check that the YAML indentation and surrounding keys (registered_resources, tool_groups, etc.) stay valid after the removal.

safety:
default_shield_id: llama-guard
11 changes: 8 additions & 3 deletions src/app/endpoints/query.py
@@ -55,6 +55,11 @@
 router = APIRouter(tags=["query"])


+# When OFFLINE is False, use reference_url for chunk source
+# When OFFLINE is True, use parent_id for chunk source
+# TODO: move this setting to a higher level configuration
+OFFLINE = True
+
 query_response: dict[int | str, dict[str, Any]] = {
     200: QueryResponse.openapi_response(),
     401: UnauthorizedResponse.openapi_response(
@@ -386,9 +391,9 @@ async def query_endpoint_handler_base(  # pylint: disable=R0914
     response = QueryResponse(
         conversation_id=conversation_id,
         response=summary.llm_response,
-        tool_calls=summary.tool_calls,
-        tool_results=summary.tool_results,
-        rag_chunks=summary.rag_chunks,
+        rag_chunks=rag_chunks_dict,
+        tool_calls=summary.tool_calls if summary.tool_calls else [],
+        tool_results=summary.tool_results if summary.tool_results else [],
         referenced_documents=referenced_documents,
         truncated=False,  # TODO: implement truncation detection
         input_tokens=token_usage.input_tokens,
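
The new module-level OFFLINE flag decides where a chunk's source attribution comes from, per the comment added at the top of the file. A hypothetical sketch of the selection logic it implies — rag_chunks_dict appears in this diff, but the helper and chunk fields below are illustrative assumptions:

# Hypothetical helper; not code from this PR.
def chunk_source(chunk: dict) -> str | None:
    if OFFLINE:
        # Offline deployments have no reachable URLs; fall back to parent_id
        return chunk.get("parent_id")
    return chunk.get("reference_url")

rag_chunks_dict = [
    {"content": c.get("content"), "source": chunk_source(c)}
    for c in chunks  # `chunks` stands in for the retrieved RAG chunks
]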