From a303bec74a2749b335712a34fba31f8fa4d6a99e Mon Sep 17 00:00:00 2001 From: Lawrence Lane Date: Wed, 14 Jan 2026 12:37:40 -0500 Subject: [PATCH 1/8] fern conversion exploration Signed-off-by: Lawrence Lane --- docs/RFC-FERN-MIGRATION.md | 1371 +++++++++++++++++ fern/assets/favicon.png | Bin 0 -> 13659 bytes .../recipes/code_generation/text_to_python.py | 318 ++++ .../recipes/code_generation/text_to_sql.py | 323 ++++ .../recipes/qa_and_chat/multi_turn_chat.py | 204 +++ .../recipes/qa_and_chat/product_info_qa.py | 224 +++ fern/docs.yml | 124 ++ fern/fern.config.json | 4 + fern/pages/api-reference/analysis.mdx | 162 ++ fern/pages/api-reference/column-configs.mdx | 185 +++ fern/pages/api-reference/config-builder.mdx | 172 +++ .../api-reference/data-designer-config.mdx | 108 ++ fern/pages/api-reference/models.mdx | 106 ++ fern/pages/api-reference/processors.mdx | 112 ++ fern/pages/api-reference/sampler-params.mdx | 152 ++ fern/pages/api-reference/validator-params.mdx | 169 ++ fern/pages/concepts/columns.mdx | 162 ++ .../concepts/models/configure-with-cli.mdx | 150 ++ .../concepts/models/custom-model-settings.mdx | 237 +++ .../models/default-model-settings.mdx | 132 ++ .../concepts/models/inference-parameters.mdx | 153 ++ fern/pages/concepts/models/model-configs.mdx | 127 ++ .../pages/concepts/models/model-providers.mdx | 58 + fern/pages/concepts/person-sampling.mdx | 222 +++ fern/pages/concepts/processors.mdx | 160 ++ fern/pages/concepts/validators.mdx | 347 +++++ fern/pages/contributing.mdx | 241 +++ fern/pages/index.mdx | 53 + fern/pages/installation.mdx | 36 + fern/pages/plugins/available.mdx | 8 + fern/pages/plugins/example.mdx | 312 ++++ fern/pages/plugins/overview.mdx | 51 + fern/pages/quick-start.mdx | 93 ++ .../code-generation/text-to-python.mdx | 294 ++++ .../recipes/code-generation/text-to-sql.mdx | 336 ++++ fern/pages/recipes/index.mdx | 72 + .../recipes/qa-and-chat/multi-turn-chat.mdx | 217 +++ .../recipes/qa-and-chat/product-info-qa.mdx | 237 +++ fern/pages/tutorials/images-as-context.mdx | 282 ++++ fern/pages/tutorials/overview.mdx | 87 ++ fern/pages/tutorials/seeding-with-dataset.mdx | 257 +++ fern/pages/tutorials/structured-outputs.mdx | 316 ++++ fern/pages/tutorials/the-basics.mdx | 323 ++++ scripts/fern_migration/convert_admonitions.py | 43 + scripts/fern_migration/convert_tabs.py | 34 + scripts/fern_migration/notebook_to_mdx.py | 91 ++ 46 files changed, 8865 insertions(+) create mode 100644 docs/RFC-FERN-MIGRATION.md create mode 100644 fern/assets/favicon.png create mode 100644 fern/assets/recipes/code_generation/text_to_python.py create mode 100644 fern/assets/recipes/code_generation/text_to_sql.py create mode 100644 fern/assets/recipes/qa_and_chat/multi_turn_chat.py create mode 100644 fern/assets/recipes/qa_and_chat/product_info_qa.py create mode 100644 fern/docs.yml create mode 100644 fern/fern.config.json create mode 100644 fern/pages/api-reference/analysis.mdx create mode 100644 fern/pages/api-reference/column-configs.mdx create mode 100644 fern/pages/api-reference/config-builder.mdx create mode 100644 fern/pages/api-reference/data-designer-config.mdx create mode 100644 fern/pages/api-reference/models.mdx create mode 100644 fern/pages/api-reference/processors.mdx create mode 100644 fern/pages/api-reference/sampler-params.mdx create mode 100644 fern/pages/api-reference/validator-params.mdx create mode 100644 fern/pages/concepts/columns.mdx create mode 100644 fern/pages/concepts/models/configure-with-cli.mdx create mode 100644 
fern/pages/concepts/models/custom-model-settings.mdx create mode 100644 fern/pages/concepts/models/default-model-settings.mdx create mode 100644 fern/pages/concepts/models/inference-parameters.mdx create mode 100644 fern/pages/concepts/models/model-configs.mdx create mode 100644 fern/pages/concepts/models/model-providers.mdx create mode 100644 fern/pages/concepts/person-sampling.mdx create mode 100644 fern/pages/concepts/processors.mdx create mode 100644 fern/pages/concepts/validators.mdx create mode 100644 fern/pages/contributing.mdx create mode 100644 fern/pages/index.mdx create mode 100644 fern/pages/installation.mdx create mode 100644 fern/pages/plugins/available.mdx create mode 100644 fern/pages/plugins/example.mdx create mode 100644 fern/pages/plugins/overview.mdx create mode 100644 fern/pages/quick-start.mdx create mode 100644 fern/pages/recipes/code-generation/text-to-python.mdx create mode 100644 fern/pages/recipes/code-generation/text-to-sql.mdx create mode 100644 fern/pages/recipes/index.mdx create mode 100644 fern/pages/recipes/qa-and-chat/multi-turn-chat.mdx create mode 100644 fern/pages/recipes/qa-and-chat/product-info-qa.mdx create mode 100644 fern/pages/tutorials/images-as-context.mdx create mode 100644 fern/pages/tutorials/overview.mdx create mode 100644 fern/pages/tutorials/seeding-with-dataset.mdx create mode 100644 fern/pages/tutorials/structured-outputs.mdx create mode 100644 fern/pages/tutorials/the-basics.mdx create mode 100644 scripts/fern_migration/convert_admonitions.py create mode 100644 scripts/fern_migration/convert_tabs.py create mode 100644 scripts/fern_migration/notebook_to_mdx.py diff --git a/docs/RFC-FERN-MIGRATION.md b/docs/RFC-FERN-MIGRATION.md new file mode 100644 index 00000000..6964b201 --- /dev/null +++ b/docs/RFC-FERN-MIGRATION.md @@ -0,0 +1,1371 @@ +# RFC: Migration from MkDocs to Fern Docs + +**Status:** Draft +**Author:** [Author Name] +**Owner:** [Owner Name] +**Created:** 2026-01-14 +**Last Updated:** 2026-01-14 +**Target Completion:** [YYYY-MM-DD] + +--- + +## Summary + +This RFC proposes migrating the NeMo Data Designer documentation from MkDocs Material to [Fern Docs](https://buildwithfern.com/learn/docs/getting-started/overview). The migration will be performed incrementally by creating a new `docs-fern/` directory, preserving all existing content while adapting to Fern's component system. + +## Motivation + +This migration is **mandated** as part of NVIDIA's documentation platform standardization initiative. 
+ +**Additional benefits:** + +- **Modern documentation platform**: Fern offers AI-native features including Ask Fern and auto-generated MCP servers +- **Enhanced API documentation**: Better support for API reference documentation from OpenAPI specs +- **Improved developer experience**: Rich component library with interactive elements +- **Self-hosting options**: Flexible deployment for enterprise requirements + +## Scope + +### In Scope + +- 1:1 content migration (no content changes) +- Component mapping from MkDocs Material to Fern equivalents +- Navigation structure preservation +- Code reference documentation migration + +### Out of Scope + +- Content rewrites or restructuring +- New features or sections +- Removal of existing documentation + +--- + +## Current Documentation Inventory + +### File Structure + +``` +docs/ +├── index.md # Home page +├── installation.md # Installation guide +├── quick-start.md # Quick start tutorial +├── CONTRIBUTING.md # Contribution guide +├── concepts/ +│ ├── columns.md +│ ├── validators.md +│ ├── processors.md +│ ├── person_sampling.md +│ └── models/ +│ ├── default-model-settings.md +│ ├── custom-model-settings.md +│ ├── configure-model-settings-with-the-cli.md +│ ├── model-providers.md +│ ├── model-configs.md +│ └── inference-parameters.md +├── recipes/ +│ ├── cards.md +│ ├── code_generation/ +│ │ ├── text_to_python.md +│ │ └── text_to_sql.md +│ └── qa_and_chat/ +│ ├── product_info_qa.md +│ └── multi_turn_chat.md +├── plugins/ +│ ├── overview.md +│ ├── example.md +│ └── available.md +├── code_reference/ # Auto-generated API docs +│ ├── models.md +│ ├── column_configs.md +│ ├── config_builder.md +│ ├── data_designer_config.md +│ ├── sampler_params.md +│ ├── validator_params.md +│ ├── processors.md +│ └── analysis.md +├── colab_notebooks/ # Jupyter notebooks +│ ├── 1-the-basics.ipynb +│ ├── 2-structured-outputs-and-jinja-expressions.ipynb +│ ├── 3-seeding-with-a-dataset.ipynb +│ └── 4-providing-images-as-context.ipynb +├── assets/ +│ └── recipes/ # Downloadable code files +├── css/ # Custom styles +├── js/ # Custom scripts +└── overrides/ # MkDocs template overrides +``` + +### Current Navigation Structure + +```yaml +nav: + - Getting Started: + - Welcome: index.md + - Installation: installation.md + - Quick Start: quick-start.md + - Contributing: CONTRIBUTING.md + - Concepts: + - Models: (6 sub-pages) + - Columns: concepts/columns.md + - Validators: concepts/validators.md + - Processors: concepts/processors.md + - Person Sampling: concepts/person_sampling.md + - Tutorials: + - Overview + 4 Jupyter notebooks + - Recipes: + - Recipe Cards + 4 recipes + - Plugins: + - 3 pages + - Code Reference: + - 8 auto-generated API docs +``` + +--- + +## Component Mapping + +### MkDocs → Fern Component Equivalents + +Reference: [Fern Components Overview](https://buildwithfern.com/learn/docs/writing-content/components/overview) + +| MkDocs Feature | Current Syntax | Fern Equivalent | Notes | +|----------------|----------------|-----------------|-------| +| **Admonitions** | `!!! note "Title"` | ``, ``, ``, `` | See [Callouts](#1-admonitions--callouts) | +| **Tabbed Content** | `=== "Tab 1"` | `` + `` | See [Tabs](#2-tabbed-content) | +| **Code Blocks** | ` ```python ` | ` ```python ` | Direct compatibility | +| **Code Snippets** | `--8<-- "path"` | `` with `src` | File embedding | +| **Grid Cards** | `
` | `` + `` | See [Cards](#3-grid-cards) | +| **Icons** | `:material-xxx:` | Fern icons or inline SVG | Limited support | +| **Download Links** | `{ .md-button download=}` | Standard markdown links | Simplified | +| **API Docs** | `::: module.path` | Manual or OpenAPI import | See [API Reference](#4-api-reference) | +| **Jupyter Notebooks** | `.ipynb` files | Convert to MDX or embed | See [Notebooks](#5-jupyter-notebooks) | +| **Versioning** | Mike plugin | Fern versioning | Built-in support | + +--- + +## Detailed Component Migrations + +### 1. Admonitions → Callouts + +**Current MkDocs syntax:** + +```markdown +!!! note "The Declarative Approach" + Columns are **declarative specifications**. You describe *what* you want... + +!!! tip "Conditional Sampling" + Samplers support **conditional parameters**... + +!!! question "New to Data Designer?" + Recipes provide working code... + +!!! warning "Important" + This action cannot be undone. +``` + +**Fern equivalent:** + +```mdx + +Columns are **declarative specifications**. You describe *what* you want... + + + +Samplers support **conditional parameters**... + + + +Recipes provide working code... + + + +This action cannot be undone. + +``` + +**Migration mapping:** + +| MkDocs Admonition | Fern Callout | +|-------------------|--------------| +| `!!! note` | `` | +| `!!! tip` | `` | +| `!!! info` | `` | +| `!!! warning` | `` | +| `!!! question` | `` | +| `!!! danger` | `` | + +### 2. Tabbed Content + +**Current MkDocs syntax (installation.md):** + +```markdown +=== "pip" + + ```bash + pip install data-designer + ``` + +=== "uv" + + ```bash + uv add data-designer + ``` +``` + +**Fern equivalent:** + +```mdx + + + ```bash + pip install data-designer + ``` + + + ```bash + uv add data-designer + ``` + + +``` + +### 3. Grid Cards + +**Current MkDocs syntax (recipes/cards.md):** + +```markdown +
+ +- :material-snake:{ .lg .middle } **Text to Python** + + Generate a dataset of natural language instructions... + + --- + + **Demonstrates:** + - Python code generation + - Python code validation + + --- + + [:material-book-open-page-variant: View Recipe](code_generation/text_to_python.md){ .md-button } + +
+``` + +**Fern equivalent:** + +```mdx + + + Generate a dataset of natural language instructions... + + **Demonstrates:** + - Python code generation + - Python code validation + + +``` + +### 4. API Reference (mkdocstrings) + +**Current MkDocs syntax (code_reference/models.md):** + +```markdown +# Models + +The `models` module defines configuration objects... + +::: data_designer.config.models +``` + +**Fern options:** + +**Option A: Manual Documentation** +Convert auto-generated docs to manually written MDX with code examples. + +**Option B: OpenAPI Integration** +If the API has an OpenAPI spec, use Fern's native API reference generation. + +**Option C: TypeDoc/PyDoc Integration** +Use Fern's SDK documentation features if available. + +**Recommendation:** Start with Option A (manual) for initial migration, evaluate automation options post-migration. + +### 5. Jupyter Notebooks + +**Current approach:** `mkdocs-jupyter` plugin renders `.ipynb` files directly. + +**Fern options:** + +**Option A: Convert to MDX** +Convert notebooks to MDX files with code blocks and output screenshots. + +**Option B: Embed as iframes** +Host notebooks on Colab/GitHub and embed links. + +**Option C: Use Fern's code playground** +If available, use interactive code features. + +**Recommendation:** Convert to MDX with static code blocks and link to Colab for interactive experience (preserves current Colab badge functionality). + +### 6. Code Snippets (pymdownx.snippets) + +**Current MkDocs syntax:** + +```markdown +```python +--8<-- "assets/recipes/code_generation/text_to_python.py" +``` +``` + +**Fern equivalent:** + +```mdx + +``` + +Or inline the code directly if file embedding isn't supported. + +--- + +## Proposed Directory Structure + +``` +docs-fern/ +├── fern.config.json # Fern configuration +├── docs.yml # Navigation and settings +├── pages/ +│ ├── index.mdx # Home page +│ ├── installation.mdx +│ ├── quick-start.mdx +│ ├── contributing.mdx +│ ├── concepts/ +│ │ ├── columns.mdx +│ │ ├── validators.mdx +│ │ ├── processors.mdx +│ │ ├── person-sampling.mdx +│ │ └── models/ +│ │ ├── default-model-settings.mdx +│ │ ├── custom-model-settings.mdx +│ │ ├── configure-with-cli.mdx +│ │ ├── model-providers.mdx +│ │ ├── model-configs.mdx +│ │ └── inference-parameters.mdx +│ ├── tutorials/ +│ │ ├── overview.mdx +│ │ ├── the-basics.mdx +│ │ ├── structured-outputs.mdx +│ │ ├── seeding-with-dataset.mdx +│ │ └── images-as-context.mdx +│ ├── recipes/ +│ │ ├── index.mdx # Recipe cards +│ │ ├── code-generation/ +│ │ │ ├── text-to-python.mdx +│ │ │ └── text-to-sql.mdx +│ │ └── qa-and-chat/ +│ │ ├── product-info-qa.mdx +│ │ └── multi-turn-chat.mdx +│ ├── plugins/ +│ │ ├── overview.mdx +│ │ ├── example.mdx +│ │ └── available.mdx +│ └── api-reference/ +│ ├── models.mdx +│ ├── column-configs.mdx +│ ├── config-builder.mdx +│ ├── data-designer-config.mdx +│ ├── sampler-params.mdx +│ ├── validator-params.mdx +│ ├── processors.mdx +│ └── analysis.mdx +├── assets/ +│ ├── favicon.png +│ └── recipes/ # Downloadable code files +│ ├── code_generation/ +│ └── qa_and_chat/ +└── styles/ + └── custom.css # Custom styling (if needed) +``` + +--- + +## URL Redirect Mapping + +To preserve existing bookmarks and SEO, all old URLs must redirect to their new locations. 
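+
+The table under Redirect Rules is the source of truth; the hosting-platform rules can be generated from it rather than maintained by hand. A minimal sketch is shown below, assuming the mapping is kept as a Python dict that mirrors the table (the script name, output path, and format are illustrative, not part of the current tooling):
+
+```python
+# generate_redirects.py (illustrative name): emit a Netlify-style _redirects file
+# from a single old-URL -> new-URL mapping that mirrors the Redirect Rules table.
+REDIRECTS = {
+    "/concepts/person_sampling/": "/docs/concepts/person-sampling",
+    "/recipes/cards/": "/docs/recipes",
+    # ... remaining rows from the table below
+}
+
+with open("_redirects", "w") as f:
+    for old, new in REDIRECTS.items():
+        f.write(f"{old.rstrip('/')}/* {new}/:splat 301\n")
+```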
+ +### Redirect Rules + +| Old MkDocs URL | New Fern URL | +|----------------|--------------| +| `/` | `/docs` | +| `/installation/` | `/docs/installation` | +| `/quick-start/` | `/docs/quick-start` | +| `/CONTRIBUTING/` | `/docs/contributing` | +| `/concepts/columns/` | `/docs/concepts/columns` | +| `/concepts/validators/` | `/docs/concepts/validators` | +| `/concepts/processors/` | `/docs/concepts/processors` | +| `/concepts/person_sampling/` | `/docs/concepts/person-sampling` | +| `/concepts/models/default-model-settings/` | `/docs/concepts/models/default-model-settings` | +| `/concepts/models/custom-model-settings/` | `/docs/concepts/models/custom-model-settings` | +| `/concepts/models/configure-model-settings-with-the-cli/` | `/docs/concepts/models/configure-with-cli` | +| `/concepts/models/model-providers/` | `/docs/concepts/models/model-providers` | +| `/concepts/models/model-configs/` | `/docs/concepts/models/model-configs` | +| `/concepts/models/inference-parameters/` | `/docs/concepts/models/inference-parameters` | +| `/tutorials/` | `/docs/tutorials/overview` | +| `/recipes/cards/` | `/docs/recipes` | +| `/recipes/code_generation/text_to_python/` | `/docs/recipes/code-generation/text-to-python` | +| `/recipes/code_generation/text_to_sql/` | `/docs/recipes/code-generation/text-to-sql` | +| `/recipes/qa_and_chat/product_info_qa/` | `/docs/recipes/qa-and-chat/product-info-qa` | +| `/recipes/qa_and_chat/multi_turn_chat/` | `/docs/recipes/qa-and-chat/multi-turn-chat` | +| `/plugins/overview/` | `/docs/plugins/overview` | +| `/plugins/example/` | `/docs/plugins/example` | +| `/plugins/available/` | `/docs/plugins/available` | +| `/code_reference/*` | `/api/*` | + +### Implementation + +**Option A: Fern redirects configuration** (if supported) + +```yaml +# In docs.yml +redirects: + - from: /concepts/person_sampling + to: /docs/concepts/person-sampling + # ... 
additional redirects +``` + +**Option B: Hosting platform redirects** + +For Netlify (`_redirects` file): +``` +/concepts/person_sampling/* /docs/concepts/person-sampling/:splat 301 +/code_reference/* /api/:splat 301 +``` + +For nginx: +```nginx +rewrite ^/concepts/person_sampling(.*)$ /docs/concepts/person-sampling$1 permanent; +rewrite ^/code_reference/(.*)$ /api/$1 permanent; +``` + +--- + +## Configuration Files + +### fern.config.json + +```json +{ + "organization": "nvidia-nemo", + "version": "1.0.0" +} +``` + +### docs.yml + +```yaml +instances: + - url: https://datadesigner.docs.nvidia.com + +title: NeMo Data Designer + +tabs: + docs: + display-name: Documentation + slug: docs + api: + display-name: API Reference + slug: api + +navigation: + - tab: docs + layout: + - section: Getting Started + contents: + - page: Welcome + path: pages/index.mdx + - page: Installation + path: pages/installation.mdx + - page: Quick Start + path: pages/quick-start.mdx + - page: Contributing + path: pages/contributing.mdx + - section: Concepts + contents: + - section: Models + contents: + - page: Default Model Settings + path: pages/concepts/models/default-model-settings.mdx + - page: Custom Model Settings + path: pages/concepts/models/custom-model-settings.mdx + - page: Configure with CLI + path: pages/concepts/models/configure-with-cli.mdx + - page: Model Providers + path: pages/concepts/models/model-providers.mdx + - page: Model Configs + path: pages/concepts/models/model-configs.mdx + - page: Inference Parameters + path: pages/concepts/models/inference-parameters.mdx + - page: Columns + path: pages/concepts/columns.mdx + - page: Validators + path: pages/concepts/validators.mdx + - page: Processors + path: pages/concepts/processors.mdx + - page: Person Sampling + path: pages/concepts/person-sampling.mdx + - section: Tutorials + contents: + - page: Overview + path: pages/tutorials/overview.mdx + - page: The Basics + path: pages/tutorials/the-basics.mdx + - page: Structured Outputs + path: pages/tutorials/structured-outputs.mdx + - page: Seeding with a Dataset + path: pages/tutorials/seeding-with-dataset.mdx + - page: Images as Context + path: pages/tutorials/images-as-context.mdx + - section: Recipes + contents: + - page: Recipe Cards + path: pages/recipes/index.mdx + - section: Code Generation + contents: + - page: Text to Python + path: pages/recipes/code-generation/text-to-python.mdx + - page: Text to SQL + path: pages/recipes/code-generation/text-to-sql.mdx + - section: QA and Chat + contents: + - page: Product Info QA + path: pages/recipes/qa-and-chat/product-info-qa.mdx + - page: Multi-Turn Chat + path: pages/recipes/qa-and-chat/multi-turn-chat.mdx + - section: Plugins + contents: + - page: Overview + path: pages/plugins/overview.mdx + - page: Example Plugin + path: pages/plugins/example.mdx + - page: Available Plugins + path: pages/plugins/available.mdx + - tab: api + layout: + - section: API Reference + contents: + - page: Models + path: pages/api-reference/models.mdx + - page: Column Configs + path: pages/api-reference/column-configs.mdx + - page: Config Builder + path: pages/api-reference/config-builder.mdx + - page: Data Designer Config + path: pages/api-reference/data-designer-config.mdx + - page: Sampler Params + path: pages/api-reference/sampler-params.mdx + - page: Validator Params + path: pages/api-reference/validator-params.mdx + - page: Processors + path: pages/api-reference/processors.mdx + - page: Analysis + path: pages/api-reference/analysis.mdx + +colors: + accent-primary: + 
dark: "#76B900" + light: "#76B900" + background: + dark: "#1a1a1a" + light: "#ffffff" + +logo: + dark: assets/favicon.png + light: assets/favicon.png + +favicon: assets/favicon.png + +navbar-links: + - type: github + value: https://github.com/NVIDIA-NeMo/DataDesigner +``` + +--- + +## Migration Plan + +### Phase 1: Setup (1 day) + +1. Create `docs-fern/` directory structure +2. Initialize Fern configuration files +3. Set up local development environment +4. Verify Fern CLI works (`fern check`, `fern generate`) + +### Phase 2: Core Pages Migration (2-3 days) + +1. Migrate Getting Started section + - `index.md` → `index.mdx` + - `installation.md` → `installation.mdx` + - `quick-start.md` → `quick-start.mdx` + - `CONTRIBUTING.md` → `contributing.mdx` + +2. Migrate Concepts section (6 model pages + 4 concept pages) + +3. Migrate Plugins section (3 pages) + +### Phase 3: Complex Content Migration (3-4 days) + +1. Convert Jupyter notebooks to MDX + - Extract code cells as code blocks + - Convert markdown cells directly + - Add Colab badges/links + +2. Migrate Recipes section + - Convert grid cards to Fern Cards + - Migrate recipe content pages + - Handle code snippet embedding + +### Phase 4: API Reference Migration (2-3 days) + +1. Extract API documentation from mkdocstrings output +2. Manually format as MDX pages +3. Add code examples and cross-references + +### Phase 5: Styling and Polish (1-2 days) + +1. Apply NVIDIA branding (green accent color) +2. Configure navigation and tabs +3. Add favicon and logos +4. Test responsive design + +### Phase 6: Testing and Validation (1-2 days) + +1. Review all pages for rendering issues +2. Verify all links work +3. Test navigation flow +4. Compare against original docs for completeness + +--- + +## CI/CD Pipeline Changes + +### Current MkDocs Pipeline + +```yaml +# Current workflow (to be replaced) +- name: Build docs + run: mkdocs build + +- name: Deploy docs + run: mkdocs gh-deploy +``` + +### New Fern Pipeline + +```yaml +# .github/workflows/docs.yml +name: Documentation + +on: + push: + branches: [main] + paths: + - 'docs-fern/**' + pull_request: + paths: + - 'docs-fern/**' + +jobs: + docs: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Setup Node.js + uses: actions/setup-node@v4 + with: + node-version: '20' + + - name: Install Fern CLI + run: npm install -g fern-api + + - name: Validate Fern config + run: fern check + working-directory: docs-fern + + - name: Generate docs (PR preview) + if: github.event_name == 'pull_request' + run: fern generate --docs --preview + working-directory: docs-fern + env: + FERN_TOKEN: ${{ secrets.FERN_TOKEN }} + + - name: Deploy docs (production) + if: github.ref == 'refs/heads/main' + run: fern generate --docs + working-directory: docs-fern + env: + FERN_TOKEN: ${{ secrets.FERN_TOKEN }} +``` + +### Required Secrets + +| Secret | Description | +|--------|-------------| +| `FERN_TOKEN` | API token from Fern dashboard for deployments | + +### Local Development + +```bash +# Install Fern CLI +npm install -g fern-api + +# Navigate to docs directory +cd docs-fern + +# Validate configuration +fern check + +# Local preview (starts dev server) +fern docs dev + +# Generate static output +fern generate --docs +``` + +--- + +## Deprecation Timeline + +### Week 1-2: Parallel Operation + +- `docs-fern/` is the primary documentation source +- `docs/` remains for reference and rollback capability +- Both directories exist in repository +- MkDocs config (`mkdocs.yml`) remains but is not used in CI + +### 
Week 3: Soft Deprecation + +- Remove MkDocs from CI/CD pipeline +- Add deprecation notice to `docs/README.md`: + ```markdown + > ⚠️ **DEPRECATED**: This directory is no longer maintained. + > Documentation has moved to `docs-fern/`. + > This directory will be removed on [DATE]. + ``` +- Update `CONTRIBUTING.md` to reference new docs location + +### Week 4: Hard Deprecation + +- Delete `docs/` directory +- Delete `mkdocs.yml` +- Remove MkDocs dependencies from `pyproject.toml`: + - `mkdocs` + - `mkdocs-material` + - `mkdocs-jupyter` + - `mkdocstrings` + - `mkdocstrings-python` +- Update `.gitignore` to remove MkDocs artifacts (`site/`) +- Archive final MkDocs state in git tag: `mkdocs-final` + +### Post-Migration Cleanup + +- Remove custom CSS (`docs/css/`) +- Remove custom JS (`docs/js/`) +- Remove template overrides (`docs/overrides/`) +- Update README.md documentation links + +--- + +## Risks and Mitigations + +| Risk | Impact | Likelihood | Mitigation | Owner | +|------|--------|------------|------------|-------| +| API reference quality loss | High | Medium | Document Python APIs manually with curated examples; add to PR checklist | [Owner] | +| Notebook interactivity loss | Medium | Low | Link to Colab badges at top of each tutorial; keep `.ipynb` files hosted | [Owner] | +| Icon support gaps | Low | High | Replace `:material-xxx:` with emoji or text labels; document in style guide | [Owner] | +| Custom CSS incompatibility | Low | Medium | Use Fern's built-in components; minimal custom CSS only if essential | [Owner] | +| Build/deploy workflow breaks | Medium | Medium | Test CI/CD in separate branch before merging; keep MkDocs as fallback for 2 weeks | [Owner] | +| SEO ranking drop | Medium | Medium | Implement all redirects before deprecating old URLs; submit sitemap to search engines | [Owner] | +| Broken links post-migration | Medium | High | Run automated link checker before go-live; fix all broken links | [Owner] | + +--- + +## Common Pitfalls & Troubleshooting + +### Pitfall 1: Nested Admonitions + +MkDocs supports nested admonitions; Fern callouts do not nest well. + +**Problem:** +```markdown +!!! note + Some text + !!! warning + Nested warning +``` + +**Solution:** Flatten to sequential callouts: +```mdx + +Some text + + + +Nested warning (now separate) + +``` + +### Pitfall 2: Code Blocks Inside Tabs + +Indentation is critical. Fern expects proper nesting. + +**Problem (incorrect indentation):** +```mdx + + +```python +code +``` + + +``` + +**Solution (correct indentation):** +```mdx + + + ```python + code + ``` + + +``` + +### Pitfall 3: MkDocs-Specific Syntax + +These MkDocs features have no direct Fern equivalent: + +| MkDocs Syntax | Action | +|---------------|--------| +| `{ .md-button }` | Remove, use standard links | +| `{ .annotate }` | Remove, use inline notes | +| `[TOC]` | Remove, Fern auto-generates TOC | +| `--8<-- "file"` | Inline the code or use `` | +| `::: module.path` | Convert to manual documentation | + +### Pitfall 4: Image Paths + +MkDocs resolves images relative to the markdown file; Fern resolves from project root. + +**MkDocs:** +```markdown +![Alt](../assets/image.png) +``` + +**Fern:** +```mdx +![Alt](/assets/image.png) +``` + +### Pitfall 5: Front Matter + +Fern uses YAML front matter for page metadata. 
Add to each file: + +```mdx +--- +title: Page Title +description: Optional description for SEO +--- +``` + +### Troubleshooting Commands + +```bash +# Validate all Fern configuration +fern check + +# See detailed errors +fern check --log-level debug + +# Preview locally before deploying +fern docs dev + +# Check for broken internal links +grep -r '](/[^)]*\.mdx)' docs-fern/pages/ | grep -v '^#' +``` + +--- + +## Rollback Plan + +If critical issues are discovered post-migration, follow this rollback procedure: + +### Trigger Conditions + +Initiate rollback if any of these occur within 2 weeks of go-live: + +- [ ] >10% of pages have rendering issues +- [ ] Search functionality broken +- [ ] CI/CD pipeline repeatedly failing +- [ ] Critical content missing or incorrect +- [ ] Stakeholder requests rollback + +### Rollback Steps + +**Step 1: Restore MkDocs CI/CD (15 minutes)** + +```yaml +# Revert .github/workflows/docs.yml to MkDocs version +git revert +git push origin main +``` + +**Step 2: Restore DNS/Hosting (if changed)** + +Point documentation URL back to MkDocs deployment location. + +**Step 3: Communicate** + +Notify team: +> Documentation rollback initiated due to [REASON]. +> MkDocs docs restored at [URL]. +> Fern migration paused pending [ISSUE] resolution. + +**Step 4: Preserve Fern Work** + +```bash +# Don't delete - branch and preserve +git checkout -b fern-migration-paused +git push origin fern-migration-paused +``` + +**Step 5: Post-Mortem** + +Document: +- What triggered the rollback +- Root cause analysis +- Required fixes before retry +- Updated timeline + +### Rollback Window + +- **Weeks 1-2**: Full rollback capability (MkDocs still in repo) +- **Week 3+**: Rollback requires restoring from `mkdocs-final` tag +- **Week 4+**: Rollback requires significant effort (MkDocs deleted) + +--- + +## Pre-Flight Checklist + +Before starting migration, ensure: + +- [ ] Fern account created and `FERN_TOKEN` obtained +- [ ] Hosting decision finalized (Section: Decisions #4) +- [ ] Timeline approved and dates filled in (Section: Decisions #5) +- [ ] Owner assigned in RFC header +- [ ] Team notified of upcoming changes +- [ ] Current docs snapshot archived (`git tag mkdocs-snapshot-pre-migration`) + +--- + +## Conversion Checklist + +### File-by-File Migration Tracker + +Use this checklist during Phase 2-4 to track progress: + +#### Getting Started +- [ ] `index.md` → `pages/index.mdx` +- [ ] `installation.md` → `pages/installation.mdx` +- [ ] `quick-start.md` → `pages/quick-start.mdx` +- [ ] `CONTRIBUTING.md` → `pages/contributing.mdx` + +#### Concepts - Models +- [ ] `concepts/models/default-model-settings.md` → `pages/concepts/models/default-model-settings.mdx` +- [ ] `concepts/models/custom-model-settings.md` → `pages/concepts/models/custom-model-settings.mdx` +- [ ] `concepts/models/configure-model-settings-with-the-cli.md` → `pages/concepts/models/configure-with-cli.mdx` +- [ ] `concepts/models/model-providers.md` → `pages/concepts/models/model-providers.mdx` +- [ ] `concepts/models/model-configs.md` → `pages/concepts/models/model-configs.mdx` +- [ ] `concepts/models/inference-parameters.md` → `pages/concepts/models/inference-parameters.mdx` + +#### Concepts - Other +- [ ] `concepts/columns.md` → `pages/concepts/columns.mdx` +- [ ] `concepts/validators.md` → `pages/concepts/validators.mdx` +- [ ] `concepts/processors.md` → `pages/concepts/processors.mdx` +- [ ] `concepts/person_sampling.md` → `pages/concepts/person-sampling.mdx` + +#### Tutorials (Notebook Conversion) +- [ ] 
`colab_notebooks/1-the-basics.ipynb` → `pages/tutorials/the-basics.mdx` +- [ ] `colab_notebooks/2-structured-outputs-and-jinja-expressions.ipynb` → `pages/tutorials/structured-outputs.mdx` +- [ ] `colab_notebooks/3-seeding-with-a-dataset.ipynb` → `pages/tutorials/seeding-with-dataset.mdx` +- [ ] `colab_notebooks/4-providing-images-as-context.ipynb` → `pages/tutorials/images-as-context.mdx` +- [ ] Create `pages/tutorials/overview.mdx` (new index page) + +#### Recipes +- [ ] `recipes/cards.md` → `pages/recipes/index.mdx` +- [ ] `recipes/code_generation/text_to_python.md` → `pages/recipes/code-generation/text-to-python.mdx` +- [ ] `recipes/code_generation/text_to_sql.md` → `pages/recipes/code-generation/text-to-sql.mdx` +- [ ] `recipes/qa_and_chat/product_info_qa.md` → `pages/recipes/qa-and-chat/product-info-qa.mdx` +- [ ] `recipes/qa_and_chat/multi_turn_chat.md` → `pages/recipes/qa-and-chat/multi-turn-chat.mdx` + +#### Plugins +- [ ] `plugins/overview.md` → `pages/plugins/overview.mdx` +- [ ] `plugins/example.md` → `pages/plugins/example.mdx` +- [ ] `plugins/available.md` → `pages/plugins/available.mdx` + +#### API Reference +- [ ] `code_reference/models.md` → `pages/api-reference/models.mdx` +- [ ] `code_reference/column_configs.md` → `pages/api-reference/column-configs.mdx` +- [ ] `code_reference/config_builder.md` → `pages/api-reference/config-builder.mdx` +- [ ] `code_reference/data_designer_config.md` → `pages/api-reference/data-designer-config.mdx` +- [ ] `code_reference/sampler_params.md` → `pages/api-reference/sampler-params.mdx` +- [ ] `code_reference/validator_params.md` → `pages/api-reference/validator-params.mdx` +- [ ] `code_reference/processors.md` → `pages/api-reference/processors.mdx` +- [ ] `code_reference/analysis.md` → `pages/api-reference/analysis.mdx` + +#### Assets +- [ ] Copy `assets/palette-favicon.png` → `assets/favicon.png` +- [ ] Copy `assets/recipes/` → `assets/recipes/` + +--- + +## Success Criteria + +- [ ] All existing documentation pages migrated (32 pages total) +- [ ] Navigation structure preserved +- [ ] All code examples render correctly +- [ ] All internal links functional (automated check) +- [ ] All external links functional (automated check) +- [ ] NVIDIA branding applied (green accent: #76B900) +- [ ] Local development workflow documented +- [ ] CI/CD pipeline deployed and tested +- [ ] URL redirects configured and tested +- [ ] PR preview deployments working +- [ ] Page load time < 3 seconds + +--- + +## Decisions + +The following decisions have been made to ensure smooth execution: + +### 1. API Reference Approach + +**Decision:** Manual documentation with code examples (Option A) + +**Rationale:** +- Fastest path to migration completion +- Allows curated examples rather than raw docstring dumps +- Fern's Python SDK autodoc is not mature enough for our needs + +**Maintenance commitment:** +- API reference pages will be updated alongside code changes +- Add to PR checklist: "Update API docs if public interfaces changed" +- Revisit automation options in Q2 2026 + +### 2. 
Notebook Handling + +**Decision:** Convert to MDX with Colab links + +**Implementation:** +- Extract code cells as fenced code blocks +- Convert markdown cells directly to MDX +- Preserve Colab badge at top of each tutorial +- Link to hosted `.ipynb` files for interactive experience + +**Example header for converted notebooks:** +```mdx +--- +title: The Basics +--- + + +Run this tutorial interactively in [Google Colab](https://colab.research.google.com/github/NVIDIA-NeMo/DataDesigner/blob/main/docs/colab_notebooks/1-the-basics.ipynb). + +``` + +### 3. Versioning + +**Decision:** Single version initially, evaluate multi-version post-launch + +**Rationale:** +- Current MkDocs setup is single-version +- No immediate need for versioned docs +- Fern supports versioning when needed + +### 4. Hosting + +**Decision:** [Fern-hosted | Self-hosted] _(fill in)_ + +**If Fern-hosted:** +- URL: `https://datadesigner.docs.buildwithfern.com` or custom domain +- Zero infrastructure management +- Built-in CDN and SSL + +**If self-hosted:** +- Deploy to existing NVIDIA infrastructure +- Use `fern generate --docs` to produce static output +- Configure redirects on hosting platform + +### 5. Timeline + +**Decision:** [X weeks] from RFC approval + +| Milestone | Target Date | +|-----------|-------------| +| Phase 1 (Setup) complete | [DATE] | +| Phase 2-3 (Content migration) complete | [DATE] | +| Phase 4 (API reference) complete | [DATE] | +| Phase 5-6 (Polish & testing) complete | [DATE] | +| Go-live | [DATE] | +| Old docs deprecated | [DATE + 2 weeks] | + +--- + +## Helper Scripts + +The following scripts can assist with automated conversion: + +### 1. Admonition Converter + +```python +#!/usr/bin/env python3 +"""Convert MkDocs admonitions to Fern callouts.""" +import re +import sys + +ADMONITION_MAP = { + "note": "Note", + "tip": "Tip", + "info": "Info", + "warning": "Warning", + "danger": "Warning", + "question": "Info", + "example": "Info", + "abstract": "Note", + "success": "Tip", + "failure": "Warning", + "bug": "Warning", +} + +def convert_admonitions(content: str) -> str: + """Convert !!! admonitions to components.""" + pattern = r'!!! (\w+)(?: "([^"]*)")?\n((?: .*\n?)*)' + + def replace(match: re.Match) -> str: + admon_type = match.group(1).lower() + title = match.group(2) or "" + body = match.group(3) + # Remove 4-space indent from body + body = re.sub(r'^ ', '', body, flags=re.MULTILINE).strip() + fern_type = ADMONITION_MAP.get(admon_type, "Note") + if title: + return f'<{fern_type} title="{title}">\n{body}\n\n' + return f'<{fern_type}>\n{body}\n\n' + + return re.sub(pattern, replace, content) + +if __name__ == "__main__": + content = sys.stdin.read() + print(convert_admonitions(content)) +``` + +**Usage:** +```bash +cat docs/concepts/columns.md | python scripts/convert_admonitions.py > docs-fern/pages/concepts/columns.mdx +``` + +### 2. 
Tabs Converter + +```python +#!/usr/bin/env python3 +"""Convert MkDocs tabs to Fern Tabs components.""" +import re +import sys + +def convert_tabs(content: str) -> str: + """Convert === tabs to components.""" + # Match tab groups + pattern = r'((?:=== "([^"]+)"\n((?: .*\n?)*)\n?)+)' + + def replace_group(match: re.Match) -> str: + group = match.group(0) + tabs = re.findall(r'=== "([^"]+)"\n((?: .*\n?)*)', group) + result = [""] + for title, body in tabs: + body = re.sub(r'^ ', '', body, flags=re.MULTILINE).strip() + result.append(f' ') + result.append(f' {body}') + result.append(' ') + result.append("") + return '\n'.join(result) + '\n' + + return re.sub(pattern, replace_group, content) + +if __name__ == "__main__": + content = sys.stdin.read() + print(convert_tabs(content)) +``` + +### 3. Notebook to MDX Converter + +```python +#!/usr/bin/env python3 +"""Convert Jupyter notebook to MDX.""" +import json +import sys +from pathlib import Path + +def notebook_to_mdx(notebook_path: str, colab_url: str) -> str: + """Convert a Jupyter notebook to MDX format.""" + with open(notebook_path) as f: + nb = json.load(f) + + lines = [ + "---", + f"title: {Path(notebook_path).stem.replace('-', ' ').title()}", + "---", + "", + '', + f"Run this tutorial interactively in [Google Colab]({colab_url}).", + "", + "", + ] + + for cell in nb.get("cells", []): + cell_type = cell.get("cell_type") + source = "".join(cell.get("source", [])) + + if cell_type == "markdown": + lines.append(source) + lines.append("") + elif cell_type == "code": + lines.append("```python") + lines.append(source) + lines.append("```") + lines.append("") + + return "\n".join(lines) + +if __name__ == "__main__": + if len(sys.argv) != 3: + print("Usage: notebook_to_mdx.py ") + sys.exit(1) + print(notebook_to_mdx(sys.argv[1], sys.argv[2])) +``` + +**Usage:** +```bash +python scripts/notebook_to_mdx.py \ + docs/colab_notebooks/1-the-basics.ipynb \ + "https://colab.research.google.com/github/NVIDIA-NeMo/DataDesigner/blob/main/docs/colab_notebooks/1-the-basics.ipynb" \ + > docs-fern/pages/tutorials/the-basics.mdx +``` + +### 4. Link Checker + +```bash +#!/bin/bash +# Check all links in Fern docs +cd docs-fern + +# Internal links +grep -roh '\[.*\]([^)]*\.mdx)' pages/ | sort | uniq + +# External links +grep -roh 'https://[^)]*' pages/ | sort | uniq | while read url; do + if ! curl -s --head "$url" | head -1 | grep -q "200\|301\|302"; then + echo "BROKEN: $url" + fi +done +``` + +### 5. Batch Conversion Script + +```bash +#!/bin/bash +# batch_convert.sh - Run all conversions + +set -e + +SCRIPTS_DIR="scripts" +DOCS_DIR="docs" +FERN_DIR="docs-fern/pages" + +# Create directory structure +mkdir -p "$FERN_DIR"/{concepts/models,tutorials,recipes/{code-generation,qa-and-chat},plugins,api-reference} + +# Convert simple pages (admonitions + tabs) +for file in index installation quick-start CONTRIBUTING; do + src="$DOCS_DIR/$file.md" + if [ -f "$src" ]; then + dst="$FERN_DIR/${file,,}.mdx" + cat "$src" | python "$SCRIPTS_DIR/convert_admonitions.py" | python "$SCRIPTS_DIR/convert_tabs.py" > "$dst" + echo "Converted: $src -> $dst" + fi +done + +echo "Batch conversion complete. Manual review required." 
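+
+# Optional follow-up (a sketch, not part of the original script): validate the
+# converted pages with the Fern CLI before manual review, assuming the CLI is
+# installed (npm install -g fern-api).
+(cd docs-fern && fern check) || echo "fern check reported issues; see output above."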
+``` + +--- + +## References + +- [Fern Docs Getting Started](https://buildwithfern.com/learn/docs/getting-started/overview) +- [Fern Components Overview](https://buildwithfern.com/learn/docs/writing-content/components/overview) +- [Fern Configuration](https://buildwithfern.com/learn/docs/configuration/site-level-settings) +- [Current MkDocs Configuration](../mkdocs.yml) + +--- + +## Appendix: Sample Migration + +### Before (MkDocs - columns.md excerpt) + +```markdown +# Columns + +Columns are the fundamental building blocks in Data Designer. + +!!! note "The Declarative Approach" + Columns are **declarative specifications**. You describe *what* you want... + +## Column Types + +### 🎲 Sampler Columns + +Sampler columns generate data using numerical sampling... + +!!! tip "Conditional Sampling" + Samplers support **conditional parameters**... +``` + +### After (Fern - columns.mdx excerpt) + +```mdx +# Columns + +Columns are the fundamental building blocks in Data Designer. + + +Columns are **declarative specifications**. You describe *what* you want... + + +## Column Types + +### 🎲 Sampler Columns + +Sampler columns generate data using numerical sampling... + + +Samplers support **conditional parameters**... + +``` diff --git a/fern/assets/favicon.png b/fern/assets/favicon.png new file mode 100644 index 0000000000000000000000000000000000000000..11c795d3efcd5fc449ed2fa20ab991585d8aa2d3 GIT binary patch literal 13659 zcmb7rbyQSe)b^cWfEjw|?rua{x*I{dQ2|Bi&H?Fe=~h8fT5@O62thif39 z4lKt=qy|7CU>Fc8#|UC1g0cb-a3};J$CwNdK_SGz|7O(6q0w1}dmf}n@%cuJ;^XU& zFyr#auV*>mwz^k|YOQ?2HRc}-A(T@hkKot3j1sk2sWs$r*qMoqF>6}o$Tt&Fqxra9 zg>NtyLR;laLEdu9Q7se-k$dsPtlHPS)TntfoTqy_f-BQ;_H%;T>PYk($YFfcMS!kj zZ@QD-vyV=FhCz+RGWbm)oFh~1Ov!*aWl~=UYO*3j`tU`;Cz!!o2M8%*oo#?l35?VK z;-Z8U8ssm-Oblod@1PbQtz|}tH60^5AwT{$KMBBExUT*H@C>&%%5PAr; zgAk{_KuOn+fmz3~xJ1ju`jhiS3C2-(Ni))@5@|Uw+Pjrv|JvHJ^?Po+Y7~!`vz))d z@POBaCP_`s=a|C^>@@bk%o~WXnq{+B8PeVqKE6j0L zTGpOP{P96rG`d;N?OnCGQP#00GXV@SJ+Ri$`;5JY(=%q` z6hQ4)@leOorH~(U?OI9%)GkI{iJchuhL$focd{6y%f1W)kreCHYbiswP6U|AVTkDE zz(2Kq#eB>k91|6rUTbY|XK=vdA##+vudhOQO|iwSAHnq=P1vfrO|C%;ag-ht19u%) zE8i2NHI>A_t#WOU@v<>JMEoY!x%{RtuvR8zR3iDuXBu^=8nBCziF6;}TlvnR#J$-F zHKA(cUudU=Cqt0zFCxl63%LlV3@a-{XPNXfiz+D*11HD{V&^Q==uIWs_gZ56ieA{P zfJ$-P;U%fqw^z)uBWsVZyt>%+Qvp(NnxFq4hg`V);-?K8x}6@mb)(m$v#qS>)XpdP#mk~_vg3=SWhwPws@Sr8g_+k8j~iIvHd!c zu_ueFg2t=uR)LYjwML&~Itb*lh*RwiL$Bqnsfqt>>FmT5UXy<7o{>~7E)Y5NEiE4p zR-$3A)l%{n1x23*i`0^dq|C#8TTgjfCBE=Jk(^Q)KV`rI#;aNGPo#jH!Z*y_!87P@ z^iK=#g*Iu5K7P<)1{|A(YWd;w@;cpu5{o%k1gtv2B<==(LP$=Y7Ksc67kDH~z%&(t zrR&r}hn>DK@$_G7GdHDU0@AHn=XCs%Zr}jKSS{{f_D1ZbzoLGW^8}3sQlZ{%J`<6W zlUv;#R6bXz{Bv9QNk{|c{tnPcvd7*P3f(n~?xZIa%9+WC=6ec4f82}CuxwtW@sw@B zuuK|Eo-IlAYplU-v(lEAs*&a(*C$7`H^^*2iWY8V0}2)Mf|{+c>0L4v*o)nTv~@DG zfTGgZjpF_HSe_W>j9+%hS*}WJs?lFu#C&HOcwRJJe{ZF1>^<0-$MZaC)0n#Hme|7A z0qAQ!zgAPR|CM`f2M4R-%5^>0qu`41TNO_t<38j>M!dpMx%oARng#H z9~btXg$VzGLBBJV8^Dz7wd9q z#t=)?N53v`ikNTC{d@#Cxue4_Ww(175O|azYcqyvg^5`C<_$B|SV&7&E{Hw^<0AWt zf*+32U6I?mx`Y;}qR2`8rJ6Ua6@@IfXXE<)R(eV2~Lhazad4Hc9ogXU$ zKXz=St_%C7)t~c9;ZjM80(-msCFTI$ub_muw4-lr%!KE7a>6-2AaoVN5~qSqi_Og- zDdyq^7tl$k<60l-Ty79rbT;Wja<>!s^(`#9u##2lnVOLMaXMK6{L+sUa0b^RSXvI^Ut_Au zUXwQdaH2>(c4KfE`{M9#e)`iPFE3I3ugCQXH&3!xGKb7I$#k+P>+-l$3m zJI_un=R;{4aG1LLj?5g~IXr{8gm%r)McX9^;0<5wj=S#g-IZdmr08xElc3}vk&Q&6 zgvi>|(1(x6LpWaJog5F;B`QwkyI1{6rQh0<2csvZt&h@@1&#vaa$FD7bkWYY+gM70 zZkHu5H#XPh&Zhhbm>$v~{w>GCiLA-gupB_SKUxcnA9$yQ15YFt)sKIuF<|p7vw>WJ zg&B&+p_opqeyCbu+WzT6n4JH#85ZlPc7S8bjczd2AJ=0K@S&F_KS(_bi*eb4PVOmO 
z&FTm)di0m64JfOrhGq9GmGOhWWv|4Vu>xmjCnT*Yn;AM@634=SQTGxcH4y>er~HT? zQ%Ersk9eOu`BAsO5C}c>Znlt`916p~S!&trz<9;e<((?ewm%;-;pTU@@||6wyRrLo z$XLof)C{By8(zI&Ckd+e{AdtDhdD|zQA6}Gkx_V^rd8w*Un%pU&8)*93kF!Oy~WKc z=lFPIOy44-&ulUcNC&8TmWLn-p#u9xzE=#FjT%DW`cc`mx%;u|o78S34kbsP0Hc?w zK`9t%3br+5JP)m#2W`A-6%p1=*MHyQN)ECaR=&{!+h9!f1vHX$;5Y}iD}cnqC&>6t zkDTKwx=jaJf~Ps4%m87)V^VKz`w%;Qh{%3v+>YmBu6&;~kDiR;Z?-_5U>9)usO4d4Qk&d%K5EOM!bM++uN} z0+Uf-P5&#_4R)sjSP#PS>9KP;jD%x)X9L>D1H8wZDQYW%w1l(XJlXYFDx5XFP$)(; zn=m;p6LZQk4`Ox16JSJ@5h+y!O zfi&YlXvO3#<8uSLDn7^Ih?B+u>SDE-h{mk#MY0lPp9{e`g_vs-dljsP9n5w&IDGfA zWB=M`jI5ew497)0JY0Yq91?WL6)VT54n?}%@a_udADCHpgfeAC_{`VVYcr#vi}?}X zOl@(DNdVE85`!6m|M@9>{uwj+_K{9gm8lj$nHXe*xi9jWi~I6G8%EzdE9+YBgBd=3F^X@|FyFIHGDp@s91#d@4u0-z^H)?cm z?7k^Da{}$%w|}#qS@m{O(Is4gW5fR8wHnq^AFS&t$E~@N^`U}l45Zba(U0$&lb>t~ zx7T3p1{iG2yE3_SS0^YxBU#h{R>@wXWTY!}YSOKGXPMRIH(X)0n7=k|lC7t!yo=X# z8F*iCz4b5}6QR^DxW-&8xRW7n&r@DSWt+k~Z~nC^u(mLae>6&Pr2(1nyuU?!lb!Mh z`I6ZbcsnU0md5Gm_(%JPF*H@UgE1?>uQc1F<|7IIMLxb4J#;lC(OQ26oKx7D2}{Bm zc;Cj~hd51yt6lr;tDGXHE8F{uW!rnZMDkalcPG0HOH5p0E!(4;l1=sP0VY7__x#L~ zG!9`Uki)N;W-~~lEunm7#U}K)p>EB3a5zc)cQHS{5Lxu;PKhKk@4`qh*wP~{t6+k- zxAb=}J!#~NCPfyqDr|d}_vDDC@AI-hrR!5)h{@Y0(~kL6I5a_N2d6J}g0Aba8G;St zMx+J*AUY1_W1!zZDUvAW(ad>~8e>VmR z57D9-(^oT=T0bdU@%mgpf+Lr%Ai8qeFG<`z&W+YMsI4Lo4Rd8`@BXb2I2ZcYKmYRp zZSYkqr;8fvAGqo}$#>Si(+Uc@OEaKm3n(I9l}lv=StuQXL!p1BIMH(uXNo1fYTi3o zp6F`*w#CrZv|;M^2pcA&*^1lYM&KvM14xT%OQfQ8;OjII*AE--T^VXI+cwzyi~OuY z$1}{V-Cqsa*HSihBSsPQO3w=F$O7>2tgTv-k;PGN$(r7gv-w4Y7D)=YMan`hWZhEYO7+Tv4@OqYe58w*b$gIR2Pz7cY9=$Y36XKl>=A0U8k z$mLa+i{z5MZE$E@Y@}!$Lb_F^Q39U9k%zwz{w^_VqoyaFST5YM>U+McmHVwWAO~w& z%tv%b@P$m+LK7XMqUEDcVmd=t%U%#UF5Jv|j0xgY_U5SPUR~(?8;TAZqNjdNW5tL` z+#Q7^3B#4mza7&ds3zIB%)$J32dka$1RdIYc`yHcA*PF{U^~HIw`o}17!v?KJd{ra z$gd|1`rS#DuVqA>Ny?G)EDF#4 z>L?Z#N9_t0WNnpE{eCGG#77w7pMpQ+@)q5TU}Nf%naPqZ;`-?zo3V>JlGWh>oHFW{ zwSb!Ow=4QIv0mhNm9`@7-$LsB@R?=D)d==L?yRyThyx?BRp?fCNVw^-@Q%&On3Xq) zHABF>ryJL&A}?^!;YB`7?)W&7-(;==Q3l%+jH}DsTOX?IXVbcyf~5T%7bAGi)!@?w z#9LjHsSYZ)UuBUYvJ5psRhU9LUpL?!*j(HGNEB80sVqB;@ZD90H#JD^AjH7?isO`DmnMagy4mJf zHx$4ls5w7>74H+c@t5an;1R|pF~XW8rw4Hu)tONM ztQT*tQaj>5CVmDD+%(<{K93*pCJdQj?@q>t?6ErgkA7k)hF{ZUj~zNDS)q^O5}jE_$n=$WX6Q=I$F5&65WYU z;Wo)@BDT;-oXdKsO1S8S6stsSW@%m?N>c|g!A65Y3)(V3SpN-3mRFDuj3Irx|0KMiK)1L^sX@e?5|Sssu!Vkamyd(>A=>Rhr=dXLK0 zrlQcbj(a#hUhtjXkOc^*&fu@x^Gs%71ruEN{O`TvG@Lc&T_~Mxf7mbkh@$QgI!cF6 zE;es04D`lo?GHUT_}t&D8F%26v%UZgH^hs5dj;+A^*ww_m7RTPD zBmNEX=c|5ox@k$^9T-u?uk}@r+Zd6H2`2j8!24D(%z#l^3)C&(WWOxmT8uzTi zIUY_`-yfp7(cYc~x7PV7v&F*fD;dl?svW)2wM|*ZaK(vK%w#i8AR7D5((ySw4=2jV z8-n^OG8!wVUN?B;6Y`ww-EF@&`1L-t963+Qhur_3zyp@KCqrca&Gij>?A;Yzpiny)GAIbNpuUBKQ1nal^4lE+s+=0gTcZ8dd}_A! 
z6bg^N@>^Ib1%NSe+H^TrcomrlNivh!3M`u`9apZdG~MSFwzx#G=)CP~#qJg+ zqdyt%9V728Q0IJaq|cdXsSL>r+>T zh4y1rleRYmH+6#0XD1~^(|O{K!Vc2sx~R*LUR_+{UgDdRx1i_fU`K&pj)5QkjyYkP zM|BV?pQo8}zO18*n;6q}f^Nw#Hzuxpr&GX14weY~={;3RwJx9T>9gG}0m}DUNVvzv z9YsEmE+&l?Ha#pXVNC>H8U7|TipDG+JLG0mbd?6AsVpnmOkGyv=~Btaqcy892Rg*# zykbtgx%uF!n>_&%bhm{DK3YSVmdsnalA2YEgXqAz^6!*~(9cQi1bw@xy&;M0%q zq-{3qV!hL>8Qi*Fu8fwo?dN57DGr+hGAIQY6I66E@;_W8Ep0HS>95UU%27>K*UAyCk%DYu3~>jAuMUtOS&h z)ypVT$P5nQJ}#chftOQ?F)TB*%yJ(6#>@*}^J%o%n;)y(I;=o3mC8CTgt@j7KZ+iH zi^*R~k|c1vvnFU(=De9@#8Vm$N6^#rCGcG>eb{4dpnS(Z8umVer+BJy)jk9@ySZ}I zKw+~((Lkzo4kxWOXnnlTGq{wHfT5s>Gea2_v12EB+<<;}& zxWUe`kM+Y?Hg2K!dR5`~XZNL)Po-*}7&0GoyejB@-#}>YN%_dSudl8^$@h6z)@5}( zT!i9_tj3lQW&TLh!YJ$(Hj}daQbr+eT1;dA3*P-QnYl7!v9VukLs>OVbwh8V4`^Yb zc}Di9<6%i$=Rvz}zYCOV8>3!E{xJ0kQHM%Dxf_};%!A8RG)-1$54K6$DEj*F$0B7b z*{>uuYsjd%^^5WTFMa*n38tTCG*dK8Np56BVh2bnSuka4JyzG)cWzE6Ul06Nh{uvL z)_VK1+%?634LhmuTS{T^O%e@Ww1n#q<=p7P6K0CuL^j1)iKw?YG>_`?!g85zV=nw{ z5?60JkL{`#J;J4F9_y_9$~PIuoms|^3A?R8H1t?hh+r!U5{3$o(%Z@tySR@B0|io4$u%Cy{R8#y=*A#M*PZx znPg*wY}5|#XZJ8|PLMtVCrcyzvP!oLsZepn?Y7Pd<%_`Yxqx62@8`F%Uk1oV)~+t= zQ23CdQM4KcaU_GL5Ep>$Vt=d>94Q$rwJ7t*)l+NMk;%+#h_mw-S|;$ds261|3%|8t zxzX5~E+DR2u;_vu3D#s+CPA~DPs%GJ_#oYS|LJO}2cMOoPIvmQN^V3PLx7^@7 zE~PL=p%SHg$PZLeZkbH9IP(A%Jne-*U_5KejX@&>v+D4r_uV^<&ajNisHH_5 zlkr?YE9coZLu>S_LGrVOh3zoe-SVTfIGEg4s8ei8vfq{JfK_Dl8kgYP-`W>%=9qdQ zML1?4im<5GYUtlyGR5#l@Z9I`&w6h!Mm;l&C?YYt-TsJ#d^FwK8;Aqe-(wBs#ComF)T9ssiFpoHck)vZQpazhx#e(_<&(Wnc3`9-7C}-P!+8d&f{=#G^2TS`X#Fb&>u(~som{TDe8E1a}8NJ9o-jjv3Wq~3QP{r)q9?YT)N3a1X)-8eh+f-|+$;<{dV!>S0T@w^nMp& z@ekH@BPH13|^bQmh=*ylB^8H76DY%*PyL9X>n=T|Ru!2Y^N z#l!D?Q_O2gPVxBn3wyNP3U0W`d$gqDMm8BpMWYrj)M19;Sy~s#YdY z&=|&433>uU@|9K~4QN__Ty4*MF7ANIb(OLWrUTKlqBmFHtq$c;Z1&2C5z13oWPK?y zEjJ|NBLiAL#hN>{oGI!LxiVtq#Bb(QZ{~I- z@TmC#q<8w`?Bt(i3>tPJ^X^H9(o(T3ldiCEf?ZC{>@%RG=!^m;w9w;NkL6BTTMnO+ zTo!4PgnHlAZo9AxOtmc8Jg76BRh@z61L><0LxS+n45~B{O4i46;F$O5bAL0-;=b05dl<7vR`diwr# z2|uz4xlB&?F3|2ro=eOG+Ym`3RH4-|G$M_+);3|t{njAlpghUVS6j8ks*ks+gS!@N zBf4Sch#CDm z?CY`il>3T3qHiFdw(!weApGO`O2(gsx$)4On}{;<4dj&kgxd^W!&YDOYT$+5@U>-T$TcVG+HnBcie>v6up3Xv)$p8K^rp9#@r_hnLJpAa+mYTw=NLw|T zQWA$9$gBc3GBUOmzC2q(WXmGX&!?+3nC+yY)CBps-M44B79TZa!<3F}{N6W*!4Pab z2qw3l`Bdvht^O^tTShD%yiF}C{4|cl6|~r;Lv)rj@fjZ#|{?KegkIo03z z%}f3AmoSJ3AhN3HZwdj+Zpe}=`Hhkq z0*7};kBD#6O{36S?$EZUX55?EgNs2(+Te+BE=O%&fM#;2&0`J3OJN8cw_*7`7UKY5 zoy*7{6A4y%mCl37^se^$mJ6W?cF&(#*;Hv$SWfLT& zOaW+4n^B$J>qS`Q;jZpadDqpP)7b1UQJmlxtL;)B!7cKZDa(7uz00rzJe(4xB@+)N zDUj?u!1I&~_vz8j3mOmeDO^t`!Akz+dC_AUl6SW#Iy)v58~HplHK+= zUlhbgjY^Nmlbm!q7W!=-64G%4m& zRB;gnSphxs8;^ai-)C3s!`?G<^o1sb^fskkGx3ScB6gb*F$xGw2d)Cf(=E({|>{?qk`3-M*Fe zxwH+d?SiKnONTS(5q&f>L=+U*xneGn*XIYh?CvU9j!kC6-@mUGO>Z#@3s19G$~E|4 zI{J}<7t^yZ_}?pilkQ+Ke7RlsO1et_vBt_QK=Q1uG+Po*_>R-#XGyco=3#6}d?^WP z%XrC#u4KJHrrBLz4>a74;~VU}#qA1zp6yJ+lk^u~N^HP=Jt-+E>UWC^4I4j8fQ}A9 z)|LO=jRXhQoH>L-uA!i?GL*ecwVZElL+IHJUDx?-v_(L;&G_*5?@GNu@8T zbiSPZO{GBGT<*zc1*CkDseSIWspIU1LN##}u*mj(c|H^i&&c23h_*f*A$MYw=EHxa z+aKy}=%OlX&V51X|ACe6*O zVy0(|%g;PuYWSgE9}`DEcZfNG3bGCkPD|st=4(y2YyK{M*AxbyJjt?}Y9dz#PB&mL zaspzaEX5yZT@3j|tUHLiud97&zl);~c+gky86)h}+OU3cMvVzAt`K#nOf|r9dYV&l zKr}iYq!H-Q5Q^R?VC zhi-w1cCh#TyyN3DRUE+~xpwRN@)@dsm4#~)F32$0!yvn6ww>#nS@qx1CScyc6Ey5Z zZ|6ViTP%x=(2vknxgsM$JoX$WOHR(;$n0A9hvZK~;XNXZl-&25jn(ORk!cqVKOcM< zP=ZBs)rtR!V=iu^seV%T3rxoDTvu65f$bR=FAD{L*sc^G zf*82}#|LPFkvA9}Dcxvp*H?usg?{9P0UTwz6(L_H9!d7o|H^&VuVXb=y}g)z)e%XY ztvsSO{@P|WXvFmTvZnqCwQ5cN3kah%M1gk+A zp~0=bwBS0MUBN2%;Fbhc@#Yo(k_FeaB=?eUP~majIQq1|jn+0>Ha35ud2S13IY<8h zBK^_u{))x5p&iIVAe*FJ`Ha3G(MnhyhHOi8xyNQWtNI#Y^LlGkrzdJVSOpXPu<>B6 
zKQ9O}uueR@z;(gotdYUnHa3e6>Tg?Nw6!Su zon%P^GeRy;lCg5K4rnRIjcVZD0)GBg83A`;Mm^F@v1zn~NePqONwI-7DeDl%+R$i+ zfVuh6H=yCVSkYX&RJD72V=6KP1~vgH3%bE!JT|59Hl!B@s6Y~Hybr0%uW&=kpIwMW z=0-J0HKrxslb%h#Ve9{;rVkwZ!~AdFyec-yo1v;kXn(1Ug8`bDI(+)qmP1^?C@^sKijbs{-XDa-o9C-7yet?mdlg!bZfBiWf|5k4hD!ErC?g0sqv+jW| zI+?rbN|PZ@2~GNOb+4Uw{h2o!G|hn4O1bfRmleCECerrKEu|It=M_lIR?n@}bqMk( z7@?3i8TrrO(oE;Cd#w-y=}uK#N!`z>!2ITQ2QwFKyK@2%1e5? zXg54>(VR%~K3XhNJXtK(i`u6earh4;KAIgt+3Xef;_H@7U}A6n>)n%`in_S#(!fX zO!LY^nlParfV9;EtO|EHcn`!*goL?<~=h0UJm zE~6rIo>3tUxRI8R{87>}VV-Pj`B39GKKsS;!KL~RBw8KEl>%LOM<-;P2bECI_9DRP zVeYagdJr`K@hZ}ybE?5`lOE}*j(E!|qp?V#)6chNcdU>c60~ThmPol8&o^K5GNQta z_^Uc_8U!Y+&SoCD>e38)C#+9z!qCHWX_~S6_Lbyal`=oG+IA3@8W5g9u-LMY{&WlW z2CqT*&2gDw-&5#JvR?ly2Z83H>acV}#k}mBtJbJ_FeZHI2d13MGQw~#f5CNdRB+Q# zqJVmmaBOfa>MhOg3A_$c*5LF}LB&^ho@S`VNad+^Xf7@=4>PiAt8aS8YP#*}e<-HY zG|5nY-4~Jd)77<)d!enT{$(8#69K^>wypB2$vkTplk@9dGOw6ZXI{WB6ESq3j5e-< zhic!VRt80+_Go)ut@%4I<~~1lefvY4K+NbqJVu&@(Qs*K7Uoh#!7|3GN5Hb${skaC z4Ob0gXi$OJ1!P84HspS#Gqs?~x`xYD^gnpUyoJ=Vv^+*qNs;f*BBnT0mOJ zo2(SU=s|EY2l#{4z?NWT_1;kz$u0N+HiU;-fNV8dx98tHf+5-~8P6!!?Z2CIfnlBB zrDlnU!?K|oHs=&WpXP3oAN1llLy!L{hdG#w^-q%M#Mi=d^xBBUrE11rdq1eji>#GS zk!DUd*As-SmRd@6%=*N;_`k7@YP7*}!?{vL+y<~AS<5=3huFIaHb052@24wqZuWnJ zHTk3gM0ipJJkl(0sleZ8!^Z?uqV;Gg93BUc2FkMn;ml+5wBT2^^tO|q6&AH#;EPic zQv@ayFVD*|ffy+tRHKcJA9E&(F8@y4%OBS#D)Dpl{J5+}bUF|NVcC}$1?F$yp_CaE z?dA&h<#}rCZWc#QvE&Fmwz>79ZK*{QLvV@cEDvA29IuZwzMc8D&F3k4yHXn8UIUe{~Dggs} z?Egcf<}$-dO1v;PKR!-AMe7@j-nOi)|Lr~9FL;S!1~D$A$p8IGAU5?Q71l#z(U0H- zhYlyDWG|tNrYiT6rOUMB!j??tkM{1@jmc$rDv0u~suRnGG9{#@!XA#gtN8QF9{3M4 zxy>OIjOcVTg4p1pV1^#JC;4(|dFXQ~E`e9$+V8lnmbREUK#bxt87BJ`NPO@1G6m%K z#Ph@}$_Igi{8IF;&}HActGfapKhE3AVcCCdPhdKr$`LM zcrvC&ECh*grn--^)8r4amZtxy*%G#??#No$yE)R{Ahx z^{@0#y&Ne9pS7Ky@7u$pu>`Tuh{CEQm)w^3S}GYd^^a0+TxeyZ^zz$ z={hh^^l$d~yLX^=9Ropr9m@LigKuHHg(!9F?7!os=e_OLpONS4y>^%_=ltWNA5|L&d$QNN4DPlE6fhp96C z`yy#2Fj`Va@r_js{Ttau^n5=N#;d+H*vL-Ov+p?*R!kt*pb_NI32v1HWufer_`G&Z zVZPY<>~otp4B7E#m@tGn^M4FhL2%z>-txw9a*0yUN3jSy__$b=6iH{uLrT$`xZ&dZ z{D~s-VWO9kSSc*2+mm;4VD^Zg>MVdDY|K^HjK6-3av!%&*4iP8A?64qDA>n_Pn&iJ z8|f(@q(Bg8y{Ct#HYG$KPANxY(+@qD-j3Kj8q$m59_mD9E_x_^)ZxfPW|2>8mg8c6@fk<=U;Zj%DQL0g} G3Hu*BsHHam literal 0 HcmV?d00001 diff --git a/fern/assets/recipes/code_generation/text_to_python.py b/fern/assets/recipes/code_generation/text_to_python.py new file mode 100644 index 00000000..b5cb88d3 --- /dev/null +++ b/fern/assets/recipes/code_generation/text_to_python.py @@ -0,0 +1,318 @@ +from pathlib import Path + +from data_designer.essentials import ( + CategorySamplerParams, + CodeLang, + CodeValidatorParams, + DataDesigner, + DataDesignerConfigBuilder, + LLMCodeColumnConfig, + LLMJudgeColumnConfig, + LLMTextColumnConfig, + SamplerColumnConfig, + SamplerType, + Score, + SubcategorySamplerParams, + ValidationColumnConfig, + ValidatorType, +) +from data_designer.interface.results import DatasetCreationResults + + +def build_config(model_alias: str) -> DataDesignerConfigBuilder: + config_builder = DataDesignerConfigBuilder() + + config_builder.add_column( + SamplerColumnConfig( + name="industry_sector", + sampler_type=SamplerType.CATEGORY, + params=CategorySamplerParams( + values=[ + "Healthcare", + "Finance", + "Technology", + ], + ), + ), + ) + + config_builder.add_column( + SamplerColumnConfig( + name="topic", + sampler_type=SamplerType.SUBCATEGORY, + params=SubcategorySamplerParams( + category="industry_sector", + values={ + "Healthcare": [ + "Electronic Health Records (EHR) Systems", + "Telemedicine Platforms", + "AI-Powered Diagnostic Tools", + ], + 
"Finance": [ + "Fraud Detection Software", + "Automated Trading Systems", + "Personal Finance Apps", + ], + "Technology": [ + "Cloud Computing Platforms", + "Artificial Intelligence and Machine Learning Platforms", + "DevOps and CI/CD Tools", + ], + }, + ), + ), + ) + + config_builder.add_column( + SamplerColumnConfig( + name="code_complexity", + sampler_type=SamplerType.CATEGORY, + params=CategorySamplerParams( + values=[ + "Beginner", + "Intermediate", + "Advanced", + ], + ), + ), + ) + + config_builder.add_column( + SamplerColumnConfig( + name="code_concept", + sampler_type=SamplerType.SUBCATEGORY, + params=SubcategorySamplerParams( + category="code_complexity", + values={ + "Beginner": [ + "Variables", + "Data Types", + "Functions", + "Loops", + "Classes", + ], + "Intermediate": [ + "List Comprehensions", + "Object-oriented programming", + "Lambda Functions", + "Web frameworks", + "Pandas", + ], + "Advanced": [ + "Multithreading", + "Context Managers", + "Generators", + ], + }, + ), + ), + ) + + config_builder.add_column( + SamplerColumnConfig( + name="instruction_phrase", + sampler_type=SamplerType.CATEGORY, + params=CategorySamplerParams( + values=[ + "Write a function that", + "Create a class that", + "Implement a script", + "Can you create a function", + "Develop a module that", + ], + ), + ), + ) + + config_builder.add_column( + LLMTextColumnConfig( + name="instruction", + model_alias=model_alias, + system_prompt=("You are an expert at generating clear and specific programming tasks."), + prompt=( + "Generate an instruction to create Python code that solves a specific problem.\n" + "Each instruction should begin with one of the following phrases: {{ instruction_phrase }}.\n\n" + "Important Guidelines:\n" + "* Industry Relevance: Ensure the instruction pertains to the {{ industry_sector }} sector and {{ topic }} topic.\n" + "* Code Complexity: Tailor the instruction to the {{ code_complexity }} level. Utilize relevant {{ code_concept }} where appropriate to match the complexity level.\n" + "* Clarity and Specificity: Make the problem statement clear and unambiguous. Provide sufficient context to understand the requirements without being overly verbose.\n" + "* Response Formatting: Do not include any markers such as ### Response ### in the instruction.\n" + ), + ) + ) + + config_builder.add_column( + LLMCodeColumnConfig( + name="code_implementation", + model_alias=model_alias, + code_lang=CodeLang.PYTHON, + system_prompt=( + "You are an expert Python programmer who writes clean, efficient, and well-documented code." 
+            ),
+            prompt=(
+                "Write Python code for the following instruction:\n"
+                "Instruction: {{ instruction }}\n\n"
+                "Important Guidelines:\n"
+                "* Code Quality: Your code should be clean, complete, self-contained, and accurate.\n"
+                "* Code Validity: Please ensure that your Python code is executable and does not contain any errors.\n"
+                "* Packages: Remember to import any necessary libraries, and to use all libraries you import.\n"
+                "* Complexity & Concepts: The code should be written at a {{ code_complexity }} level, making use of concepts such as {{ code_concept }}.\n"
+            ),
+        )
+    )
+
+    config_builder.add_column(
+        LLMJudgeColumnConfig(
+            name="code_judge_result",
+            model_alias=model_alias,
+            prompt=TEXT_TO_PYTHON_JUDGE_TEMPLATE,
+            scores=python_scoring,
+        )
+    )
+
+    config_builder.add_column(
+        ValidationColumnConfig(
+            name="code_validity_result",
+            validator_type=ValidatorType.CODE,
+            target_columns=["code_implementation"],
+            validator_params=CodeValidatorParams(
+                code_lang=CodeLang.PYTHON,
+            ),
+            batch_size=100,
+        )
+    )
+
+    return config_builder
+
+
+def create_dataset(
+    config_builder: DataDesignerConfigBuilder,
+    num_records: int,
+    artifact_path: Path | str | None = None,
+) -> DatasetCreationResults:
+    data_designer = DataDesigner(artifact_path=artifact_path)
+    results = data_designer.create(config_builder, num_records=num_records)
+    return results
+
+
+TEXT_TO_PYTHON_JUDGE_TEMPLATE = """\
+You are an expert in Python programming, with specialized knowledge in software engineering, data science, and algorithmic problem-solving.
+
+You think about potential flaws and errors in the code. You are a tough critic, but a fair one.
+ +Take a deep breath and use the Python Code Quality Rubric below to score the **Generated Python Code** based on the INSTRUCTIONS. + +#### INSTRUCTIONS +The Generated Python Code should be a valid response to the Natural Language Prompt below + +Natural Language Prompt: +{{ instruction }} + +Generated Python Code +{{ code_implementation }} +""" + + +python_scoring = [ + Score( + name="Relevance", + description="Adherence to INSTRUCTIONS and CONTEXT", + options={ + 4: "Perfectly meets all specified requirements.", + 3: "Meets most requirements with minor deviations.", + 2: "Moderate deviation from the instructions.", + 1: "Significant deviations from the instructions.", + 0: "Does not adhere to the instructions.", + }, + ), + Score( + name="Pythonic", + description="Pythonic Code and Best Practices (Does the code follow Python conventions and best practices?)", + options={ + 4: "The code exemplifies Pythonic principles, making excellent use of Python-specific constructs, standard library modules and programming idioms; follows all relevant PEPs.", + 3: "The code closely follows Python conventions and adheres to many best practices; good use of Python-specific constructs, standard library modules and programming idioms.", + 2: "The code generally follows Python conventions but has room for better alignment with Pythonic practices.", + 1: "The code loosely follows Python conventions, with several deviations from best practices.", + 0: "The code does not follow Python conventions or best practices, using non-Pythonic approaches.", + }, + ), + Score( + name="Readability", + description="Readability and Maintainability (Is the Python code easy to understand and maintain?)", + options={ + 4: ( + "The code is excellently formatted, follows PEP 8 guidelines, is elegantly concise and clear, uses meaningful variable names, " + "ensuring high readability and ease of maintenance; organizes complex logic well. Docstrings are given in a Google Docstring format." 
+ ), + 3: "The code is well-formatted in the sense of code-as-documentation, making it relatively easy to understand and maintain; uses descriptive names and organizes logic clearly.", + 2: "The code is somewhat readable with basic formatting and some comments, but improvements are needed; needs better use of descriptive names and organization.", + 1: "The code has minimal formatting, making it hard to understand; lacks meaningful names and organization.", + 0: "The code is unreadable, with no attempt at formatting or description.", + }, + ), + Score( + name="Efficiency", + description="Efficiency and Performance (Is the code optimized for performance?)", + options={ + 4: "The solution is highly efficient, using appropriate data structures and algorithms; avoids unnecessary computations and optimizes for both time and space complexity.", + 3: "The solution is efficient, with good use of Python's built-in functions and libraries; minor areas for optimization.", + 2: "The solution is moderately efficient, but misses some opportunities for optimization; uses some inefficient patterns.", + 1: "The solution shows poor efficiency, with notable performance issues; lacks effective optimization techniques.", + 0: "The solution is highly inefficient; overlooks fundamental optimization practices, resulting in significant performance issues.", + }, + ), +] + + +if __name__ == "__main__": + from argparse import ArgumentParser + + parser = ArgumentParser() + parser.add_argument("--model-alias", type=str, default="openai-text") + parser.add_argument("--num-records", type=int, default=5) + parser.add_argument("--artifact-path", type=str, default=None) + args = parser.parse_args() + + config_builder = build_config(model_alias=args.model_alias) + results = create_dataset(config_builder, num_records=args.num_records, artifact_path=args.artifact_path) + + print(f"Dataset saved to: {results.artifact_storage.final_dataset_path}") + + results.load_analysis().to_report() diff --git a/fern/assets/recipes/code_generation/text_to_sql.py b/fern/assets/recipes/code_generation/text_to_sql.py new file mode 100644 index 00000000..a0fbf6e5 --- /dev/null +++ b/fern/assets/recipes/code_generation/text_to_sql.py @@ -0,0 +1,323 @@ +from pathlib import Path + +from data_designer.essentials import ( + CategorySamplerParams, + CodeLang, + CodeValidatorParams, + DataDesigner, + DataDesignerConfigBuilder, + LLMCodeColumnConfig, + LLMJudgeColumnConfig, + LLMTextColumnConfig, + SamplerColumnConfig, + SamplerType, + Score, + SubcategorySamplerParams, + ValidationColumnConfig, + ValidatorType, +) +from data_designer.interface.results import DatasetCreationResults + + +def build_config(model_alias: str) -> DataDesignerConfigBuilder: + config_builder = DataDesignerConfigBuilder() + + config_builder.add_column( + SamplerColumnConfig( + name="industry_sector", + sampler_type=SamplerType.CATEGORY, + params=CategorySamplerParams( + values=["Healthcare", "Finance", "Technology"], + ), + ) + ) + + config_builder.add_column( + SamplerColumnConfig( + name="topic", + sampler_type=SamplerType.SUBCATEGORY, + params=SubcategorySamplerParams( + category="industry_sector", + values={ + "Healthcare": [ + "Electronic Health Records (EHR) Systems", + "Telemedicine Platforms", + "AI-Powered Diagnostic Tools", + ], + "Finance": [ + "Fraud Detection Software", + "Automated Trading Systems", + "Personal Finance Apps", + ], + "Technology": [ + "Cloud Computing Platforms", + "Artificial Intelligence and Machine Learning Platforms", + "DevOps and CI/CD Tools", 
+ ], + }, + ), + ) + ) + + config_builder.add_column( + SamplerColumnConfig( + name="sql_complexity", + sampler_type=SamplerType.CATEGORY, + params=CategorySamplerParams( + values=["Beginner", "Intermediate", "Advanced"], + ), + ) + ) + + config_builder.add_column( + SamplerColumnConfig( + name="sql_concept", + sampler_type=SamplerType.SUBCATEGORY, + params=SubcategorySamplerParams( + category="sql_complexity", + values={ + "Beginner": [ + "Basic SELECT Statements", + "WHERE Clauses", + "Basic JOINs", + "INSERT, UPDATE, DELETE", + ], + "Intermediate": [ + "Aggregation Functions", + "Multiple JOINs", + "Subqueries", + "Views", + ], + "Advanced": [ + "Window Functions", + "Common Table Expressions (CTEs)", + "Stored Procedures", + "Query Optimization", + ], + }, + ), + ) + ) + + config_builder.add_column( + SamplerColumnConfig( + name="sql_task_type", + sampler_type=SamplerType.CATEGORY, + params=CategorySamplerParams( + values=[ + "Data Retrieval", + "Data Manipulation", + "Analytics and Reporting", + "Data Transformation", + ], + ), + ) + ) + + config_builder.add_column( + SamplerColumnConfig( + name="instruction_phrase", + sampler_type=SamplerType.CATEGORY, + params=CategorySamplerParams( + values=[ + "Write an SQL query that", + "Create an SQL statement to", + "Develop an SQL query to", + "Can you write SQL that", + "Formulate an SQL query that", + ], + ), + ) + ) + + config_builder.add_column( + LLMTextColumnConfig( + name="sql_prompt", + model_alias=model_alias, + system_prompt="You are an expert at generating clear and specific SQL tasks.", + prompt=SQL_PROMPT_TEXT, + ) + ) + + config_builder.add_column( + LLMCodeColumnConfig( + name="sql_context", + model_alias=model_alias, + code_lang=CodeLang.SQL_ANSI, + system_prompt=( + "You are an expert SQL database designer who creates clean, efficient, and " + "well-structured database schemas." + ), + prompt=SQL_CONTEXT_TEXT, + ) + ) + + config_builder.add_column( + LLMCodeColumnConfig( + name="sql", + model_alias=model_alias, + code_lang=CodeLang.SQL_ANSI, + system_prompt="You are an expert SQL programmer who writes clean, efficient, and well-structured queries.", + prompt=SQL_CODE_TEXT, + ) + ) + + config_builder.add_column( + ValidationColumnConfig( + name="code_validity_result", + validator_type=ValidatorType.CODE, + target_columns=["sql"], + validator_params=CodeValidatorParams( + code_lang=CodeLang.SQL_ANSI, + ), + batch_size=100, + ) + ) + + config_builder.add_column( + LLMJudgeColumnConfig( + name="code_judge_result", + model_alias=model_alias, + prompt=TEXT_TO_SQL_JUDGE_TEMPLATE, + scores=sql_scoring, + ) + ) + + return config_builder + + +def create_dataset( + config_builder: DataDesignerConfigBuilder, + num_records: int, + artifact_path: Path | str | None = None, +) -> DatasetCreationResults: + data_designer = DataDesigner(artifact_path=artifact_path) + results = data_designer.create(config_builder, num_records=num_records) + return results + + +SQL_PROMPT_TEXT = ( + "Generate an instruction to create SQL code that solves a specific problem.\n" + "Each instruction should begin with one of the following phrases: {{instruction_phrase}}.\n\n" + "Important Guidelines:\n" + "* Industry Relevance: Ensure the instruction pertains to the {{industry_sector}} sector and {{topic}} topic.\n" + "* SQL Complexity: Tailor the instruction to the {{sql_complexity}} level. 
Utilize relevant {{sql_concept}} " + "where appropriate to match the complexity level.\n" + "* Task Type: The instruction should involve a {{sql_task_type}} task.\n" + "* Clarity and Specificity: Make the problem statement clear and unambiguous. Provide sufficient context to " + "understand the requirements without being overly verbose.\n" + "* Response Formatting: Do not include any markers such as ### Response ### in the instruction.\n" +) + +SQL_CONTEXT_TEXT = ( + "Generate the SQL for creating database tables that would be relevant for the following instruction:\n" + "Instruction: {{sql_prompt}}\n\n" + "Important Guidelines:\n" + "* Relevance: Ensure all tables are directly related to the {{industry_sector}} sector and {{topic}} topic.\n" + "* Completeness: Include all essential columns with appropriate data types, primary/foreign keys, and necessary constraints.\n" + "* Realism: Use realistic table structures typical for the specified industry.\n" + "* Executable SQL: Provide complete CREATE TABLE statements that can be run without modification.\n" + "* Consistency: Use consistent naming conventions (e.g., snake_case for table and column names).\n" + "* Sample Data: Include INSERT statements with sample data that makes sense for the tables (at least 5-10 rows per table)." +) + +SQL_CODE_TEXT = ( + "Write SQL code for the following instruction based on the provided database context:\n" + "Instruction: {{sql_prompt}}\n\n" + "Database Context:\n" + "{{sql_context}}\n\n" + "Important Guidelines:\n" + "* Code Quality: Your SQL should be clean, complete, self-contained and accurate.\n" + "* Code Validity: Please ensure that your SQL code is executable and does not contain any errors.\n" + "* Context: Base your query on the provided database context. Only reference tables and columns that " + "exist in the context.\n" + "* Complexity & Concepts: The SQL should be written at a {{sql_complexity}} level, making use of " + "concepts such as {{sql_concept}}.\n" + "* Task Type: Ensure your solution implements the appropriate {{sql_task_type}} operation.\n" + "* Comments: Include brief comments explaining the key parts of your query.\n" +) + + +TEXT_TO_SQL_JUDGE_TEMPLATE = """\ +You are an expert in SQL with deep knowledge of relational modeling, query semantics, +and performance tuning across common dialects (e.g., PostgreSQL, MySQL, SQLite, SQL Server). +You think critically about correctness, readability, and efficiency. + +Use the SQL Query Quality Rubric below to score the **Generated SQL Query** based on the INSTRUCTIONS. 
+ +#### INSTRUCTIONS +The Generated SQL Query should be a valid response to the Natural Language Prompt below + +Natural Language Prompt: +{{ sql_prompt }} + +Database Context: +{{ sql_context }} + +Generated SQL Query +{{ sql }} +""" + + +sql_scoring = [ + Score( + name="Relevance", + description="Adherence to INSTRUCTIONS and CONTEXT", + options={ + 4: "Perfectly meets all specified requirements.", + 3: "Meets most requirements with minor deviations.", + 2: "Moderate deviation from the instructions.", + 1: "Significant deviations from the instructions.", + 0: "Does not adhere to the instructions.", + }, + ), + Score( + name="SQL Correctness", + description="Syntax and semantic correctness; returns the intended result", + options={ + 4: "Valid SQL with correct joins, filters, grouping/aggregation, and NULL handling; produces the intended result set under the stated/implicit dialect.", + 3: "Generally correct with minor issues (e.g., edge-case NULLs, minor grouping detail) but still likely yields the intended result.", + 2: "Partially correct; noticeable semantic mistakes (joins, grouping, filters) that may change results or fail in edge cases.", + 1: "Largely incorrect; major semantic or syntactic errors likely causing failure or wrong results.", + 0: "Invalid SQL or unrelated to the task; will not run or cannot produce a meaningful result.", + }, + ), + Score( + name="Readability", + description="Formatting, clarity, and maintainability", + options={ + 4: "Cleanly formatted (keywords/clauses consistently styled), clear structure (CTEs/subqueries where helpful), meaningful table/column aliases, and concise.", + 3: "Generally readable with consistent formatting and understandable aliases; could be organized slightly better.", + 2: "Somewhat readable but inconsistent formatting or confusing aliasing; structure is harder to follow.", + 1: "Poorly formatted and hard to read; unclear structure and aliasing.", + 0: "Unreadable or chaotic; no meaningful structure or styling.", + }, + ), + Score( + name="Efficiency", + description="Query performance best practices", + options={ + 4: "Uses sargable predicates, appropriate joins, selective filters early, avoids SELECT *, unnecessary DISTINCT, and wasteful subqueries; likely to use indexes effectively.", + 3: "Mostly efficient; minor opportunities for improvement (e.g., simplifying expressions, reducing data early).", + 2: "Moderate inefficiencies (e.g., non-sargable filters, unnecessary nested subqueries, broad SELECT *).", + 1: "Notably inefficient patterns likely causing large scans or poor plans.", + 0: "Highly inefficient; ignores basic best practices and likely to perform very poorly.", + }, + ), +] + +if __name__ == "__main__": + from argparse import ArgumentParser + + parser = ArgumentParser() + parser.add_argument("--model-alias", type=str, default="openai-text") + parser.add_argument("--num-records", type=int, default=5) + parser.add_argument("--artifact-path", type=str, default=None) + args = parser.parse_args() + + config_builder = build_config(model_alias=args.model_alias) + results = create_dataset(config_builder, num_records=args.num_records, artifact_path=args.artifact_path) + + print(f"Dataset saved to: {results.artifact_storage.final_dataset_path}") + + results.load_analysis().to_report() diff --git a/fern/assets/recipes/qa_and_chat/multi_turn_chat.py b/fern/assets/recipes/qa_and_chat/multi_turn_chat.py new file mode 100644 index 00000000..b4debed7 --- /dev/null +++ b/fern/assets/recipes/qa_and_chat/multi_turn_chat.py @@ -0,0 
+1,204 @@ +from pathlib import Path +from typing import Literal + +from pydantic import BaseModel, Field + +from data_designer.essentials import ( + CategorySamplerParams, + DataDesigner, + DataDesignerConfigBuilder, + LLMJudgeColumnConfig, + LLMStructuredColumnConfig, + LLMTextColumnConfig, + SamplerColumnConfig, + SamplerType, + Score, + SubcategorySamplerParams, +) +from data_designer.interface.results import DatasetCreationResults + + +def build_config(model_alias: str) -> DataDesignerConfigBuilder: + config_builder = DataDesignerConfigBuilder() + + config_builder.add_column( + SamplerColumnConfig( + name="domain", + sampler_type=SamplerType.CATEGORY, + params=CategorySamplerParams(values=["Tech Support", "Personal Finances", "Educational Guidance"]), + ) + ) + + config_builder.add_column( + SamplerColumnConfig( + name="topic", + sampler_type=SamplerType.SUBCATEGORY, + params=SubcategorySamplerParams( + category="domain", + values={ + "Tech Support": [ + "Troubleshooting a Laptop", + "Setting Up a Home Wi-Fi Network", + "Installing Software Updates", + ], + "Personal Finances": [ + "Budgeting Advice", + "Understanding Taxes", + "Investment Strategies", + ], + "Educational Guidance": [ + "Choosing a College Major", + "Effective Studying Techniques", + "Learning a New Language", + ], + }, + ), + ) + ) + + config_builder.add_column( + SamplerColumnConfig( + name="complexity", + sampler_type=SamplerType.CATEGORY, + params=CategorySamplerParams(values=["Basic", "Intermediate", "Advanced"]), + ) + ) + + config_builder.add_column( + SamplerColumnConfig( + name="conversation_length", + sampler_type=SamplerType.CATEGORY, + params=CategorySamplerParams(values=[2, 4, 6, 8]), + ) + ) + + config_builder.add_column( + SamplerColumnConfig( + name="user_mood", + sampler_type=SamplerType.CATEGORY, + params=CategorySamplerParams(values=["happy", "silly", "sarcastic", "combative", "disappointed", "toxic"]), + ) + ) + + config_builder.add_column( + LLMTextColumnConfig( + name="assistant_system_prompt", + prompt=( + "Write a reasonable system prompt for a helpful AI assistant with expertise in " + "{{domain}} and {{topic}}. The AI assistant must not engage in harmful behaviors." + ), + model_alias=model_alias, + ) + ) + + config_builder.add_column( + LLMTextColumnConfig( + name="user_task", + prompt="Define a simple task related to {{topic}} of {{complexity}} complexity for the user.", + model_alias=model_alias, + ) + ) + + config_builder.add_column( + LLMStructuredColumnConfig( + name="conversation", + prompt=( + "\n{{user_task}}\n\n\n" + "{{assistant_system_prompt}}\n\n" + "Generate a conversation between a user and an AI assistant with about .\n" + "User is asking the assistant for advice and is in a {{user_mood}} mood.\n" + "The conversation must be {{conversation_length}} messages in length.\n" + "The conversation must come to a natural end in {{conversation_length}} messages and if the assistant is unable " + "to solve the user's needs by then, they should offer to continue the conversation later or redirect them to additional resources.\n" + "The conversation must be realistic and natural, incorporating details from both the user's and assistant's profiles.\n" + "The AI assistant role has the system prompt defined in . 
All of its responses must conform to :\n" + ), + output_format=ChatConversation, + model_alias=model_alias, + ) + ) + + config_builder.add_column( + LLMJudgeColumnConfig( + name="toxicity_evaluation", + prompt=TOXICITY_PROMPT_TEMPLATE, + scores=[toxicity_score], + model_alias=model_alias, + ) + ) + + return config_builder + + +def create_dataset( + config_builder: DataDesignerConfigBuilder, + num_records: int, + artifact_path: Path | str | None = None, +) -> DatasetCreationResults: + data_designer = DataDesigner(artifact_path=artifact_path) + results = data_designer.create(config_builder, num_records=num_records) + return results + + +toxicity_score = Score( + name="Toxicity", + description="Evaluation of harmful, harassing, or hateful language in conversations", + options={ + "4": "No toxicity detected; conversation is respectful and appropriate.", + "3": "Mild toxicity; slightly rude or sarcastic but not hateful or harmful.", + "2": "Moderate toxicity; contains disrespectful or harassing language.", + "1": "Severe toxicity; contains overt hate, harassment, or harmful content.", + "0": "Extreme toxicity; contains dangerous, threatening, or abusive content.", + }, +) + +TOXICITY_PROMPT_TEMPLATE = ( + "You are an expert in content moderation and digital communication ethics. " + "You are evaluating the toxicity level in user messages.\n" + "Take a deep breath and carefully analyze the conversation provided. \n" + "Use the rubric to score the level of toxicity in the USER'S messages only.\n" + "Consider:\n" + "- Focus primarily on harassment, hate, or harmful language\n" + "- Evaluate the full context of the conversation\n" + "- Be objective and consistent in your assessment\n" + "- If any single message exhibits toxicity, it should influence the final score\n" + "## CONVERSATION\n" + "{{ conversation }}" +) + + +class Message(BaseModel): + """A single message turn in the conversation.""" + + role: Literal["user", "assistant"] = Field(..., description="Which role is writing the message.") + content: str = Field(..., description="Message contents.") + + +class ChatConversation(BaseModel): + """A chat conversation between a specific user and an AI assistant. + * All conversations are initiated by the user role. + * The assistant role always responds to the user message. + * Turns alternate between user and assistant roles. + * The last message is always from the assistant role. + * Message content can be long or short. + * All assistant messages are faithful responses and must be answered fully. 
+ """ + + conversation: list[Message] = Field(..., description="List of all messages in the conversation.") + + +if __name__ == "__main__": + from argparse import ArgumentParser + + parser = ArgumentParser() + parser.add_argument("--model-alias", type=str, default="openai-text") + parser.add_argument("--num-records", type=int, default=5) + parser.add_argument("--artifact-path", type=str, default=None) + args = parser.parse_args() + + config_builder = build_config(model_alias=args.model_alias) + results = create_dataset(config_builder, num_records=args.num_records, artifact_path=args.artifact_path) + + print(f"Dataset saved to: {results.artifact_storage.final_dataset_path}") + + results.load_analysis().to_report() diff --git a/fern/assets/recipes/qa_and_chat/product_info_qa.py b/fern/assets/recipes/qa_and_chat/product_info_qa.py new file mode 100644 index 00000000..59a0110f --- /dev/null +++ b/fern/assets/recipes/qa_and_chat/product_info_qa.py @@ -0,0 +1,224 @@ +import string +from pathlib import Path + +from pydantic import BaseModel, Field + +from data_designer.essentials import ( + BernoulliSamplerParams, + CategorySamplerParams, + DataDesigner, + DataDesignerConfigBuilder, + ExpressionColumnConfig, + LLMJudgeColumnConfig, + LLMStructuredColumnConfig, + LLMTextColumnConfig, + SamplerColumnConfig, + SamplerType, + Score, + UniformSamplerParams, +) +from data_designer.interface.results import DatasetCreationResults + + +def build_config(model_alias: str) -> DataDesignerConfigBuilder: + config_builder = DataDesignerConfigBuilder() + config_builder.add_column( + SamplerColumnConfig( + name="category", + sampler_type=SamplerType.CATEGORY, + params=CategorySamplerParams( + values=[ + "Electronics", + "Clothing", + "Home Appliances", + "Groceries", + "Toiletries", + "Sports Equipment", + "Toys", + "Books", + "Pet Supplies", + "Tools & Home Improvement", + "Beauty", + "Health & Wellness", + "Outdoor Gear", + "Automotive", + "Jewelry", + "Watches", + "Office Supplies", + "Gifts", + "Arts & Crafts", + "Baby & Kids", + "Music", + "Video Games", + "Movies", + "Software", + "Tech Devices", + ] + ), + ) + ) + + config_builder.add_column( + SamplerColumnConfig( + name="price_tens_of_dollars", + sampler_type=SamplerType.UNIFORM, + params=UniformSamplerParams(low=1, high=200), + ) + ) + + config_builder.add_column( + ExpressionColumnConfig( + name="product_price", + expr="{{ (price_tens_of_dollars * 10) - 0.01 | round(2) }}", + dtype="float", + ) + ) + + config_builder.add_column( + SamplerColumnConfig( + name="first_letter", + sampler_type=SamplerType.CATEGORY, + params=CategorySamplerParams(values=list(string.ascii_uppercase)), + ) + ) + + config_builder.add_column( + SamplerColumnConfig( + name="is_hallucination", + sampler_type=SamplerType.BERNOULLI, + params=BernoulliSamplerParams(p=0.5), + ) + ) + + config_builder.add_column( + LLMStructuredColumnConfig( + name="product_info", + model_alias=model_alias, + prompt=( + "Generate a realistic product description for a product in the {{ category }} " + "category that costs {{ product_price }}.\n" + "The name of the product MUST start with the letter {{ first_letter }}.\n" + ), + output_format=ProductInfo, + ) + ) + + config_builder.add_column( + LLMTextColumnConfig( + name="question", + model_alias=model_alias, + prompt=("Ask a question about the following product:\n\n {{ product_info }}"), + ) + ) + + config_builder.add_column( + LLMTextColumnConfig( + name="answer", + model_alias=model_alias, + prompt=( + "{%- if is_hallucination == 0 -%}\n" + "\n" + 
"{{ product_info }}\n" + "\n" + "{%- endif -%}\n" + "User Question: {{ question }}\n" + "Directly and succinctly answer the user's question.\n" + "{%- if is_hallucination == 1 -%}\n" + "Make up whatever information you need to in order to answer the user's request.\n" + "{%- endif -%}" + ), + ) + ) + + # Evaluate answer quality + config_builder.add_column( + LLMJudgeColumnConfig( + name="llm_answer_metrics", + model_alias=model_alias, + prompt=( + "\n" + "{{ product_info }}\n" + "\n" + "User Question: {{question }}\n" + "AI Assistant Answer: {{ answer }}\n" + "Judge the AI assistant's response to the user's question about the product described in ." + ), + scores=answer_quality_scores, + ) + ) + + config_builder.add_column( + ExpressionColumnConfig( + name="completeness_result", + expr="{{ llm_answer_metrics.Completeness.score }}", + ) + ) + + config_builder.add_column( + ExpressionColumnConfig( + name="accuracy_result", + expr="{{ llm_answer_metrics.Accuracy.score }}", + ) + ) + + return config_builder + + +def create_dataset( + config_builder: DataDesignerConfigBuilder, + num_records: int, + artifact_path: Path | str | None = None, +) -> DatasetCreationResults: + data_designer = DataDesigner(artifact_path=artifact_path) + results = data_designer.create(config_builder, num_records=num_records) + return results + + +class ProductInfo(BaseModel): + product_name: str = Field(..., description="A realistic product name for the market.") + key_features: list[str] = Field(..., min_length=1, max_length=3, description="Key product features.") + description: str = Field( + ..., + description="A short, engaging description of what the product does, highlighting a unique but believable feature.", + ) + price_usd: float = Field(..., description="The price of the product", ge=10, le=1000, decimal_places=2) + + +completeness_score = Score( + name="Completeness", + description="Evaluation of AI assistant's thoroughness in addressing all aspects of the user's query.", + options={ + "Complete": "The response thoroughly covers all key points requested in the question, providing sufficient detail to satisfy the user's information needs.", + "PartiallyComplete": "The response addresses the core question but omits certain important details or fails to elaborate on relevant aspects that were requested.", + "Incomplete": "The response significantly lacks necessary information, missing major components of what was asked and leaving the query largely unanswered.", + }, +) + +accuracy_score = Score( + name="Accuracy", + description="Evaluation of how factually correct the AI assistant's response is relative to the product information.", + options={ + "Accurate": "The information provided aligns perfectly with the product specifications without introducing any misleading or incorrect details.", + "PartiallyAccurate": "While some information is correctly stated, the response contains minor factual errors or potentially misleading statements about the product.", + "Inaccurate": "The response presents significantly wrong information about the product, with claims that contradict the actual product details.", + }, +) + +answer_quality_scores = [completeness_score, accuracy_score] + + +if __name__ == "__main__": + from argparse import ArgumentParser + + parser = ArgumentParser() + parser.add_argument("--model-alias", type=str, default="openai-text") + parser.add_argument("--num-records", type=int, default=5) + parser.add_argument("--artifact-path", type=str, default=None) + args = parser.parse_args() + + config_builder = 
build_config(model_alias=args.model_alias) + results = create_dataset(config_builder, num_records=args.num_records, artifact_path=args.artifact_path) + + print(f"Dataset saved to: {results.artifact_storage.final_dataset_path}") + + results.load_analysis().to_report() diff --git a/fern/docs.yml b/fern/docs.yml new file mode 100644 index 00000000..b599caf4 --- /dev/null +++ b/fern/docs.yml @@ -0,0 +1,124 @@ +instances: + - url: https://nemo-datadesigner.docs.buildwithfern.com + +title: NeMo Data Designer + +tabs: + docs: + display-name: Documentation + slug: docs + api: + display-name: API Reference + slug: api + +navigation: + - tab: docs + layout: + - section: Getting Started + contents: + - page: Welcome + path: pages/index.mdx + - page: Installation + path: pages/installation.mdx + - page: Quick Start + path: pages/quick-start.mdx + - page: Contributing + path: pages/contributing.mdx + - section: Concepts + contents: + - section: Models + contents: + - page: Default Model Settings + path: pages/concepts/models/default-model-settings.mdx + - page: Custom Model Settings + path: pages/concepts/models/custom-model-settings.mdx + - page: Configure with CLI + path: pages/concepts/models/configure-with-cli.mdx + - page: Model Providers + path: pages/concepts/models/model-providers.mdx + - page: Model Configs + path: pages/concepts/models/model-configs.mdx + - page: Inference Parameters + path: pages/concepts/models/inference-parameters.mdx + - page: Columns + path: pages/concepts/columns.mdx + - page: Validators + path: pages/concepts/validators.mdx + - page: Processors + path: pages/concepts/processors.mdx + - page: Person Sampling + path: pages/concepts/person-sampling.mdx + - section: Tutorials + contents: + - page: Overview + path: pages/tutorials/overview.mdx + - page: The Basics + path: pages/tutorials/the-basics.mdx + - page: Structured Outputs + path: pages/tutorials/structured-outputs.mdx + - page: Seeding with a Dataset + path: pages/tutorials/seeding-with-dataset.mdx + - page: Images as Context + path: pages/tutorials/images-as-context.mdx + - section: Recipes + contents: + - page: Recipe Cards + path: pages/recipes/index.mdx + - section: Code Generation + contents: + - page: Text to Python + path: pages/recipes/code-generation/text-to-python.mdx + - page: Text to SQL + path: pages/recipes/code-generation/text-to-sql.mdx + - section: QA and Chat + contents: + - page: Product Info QA + path: pages/recipes/qa-and-chat/product-info-qa.mdx + - page: Multi-Turn Chat + path: pages/recipes/qa-and-chat/multi-turn-chat.mdx + - section: Plugins + contents: + - page: Overview + path: pages/plugins/overview.mdx + - page: Example Plugin + path: pages/plugins/example.mdx + - page: Available Plugins + path: pages/plugins/available.mdx + - tab: api + layout: + - section: API Reference + contents: + - page: Models + path: pages/api-reference/models.mdx + - page: Column Configs + path: pages/api-reference/column-configs.mdx + - page: Config Builder + path: pages/api-reference/config-builder.mdx + - page: Data Designer Config + path: pages/api-reference/data-designer-config.mdx + - page: Sampler Params + path: pages/api-reference/sampler-params.mdx + - page: Validator Params + path: pages/api-reference/validator-params.mdx + - page: Processors + path: pages/api-reference/processors.mdx + - page: Analysis + path: pages/api-reference/analysis.mdx + +colors: + accent-primary: + dark: "#76B900" + light: "#4a7300" + background: + dark: "#1a1a1a" + light: "#ffffff" + +logo: + dark: assets/favicon.png + 
light: assets/favicon.png + +favicon: assets/favicon.png + +navbar-links: + - type: github + value: https://github.com/NVIDIA-NeMo/DataDesigner diff --git a/fern/fern.config.json b/fern/fern.config.json new file mode 100644 index 00000000..3b23c05f --- /dev/null +++ b/fern/fern.config.json @@ -0,0 +1,4 @@ +{ + "organization": "nvidia-nemo", + "version": "3.40.1" +} diff --git a/fern/pages/api-reference/analysis.mdx b/fern/pages/api-reference/analysis.mdx new file mode 100644 index 00000000..f340b73d --- /dev/null +++ b/fern/pages/api-reference/analysis.mdx @@ -0,0 +1,162 @@ +--- +title: Analysis +description: API reference for dataset analysis and profiling. +--- + +# Analysis + +The `analysis` modules provide tools for profiling and analyzing generated datasets. It includes statistics tracking, column profiling, and reporting capabilities. + +## Column Statistics + +Column statistics are automatically computed for every column after generation. They provide basic metrics specific to the column type. For example, LLM columns track token usage statistics, sampler columns track distribution information, and validation columns track validation success rates. + +### LLMColumnStatistics + +```python +class LLMColumnStatistics(BaseModel): + """Statistics for LLM-generated columns.""" + + total_input_tokens: int # Total prompt tokens across all generations + total_output_tokens: int # Total completion tokens + avg_input_tokens: float # Average prompt tokens per generation + avg_output_tokens: float # Average completion tokens per generation + generation_time_seconds: float # Total generation time + generations_per_second: float # Generation throughput +``` + +### SamplerColumnStatistics + +```python +class SamplerColumnStatistics(BaseModel): + """Statistics for sampler columns.""" + + unique_values: int # Number of unique values generated + value_counts: dict[str, int] # Counts per value (for categorical) + min_value: float | None # Minimum value (for numerical) + max_value: float | None # Maximum value (for numerical) + mean_value: float | None # Mean value (for numerical) + std_value: float | None # Standard deviation (for numerical) +``` + +### ValidationColumnStatistics + +```python +class ValidationColumnStatistics(BaseModel): + """Statistics for validation columns.""" + + total_validated: int # Total records validated + valid_count: int # Number of valid records + invalid_count: int # Number of invalid records + null_count: int # Number of null results + pass_rate: float # Percentage of valid records +``` + +### ExpressionColumnStatistics + +```python +class ExpressionColumnStatistics(BaseModel): + """Statistics for expression columns.""" + + unique_values: int # Number of unique values + null_count: int # Number of null results + evaluation_time_seconds: float # Time to evaluate expressions +``` + +## Column Profilers + +Column profilers are optional analysis tools that provide deeper insights into specific column types. Currently, the only column profiler available is the Judge Score Profiler. + +### JudgeScoreProfilerResults + +```python +class JudgeScoreProfilerResults(BaseModel): + """Profiling results for LLM judge columns.""" + + score_name: str # Name of the score dimension + score_distribution: dict[str, int] # Distribution of scores + avg_score: float | None # Average score (for numeric scores) + score_counts: dict[str | int, int] # Counts per score value +``` + +## Dataset Profiler + +The `DatasetProfilerResults` class contains complete profiling results for a generated dataset. 
It aggregates column-level statistics, metadata, and profiler results. + +### DatasetProfilerResults + +```python +class DatasetProfilerResults(BaseModel): + """Complete profiling results for a generated dataset.""" + + dataset_name: str # Name of the dataset + total_records: int # Total records generated + generation_time_seconds: float # Total generation time + column_statistics: dict[str, ColumnStatistics] # Per-column stats + column_profiler_results: dict[str, list[ProfilerResults]] # Profiler results + + def to_report( + self, + output_format: Literal["console", "html", "svg"] = "console", + ) -> None: + """Generate a formatted analysis report. + + Args: + output_format: Output format for the report. + """ + ... + + def get_column_statistics( + self, + column_name: str, + ) -> ColumnStatistics: + """Get statistics for a specific column. + + Args: + column_name: Name of the column. + + Returns: + Column statistics object. + """ + ... + + def filter_by_column_type( + self, + column_type: str, + ) -> dict[str, ColumnStatistics]: + """Filter statistics by column type. + + Args: + column_type: Type of columns to filter (e.g., "llm-text"). + + Returns: + Dictionary of column statistics for matching columns. + """ + ... +``` + +### Example: Accessing Analysis Results + +```python +from data_designer.essentials import DataDesigner, DataDesignerConfigBuilder + +# Generate a dataset +data_designer = DataDesigner() +builder = DataDesignerConfigBuilder() +# ... add columns ... + +results = data_designer.create(builder, num_records=100) + +# Load and display analysis +analysis = results.load_analysis() +analysis.to_report() + +# Access specific column statistics +llm_stats = analysis.get_column_statistics("generated_text") +print(f"Average output tokens: {llm_stats.avg_output_tokens}") + +# Filter by column type +all_llm_stats = analysis.filter_by_column_type("llm-text") +for col_name, stats in all_llm_stats.items(): + print(f"{col_name}: {stats.generations_per_second:.2f} gen/sec") +``` diff --git a/fern/pages/api-reference/column-configs.mdx b/fern/pages/api-reference/column-configs.mdx new file mode 100644 index 00000000..dde8b350 --- /dev/null +++ b/fern/pages/api-reference/column-configs.mdx @@ -0,0 +1,185 @@ +--- +title: Column Configs +description: API reference for column configuration objects. +--- + +# Column Configurations + +The `column_configs` module defines configuration objects for all Data Designer column types. Each configuration inherits from `SingleColumnConfig`, which provides shared arguments like the column `name`, whether to `drop` the column after generation, and the `column_type`. + + +The `column_type` argument is used to identify column types when deserializing the Data Designer Config from JSON/YAML. It acts as the discriminator in a [discriminated union](https://docs.pydantic.dev/latest/concepts/unions/#discriminated-unions), allowing Pydantic to automatically determine which column configuration class to instantiate. + + +## SingleColumnConfig (Base Class) + +```python +class SingleColumnConfig(BaseModel): + """Base configuration for all column types.""" + + name: str # Column name (unique identifier) + drop: bool = False # Whether to drop column from final output + column_type: str # Discriminator field for column type + + @property + def required_columns(self) -> list[str]: + """Columns that must be generated before this one.""" + ... + + @property + def side_effect_columns(self) -> list[str]: + """Columns created as side effects (e.g., reasoning traces).""" + ... 
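+
+    # Illustrative note, not part of the class definition: every concrete column
+    # config below inherits these base fields, so a sampler column, for example,
+    # might be declared as
+    #   SamplerColumnConfig(
+    #       name="industry_sector",
+    #       sampler_type=SamplerType.CATEGORY,
+    #       params=CategorySamplerParams(values=["Healthcare", "Finance"]),
+    #       drop=False,
+    #   )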
+``` + +## SamplerColumnConfig + +```python +class SamplerColumnConfig(SingleColumnConfig): + """Configuration for sampler-based columns.""" + + column_type: Literal["sampler"] = "sampler" + sampler_type: SamplerType # Type of sampler to use + params: SamplerParams | None = None # Sampler-specific parameters + conditional_params: dict[str, SamplerParams] | None = None # Condition-based params + convert_to: Literal["int", "float", "str"] | None = None # Type conversion +``` + +## LLMTextColumnConfig + +```python +class LLMTextColumnConfig(SingleColumnConfig): + """Configuration for LLM text generation columns.""" + + column_type: Literal["llm-text"] = "llm-text" + model_alias: str # Reference to model configuration + prompt: str # Jinja2 template for the prompt + system_prompt: str | None = None # Optional system prompt + multi_modal_context: list[ImageContext] | None = None # Image inputs +``` + +## LLMCodeColumnConfig + +```python +class LLMCodeColumnConfig(SingleColumnConfig): + """Configuration for LLM code generation columns.""" + + column_type: Literal["llm-code"] = "llm-code" + model_alias: str # Reference to model configuration + prompt: str # Jinja2 template for the prompt + code_lang: CodeLang # Target programming language + system_prompt: str | None = None # Optional system prompt +``` + +## LLMStructuredColumnConfig + +```python +class LLMStructuredColumnConfig(SingleColumnConfig): + """Configuration for LLM structured output columns.""" + + column_type: Literal["llm-structured"] = "llm-structured" + model_alias: str # Reference to model configuration + prompt: str # Jinja2 template for the prompt + output_format: type[BaseModel] | dict # Pydantic model or JSON schema + system_prompt: str | None = None # Optional system prompt +``` + +## LLMJudgeColumnConfig + +```python +class LLMJudgeColumnConfig(SingleColumnConfig): + """Configuration for LLM judge/scoring columns.""" + + column_type: Literal["llm-judge"] = "llm-judge" + model_alias: str # Reference to model configuration + prompt: str # Jinja2 template for the judge prompt + scores: list[Score] # Scoring rubrics + system_prompt: str | None = None # Optional system prompt + +class Score(BaseModel): + """Scoring rubric definition.""" + + name: str # Score dimension name + description: str # Description of what's being evaluated + options: dict[str | int, str] # Score options with descriptions +``` + +## LLMEmbeddingColumnConfig + +```python +class LLMEmbeddingColumnConfig(SingleColumnConfig): + """Configuration for embedding generation columns.""" + + column_type: Literal["llm-embedding"] = "llm-embedding" + model_alias: str # Reference to model configuration + target_column: str # Column containing text to embed +``` + +## ExpressionColumnConfig + +```python +class ExpressionColumnConfig(SingleColumnConfig): + """Configuration for Jinja2 expression columns.""" + + column_type: Literal["expression"] = "expression" + expr: str # Jinja2 expression + dtype: Literal["str", "int", "float", "bool"] | None = None # Output type +``` + +## ValidationColumnConfig + +```python +class ValidationColumnConfig(SingleColumnConfig): + """Configuration for validation columns.""" + + column_type: Literal["validation"] = "validation" + validator_type: ValidatorType # Type of validator + target_columns: list[str] # Columns to validate + validator_params: ValidatorParams # Validator-specific parameters + batch_size: int = 10 # Number of records per validation batch +``` + +## SeedDatasetColumnConfig + +```python +class 
SeedDatasetColumnConfig(SingleColumnConfig): + """Configuration for seed dataset columns.""" + + column_type: Literal["seed-dataset"] = "seed-dataset" + source_column: str # Column name in the seed dataset +``` + +## CodeLang Enum + +```python +class CodeLang(str, Enum): + """Supported programming languages for code generation.""" + + PYTHON = "python" + JAVASCRIPT = "javascript" + TYPESCRIPT = "typescript" + JAVA = "java" + KOTLIN = "kotlin" + GO = "go" + RUST = "rust" + RUBY = "ruby" + SCALA = "scala" + SWIFT = "swift" + SQL_ANSI = "sql_ansi" + SQL_POSTGRES = "sql_postgres" + SQL_MYSQL = "sql_mysql" + SQL_SQLITE = "sql_sqlite" + SQL_TSQL = "sql_tsql" + SQL_BIGQUERY = "sql_bigquery" +``` + +## ValidatorType Enum + +```python +class ValidatorType(str, Enum): + """Supported validator types.""" + + CODE = "code" + LOCAL_CALLABLE = "local_callable" + REMOTE = "remote" +``` diff --git a/fern/pages/api-reference/config-builder.mdx b/fern/pages/api-reference/config-builder.mdx new file mode 100644 index 00000000..80e61341 --- /dev/null +++ b/fern/pages/api-reference/config-builder.mdx @@ -0,0 +1,172 @@ +--- +title: Config Builder +description: API reference for the DataDesignerConfigBuilder. +--- + +# Data Designer's Config Builder + +The `config_builder` module provides a high-level interface for constructing Data Designer configurations through the `DataDesignerConfigBuilder` class, enabling programmatic creation of `DataDesignerConfig` objects by incrementally adding column configurations, constraints, processors, and profilers. + +You can use the builder to create Data Designer configurations from scratch or from existing configurations stored in YAML/JSON files via `from_config()`. The builder includes validation capabilities to catch configuration errors early and can work with seed datasets from local sources or external datastores. Once configured, use `build()` to generate the final configuration object or `write_config()` to serialize it to disk. + + +`DataDesignerConfigBuilder` requires a list of model configurations at initialization. This tells the builder which model aliases can be referenced by LLM-generated columns (such as `LLMTextColumnConfig`, `LLMCodeColumnConfig`, `LLMStructuredColumnConfig`, and `LLMJudgeColumnConfig`). Each model configuration specifies the model alias, model provider, model ID, and inference parameters that will be used during data generation. + + +## DataDesignerConfigBuilder + +```python +class DataDesignerConfigBuilder: + """Builder for constructing Data Designer configurations.""" + + def __init__( + self, + model_configs: list[ModelConfig] | None = None, + ) -> None: + """Initialize the config builder. + + Args: + model_configs: List of model configurations. If None, loads defaults. + """ + ... + + @classmethod + def from_config( + cls, + config_path: str | Path, + model_configs: list[ModelConfig] | None = None, + ) -> "DataDesignerConfigBuilder": + """Create a builder from an existing configuration file. + + Args: + config_path: Path to YAML/JSON configuration file. + model_configs: Optional model configurations (overrides config file). + + Returns: + Configured DataDesignerConfigBuilder instance. + """ + ... + + def add_column( + self, + config: SingleColumnConfig | None = None, + **kwargs: Any, + ) -> "DataDesignerConfigBuilder": + """Add a column configuration to the builder. + + Args: + config: Column configuration object, OR + **kwargs: Keyword arguments to construct a column config. + + Returns: + Self for method chaining. + """ + ... 
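+
+    # Illustrative usage sketch, not part of the class definition. The model alias
+    # "nvidia-text" is assumed to exist in the supplied model configurations:
+    #   builder = DataDesignerConfigBuilder(model_configs=[...])
+    #   builder.add_column(
+    #       LLMTextColumnConfig(name="greeting", model_alias="nvidia-text", prompt="Write a greeting.")
+    #   )
+    #   config = builder.build()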
+ + def add_model_config( + self, + model_config: ModelConfig, + ) -> "DataDesignerConfigBuilder": + """Add a model configuration to the builder. + + Args: + model_config: Model configuration to add. + + Returns: + Self for method chaining. + """ + ... + + def add_processor( + self, + processor: ProcessorConfig, + ) -> "DataDesignerConfigBuilder": + """Add a processor to the configuration. + + Args: + processor: Processor configuration to add. + + Returns: + Self for method chaining. + """ + ... + + def with_seed_dataset( + self, + seed_source: SeedSource, + ) -> "DataDesignerConfigBuilder": + """Configure a seed dataset for the generation. + + Args: + seed_source: Seed dataset source configuration. + + Returns: + Self for method chaining. + """ + ... + + def build(self) -> DataDesignerConfig: + """Build the final configuration object. + + Returns: + Complete DataDesignerConfig object. + """ + ... + + def write_config( + self, + path: str | Path, + format: Literal["yaml", "json"] = "yaml", + ) -> None: + """Write the configuration to a file. + + Args: + path: Output file path. + format: Output format (yaml or json). + """ + ... + + @property + def info(self) -> ConfigBuilderInfo: + """Access to configuration information display utilities.""" + ... +``` + +## ConfigBuilderInfo + +```python +class ConfigBuilderInfo: + """Utility for displaying configuration information.""" + + def display( + self, + info_type: str | InfoType, + ) -> None: + """Display information about the configuration. + + Args: + info_type: Type of information to display. + Options: "samplers", "model_configs", "model_providers", etc. + """ + ... +``` + +## Seed Sources + +### LocalFileSeedSource + +```python +class LocalFileSeedSource(BaseModel): + """Seed dataset from a local file.""" + + path: str | Path # Path to CSV, Parquet, or JSON file +``` + +### DataFrameSeedSource + +```python +class DataFrameSeedSource(BaseModel): + """Seed dataset from a pandas DataFrame.""" + + df: pd.DataFrame # DataFrame to use as seed data +``` diff --git a/fern/pages/api-reference/data-designer-config.mdx b/fern/pages/api-reference/data-designer-config.mdx new file mode 100644 index 00000000..19d5a9be --- /dev/null +++ b/fern/pages/api-reference/data-designer-config.mdx @@ -0,0 +1,108 @@ +--- +title: Data Designer Config +description: API reference for the DataDesignerConfig object. +--- + +# Data Designer Configuration + +`DataDesignerConfig` is the main configuration object for building datasets with Data Designer. It is a declarative configuration for defining the dataset you want to generate column-by-column, including options for dataset post-processing, validation, and profiling. + +Generally, you should use the [DataDesignerConfigBuilder](/api/config-builder) to build your configuration, but you can also build it manually by instantiating the `DataDesignerConfig` class directly. + +## DataDesignerConfig + +```python +class DataDesignerConfig(BaseModel): + """Complete configuration for a Data Designer generation job.""" + + columns: list[SingleColumnConfig] # List of column configurations + processors: list[ProcessorConfig] = [] # Post-generation processors + seed_source: SeedSource | None = None # Optional seed dataset + + @property + def column_names(self) -> list[str]: + """Names of all configured columns.""" + ... + + @property + def dependency_graph(self) -> dict[str, list[str]]: + """Column dependency graph for execution ordering.""" + ... 
+ + def get_column(self, name: str) -> SingleColumnConfig: + """Get a column configuration by name. + + Args: + name: Column name. + + Returns: + Column configuration. + + Raises: + KeyError: If column not found. + """ + ... + + def to_yaml(self) -> str: + """Serialize configuration to YAML string.""" + ... + + def to_json(self) -> str: + """Serialize configuration to JSON string.""" + ... + + @classmethod + def from_yaml(cls, yaml_str: str) -> "DataDesignerConfig": + """Deserialize configuration from YAML string.""" + ... + + @classmethod + def from_json(cls, json_str: str) -> "DataDesignerConfig": + """Deserialize configuration from JSON string.""" + ... + + @classmethod + def from_file(cls, path: str | Path) -> "DataDesignerConfig": + """Load configuration from a file. + + Args: + path: Path to YAML or JSON file. + + Returns: + Loaded configuration. + """ + ... +``` + +## Configuration Serialization + +Data Designer configs can be serialized to and from YAML or JSON format, making it easy to: + +- Save configurations for reproducibility +- Share configurations with team members +- Version control your data generation pipelines +- Load and modify existing configurations + +### Example: Saving and Loading Configs + +```python +from data_designer.essentials import DataDesignerConfigBuilder + +# Build a configuration +builder = DataDesignerConfigBuilder() +builder.add_column(name="id", column_type="sampler", sampler_type="uuid") +builder.add_column( + name="greeting", + column_type="llm-text", + model_alias="nvidia-text", + prompt="Write a greeting." +) + +# Save to file +builder.write_config("my_config.yaml") + +# Load from file later +from data_designer.config.data_designer_config import DataDesignerConfig + +config = DataDesignerConfig.from_file("my_config.yaml") +``` diff --git a/fern/pages/api-reference/models.mdx b/fern/pages/api-reference/models.mdx new file mode 100644 index 00000000..c472a5a9 --- /dev/null +++ b/fern/pages/api-reference/models.mdx @@ -0,0 +1,106 @@ +--- +title: Models +description: API reference for model configuration objects. +--- + +# Models + +The `models` module defines configuration objects for model-based generation. `ModelProvider` specifies connection and authentication details for custom providers. `ModelConfig` encapsulates model details including the model alias, identifier, and inference parameters. [Inference Parameters](/docs/concepts/models/inference-parameters) controls model behavior through settings like `temperature`, `top_p`, and `max_tokens`, with support for both fixed values and distribution-based sampling. The module includes `ImageContext` for providing image inputs to multimodal models. 
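+
+As a quick orientation, a minimal pairing of a provider and a model configuration might look like the sketch below. The provider name, endpoint, API key environment variable, and model identifier are illustrative placeholders, and the `data_designer.essentials` import path for these classes is an assumption rather than something documented on this page:
+
+```python
+from data_designer.essentials import (  # assumed import path
+    ChatCompletionInferenceParams,
+    ModelConfig,
+    ModelProvider,
+)
+
+# Hypothetical OpenAI-compatible provider; the endpoint and API key env var are placeholders.
+provider = ModelProvider(
+    name="my-provider",
+    endpoint="https://api.example.com/v1",
+    api_key="MY_PROVIDER_API_KEY",
+)
+
+# Model config that references the provider by name and pins a few inference parameters.
+model_config = ModelConfig(
+    alias="example-text",
+    model="example/model-id",
+    provider="my-provider",
+    inference_parameters=ChatCompletionInferenceParams(
+        temperature=0.7,
+        top_p=0.95,
+        max_tokens=1024,
+    ),
+)
+```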
+ +For more information on how they are used, see below: + +- **[Model Providers](/docs/concepts/models/model-providers)** +- **[Model Configs](/docs/concepts/models/model-configs)** +- **[Images as Context](/docs/tutorials/images-as-context)** + +## ModelProvider + +```python +class ModelProvider(BaseModel): + """Configuration for a model provider endpoint.""" + + name: str # Unique identifier for the provider + endpoint: str # API endpoint URL + provider_type: str = "openai" # Provider type (default: OpenAI-compatible) + api_key: str | None = None # API key or environment variable name + extra_body: dict[str, Any] | None = None # Additional request body parameters + extra_headers: dict[str, str] | None = None # Additional headers +``` + +## ModelConfig + +```python +class ModelConfig(BaseModel): + """Configuration for a specific model.""" + + alias: str # Unique identifier for this model configuration + model: str # Model identifier as recognized by the provider + provider: str | None = None # Reference to provider by name + inference_parameters: InferenceParamsT | None = None # Inference parameters +``` + +## ChatCompletionInferenceParams + +```python +class ChatCompletionInferenceParams(BaseModel): + """Parameters for chat completion inference.""" + + temperature: float | Distribution | None = None # Sampling temperature (0.0-2.0) + top_p: float | Distribution | None = None # Nucleus sampling parameter (0.0-1.0) + max_tokens: int | None = None # Maximum output tokens + max_parallel_requests: int = 4 # Maximum concurrent API requests + timeout: int | None = None # Request timeout in seconds + extra_body: dict[str, Any] | None = None # Additional request body parameters +``` + +## EmbeddingInferenceParams + +```python +class EmbeddingInferenceParams(BaseModel): + """Parameters for embedding inference.""" + + encoding_format: Literal["float", "base64"] = "float" # Embedding encoding format + dimensions: int | None = None # Number of embedding dimensions + max_parallel_requests: int = 4 # Maximum concurrent API requests + timeout: int | None = None # Request timeout in seconds + extra_body: dict[str, Any] | None = None # Additional request body parameters +``` + +## ImageContext + +```python +class ImageContext(BaseModel): + """Configuration for providing image context to vision models.""" + + column_name: str # Name of column containing image data + data_type: ModalityDataType # Type of image data (BASE64, URL, etc.) + image_format: ImageFormat | None = None # Image format (PNG, JPEG, etc.) +``` + +## Distribution Types + +### UniformDistribution + +```python +class UniformDistribution(BaseModel): + """Uniform distribution for parameter sampling.""" + + params: UniformDistributionParams + +class UniformDistributionParams(BaseModel): + low: float # Lower bound + high: float # Upper bound +``` + +### ManualDistribution + +```python +class ManualDistribution(BaseModel): + """Manual distribution with discrete values.""" + + params: ManualDistributionParams + +class ManualDistributionParams(BaseModel): + values: list[float] # Discrete values to sample from + weights: list[float] | None = None # Optional probability weights +``` diff --git a/fern/pages/api-reference/processors.mdx b/fern/pages/api-reference/processors.mdx new file mode 100644 index 00000000..644ea0f9 --- /dev/null +++ b/fern/pages/api-reference/processors.mdx @@ -0,0 +1,112 @@ +--- +title: Processors +description: API reference for processor configuration objects. 
+--- + +# Processors + +The `processors` module defines configuration objects for post-generation data transformations. Processors run after column generation and can modify the dataset schema or content before output. + +## DropColumnsProcessorConfig + +```python +class DropColumnsProcessorConfig(BaseModel): + """Configuration for dropping columns from output.""" + + name: str # Processor identifier + column_names: list[str] # Columns to remove from output + build_stage: BuildStage = BuildStage.POST_BATCH # When to run +``` + +### Behavior + +- Columns in `column_names` are removed from the main output +- Dropped column values are saved to a separate file in `dropped-columns/` +- Missing columns produce a warning but don't fail the build +- Column configs are automatically marked with `drop=True` + +### Example Usage + +```python +from data_designer.essentials import ( + DataDesignerConfigBuilder, + DropColumnsProcessorConfig, +) + +builder = DataDesignerConfigBuilder() +# ... add columns ... + +builder.add_processor( + DropColumnsProcessorConfig( + name="remove_intermediate", + column_names=["temp_calculation", "raw_context", "debug_info"], + ) +) +``` + +## SchemaTransformProcessorConfig + +```python +class SchemaTransformProcessorConfig(BaseModel): + """Configuration for transforming output schema.""" + + name: str # Processor identifier + template: dict[str, Any] # Jinja2 template for output schema + build_stage: BuildStage = BuildStage.POST_BATCH # When to run +``` + +### Behavior + +- Each key in `template` becomes a column in the transformed output +- Values are Jinja2 templates with access to all columns +- Complex structures (lists, nested dicts) are supported +- Output saved to `processors-outputs/{name}/` +- Original dataset passes through unchanged + +### Template Capabilities + +- **Variable substitution**: `{{ column_name }}` +- **Filters**: `{{ text | upper }}`, `{{ text | lower }}`, `{{ text | trim }}` +- **Nested structures**: Arbitrarily deep JSON structures +- **Lists**: `["{{ col1 }}", "{{ col2 }}"]` +- **Conditionals**: `{% if condition %}...{% endif %}` + +### Example Usage + +```python +from data_designer.essentials import ( + DataDesignerConfigBuilder, + SchemaTransformProcessorConfig, +) + +builder = DataDesignerConfigBuilder() +# ... add columns with 'question' and 'answer' ... + +# Transform to chat message format +builder.add_processor( + SchemaTransformProcessorConfig( + name="chat_format", + template={ + "messages": [ + {"role": "user", "content": "{{ question }}"}, + {"role": "assistant", "content": "{{ answer }}"}, + ], + "metadata": { + "category": "{{ category | upper }}", + "generated": True, + }, + }, + ) +) +``` + +## BuildStage Enum + +```python +class BuildStage(str, Enum): + """When processors run in the generation pipeline.""" + + POST_BATCH = "post_batch" # After each batch is generated +``` + +Currently, all processors run at the `POST_BATCH` stage. Additional stages may be added in future versions. diff --git a/fern/pages/api-reference/sampler-params.mdx b/fern/pages/api-reference/sampler-params.mdx new file mode 100644 index 00000000..ecba1214 --- /dev/null +++ b/fern/pages/api-reference/sampler-params.mdx @@ -0,0 +1,152 @@ +--- +title: Sampler Params +description: API reference for sampler parameter configuration objects. +--- + +# Sampler Parameters + +The `sampler_params` module defines parameter configuration objects for all Data Designer sampler types. 
Sampler parameters are used within the `SamplerColumnConfig` to specify how values should be generated for sampled columns. + + +The config builder has an `info` attribute that can be used to display the available sampler types and their parameters: + +```python +config_builder.info.display("samplers") +``` + + +## CategorySamplerParams + +```python +class CategorySamplerParams(BaseModel): + """Parameters for categorical sampling.""" + + values: list[Any] # List of categorical values to sample from + weights: list[float] | None = None # Optional probability weights (normalized) +``` + +## SubcategorySamplerParams + +```python +class SubcategorySamplerParams(BaseModel): + """Parameters for hierarchical categorical sampling.""" + + category: str # Name of parent category column + values: dict[str, list[Any]] # Mapping of parent values to subcategory values +``` + +## UniformSamplerParams + +```python +class UniformSamplerParams(BaseModel): + """Parameters for uniform distribution sampling.""" + + low: float # Lower bound (inclusive) + high: float # Upper bound (exclusive for floats, inclusive for ints) +``` + +## GaussianSamplerParams + +```python +class GaussianSamplerParams(BaseModel): + """Parameters for Gaussian (normal) distribution sampling.""" + + mean: float = 0.0 # Distribution mean + std: float = 1.0 # Standard deviation +``` + +## BernoulliSamplerParams + +```python +class BernoulliSamplerParams(BaseModel): + """Parameters for Bernoulli (binary) sampling.""" + + p: float # Probability of success (1) +``` + +## BinomialSamplerParams + +```python +class BinomialSamplerParams(BaseModel): + """Parameters for binomial distribution sampling.""" + + n: int # Number of trials + p: float # Probability of success per trial +``` + +## PoissonSamplerParams + +```python +class PoissonSamplerParams(BaseModel): + """Parameters for Poisson distribution sampling.""" + + lam: float # Expected number of events (lambda) +``` + +## ScipySamplerParams + +```python +class ScipySamplerParams(BaseModel): + """Parameters for scipy.stats distribution sampling.""" + + distribution: str # Name of scipy.stats distribution + params: dict[str, Any] # Distribution-specific parameters +``` + +## UUIDSamplerParams + +```python +class UUIDSamplerParams(BaseModel): + """Parameters for UUID generation.""" + + prefix: str = "" # Optional prefix for the UUID + short_form: bool = False # Use shortened UUID format + uppercase: bool = False # Convert to uppercase +``` + +## DateTimeSamplerParams + +```python +class DateTimeSamplerParams(BaseModel): + """Parameters for datetime sampling.""" + + start: str | datetime # Start of date range + end: str | datetime # End of date range + format: str | None = None # Output format string (strftime) +``` + +## TimedeltaSamplerParams + +```python +class TimedeltaSamplerParams(BaseModel): + """Parameters for timedelta (duration) sampling.""" + + dt_min: int # Minimum delta in days + dt_max: int # Maximum delta in days + reference_column_name: str # Column containing reference datetime +``` + +## PersonSamplerParams + +```python +class PersonSamplerParams(BaseModel): + """Parameters for Nemotron-Personas person sampling.""" + + locale: str # Locale code (en_US, ja_JP, en_IN, hi_Deva_IN, hi_Latn_IN) + sex: str | None = None # Filter by "Male" or "Female" + city: str | list[str] | None = None # Filter by city + age_range: list[int] | None = None # [min_age, max_age] + with_synthetic_personas: bool = False # Include personality profiles + select_field_values: dict[str, list[str]] | None = 
None # Custom field filters +``` + +## PersonFromFakerSamplerParams + +```python +class PersonFromFakerSamplerParams(BaseModel): + """Parameters for Faker-based person sampling.""" + + locale: str = "en_US" # Faker locale + age_range: list[int] | None = None # [min_age, max_age] + sex: str | None = None # Filter by "Male" or "Female" +``` diff --git a/fern/pages/api-reference/validator-params.mdx b/fern/pages/api-reference/validator-params.mdx new file mode 100644 index 00000000..57a0faf3 --- /dev/null +++ b/fern/pages/api-reference/validator-params.mdx @@ -0,0 +1,169 @@ +--- +title: Validator Params +description: API reference for validator parameter configuration objects. +--- + +# Validator Parameters + +When creating a `ValidationColumnConfig`, two parameters are used to define the validator: `validator_type` and `validator_params`. +The `validator_type` parameter can be set to either `code`, `local_callable` or `remote`. The `validator_params` accompanying each of these is described below. + +## CodeValidatorParams + +```python +class CodeValidatorParams(BaseModel): + """Parameters for code validation.""" + + code_lang: CodeLang # Programming language to validate +``` + +### Supported Languages + +For Python code validation (uses Ruff): +- `CodeLang.PYTHON` + +For SQL code validation (uses SQLFluff): +- `CodeLang.SQL_ANSI` +- `CodeLang.SQL_POSTGRES` +- `CodeLang.SQL_MYSQL` +- `CodeLang.SQL_SQLITE` +- `CodeLang.SQL_TSQL` +- `CodeLang.SQL_BIGQUERY` + +### Example Usage + +```python +from data_designer.essentials import ( + CodeLang, + CodeValidatorParams, + ValidationColumnConfig, + ValidatorType, +) + +# Python code validation +python_validator = ValidationColumnConfig( + name="python_validation", + validator_type=ValidatorType.CODE, + target_columns=["python_code"], + validator_params=CodeValidatorParams(code_lang=CodeLang.PYTHON), + batch_size=10, +) + +# SQL code validation +sql_validator = ValidationColumnConfig( + name="sql_validation", + validator_type=ValidatorType.CODE, + target_columns=["sql_query"], + validator_params=CodeValidatorParams(code_lang=CodeLang.SQL_POSTGRES), + batch_size=10, +) +``` + +## LocalCallableValidatorParams + +```python +class LocalCallableValidatorParams(BaseModel): + """Parameters for local callable validation.""" + + validation_function: Callable[[pd.DataFrame], pd.DataFrame] + # Function that takes DataFrame and returns DataFrame with is_valid column + + output_schema: dict | None = None + # Optional JSON schema to validate function output +``` + +### Function Requirements + +The validation function must: +1. Accept a `pd.DataFrame` containing the target columns +2. Return a `pd.DataFrame` with at minimum an `is_valid` column (boolean or null) +3. 
Any additional columns in the output become validation metadata + +### Example Usage + +```python +import pandas as pd +from data_designer.essentials import ( + LocalCallableValidatorParams, + ValidationColumnConfig, + ValidatorType, +) + +def validate_positive_prices(df: pd.DataFrame) -> pd.DataFrame: + """Validate that all prices are positive.""" + result = pd.DataFrame() + result["is_valid"] = df["price"] > 0 + result["error_message"] = result["is_valid"].apply( + lambda v: "" if v else "Price must be positive" + ) + return result + +validator = ValidationColumnConfig( + name="price_validation", + validator_type=ValidatorType.LOCAL_CALLABLE, + target_columns=["price"], + validator_params=LocalCallableValidatorParams( + validation_function=validate_positive_prices, + ), + batch_size=50, +) +``` + +## RemoteValidatorParams + +```python +class RemoteValidatorParams(BaseModel): + """Parameters for remote HTTP validation.""" + + endpoint_url: str # URL of the validation endpoint + timeout: float = 30.0 # Request timeout in seconds + max_retries: int = 3 # Number of retry attempts + retry_backoff: float = 2.0 # Exponential backoff factor + max_parallel_requests: int = 4 # Maximum concurrent requests + output_schema: dict | None = None # Optional response schema validation +``` + +### Request/Response Format + +**Request (POST):** +```json +{ + "data": [ + {"column1": "value1", "column2": "value2"}, + {"column1": "value3", "column2": "value4"} + ] +} +``` + +**Response:** +```json +{ + "data": [ + {"is_valid": true, "additional_field": "value"}, + {"is_valid": false, "additional_field": "value"} + ] +} +``` + +### Example Usage + +```python +from data_designer.essentials import ( + RemoteValidatorParams, + ValidationColumnConfig, + ValidatorType, +) + +validator = ValidationColumnConfig( + name="external_validation", + validator_type=ValidatorType.REMOTE, + target_columns=["content"], + validator_params=RemoteValidatorParams( + endpoint_url="https://api.example.com/validate", + timeout=60.0, + max_retries=3, + max_parallel_requests=4, + ), + batch_size=5, +) +``` diff --git a/fern/pages/concepts/columns.mdx b/fern/pages/concepts/columns.mdx new file mode 100644 index 00000000..a3d293d1 --- /dev/null +++ b/fern/pages/concepts/columns.mdx @@ -0,0 +1,162 @@ +--- +title: Columns +description: The fundamental building blocks in Data Designer for defining dataset fields. +--- + +# Columns + +Columns are the fundamental building blocks in Data Designer. Each column represents a field in your dataset and defines how to generate it—whether that's sampling from a distribution, calling an LLM, or applying a transformation. + + +Columns are **declarative specifications**. You describe *what* you want, and the framework handles *how* to generate it—managing execution order, batching, parallelization, and resources automatically. + + +## Column Types + +Data Designer provides nine built-in column types, each optimized for different generation scenarios. + +### 🎲 Sampler Columns + +Sampler columns generate data using numerical sampling—fast, deterministic, and ideal for numerical and categorical dataset fields. They're significantly faster than LLMs and can produce data following specific distributions (Poisson for event counts, Gaussian for measurements, etc.). 
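+
+For example, a category sampler column takes only a few lines to configure (a minimal sketch; the column name, values, and weights are placeholders):
+
+```python
+from data_designer.essentials import (
+    CategorySamplerParams,
+    DataDesignerConfigBuilder,
+    SamplerColumnConfig,
+    SamplerType,
+)
+
+config_builder = DataDesignerConfigBuilder()
+
+# Sample a categorical "topic" value for each record, with optional probability weights
+config_builder.add_column(
+    SamplerColumnConfig(
+        name="topic",
+        sampler_type=SamplerType.CATEGORY,
+        params=CategorySamplerParams(
+            values=["Technology", "Healthcare", "Finance"],
+            weights=[0.5, 0.3, 0.2],
+        ),
+    )
+)
+```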
+ +Available sampler types: + +- **UUID**: Unique identifiers +- **Category**: Categorical values with optional probability weights +- **Subcategory**: Hierarchical categorical data (states within countries, models within brands) +- **Uniform**: Evenly distributed numbers (integers or floats) +- **Gaussian**: Normally distributed values with configurable mean and standard deviation +- **Bernoulli**: Binary outcomes with specified success probability +- **Bernoulli Mixture**: Binary outcomes from multiple probability components +- **Binomial**: Count of successes in repeated trials +- **Poisson**: Count data and event frequencies +- **Scipy**: Access to the full scipy.stats distribution library +- **Person**: Realistic synthetic individuals with names, demographics, and attributes +- **Datetime**: Timestamps within specified ranges +- **Timedelta**: Time duration values + + +Samplers support **conditional parameters** that change behavior based on other columns. Want age distributions that vary by country? Income ranges that depend on occupation? Just define conditions on existing column values. + + +### 📝 LLM-Text Columns + +LLM-Text columns generate natural language text: product descriptions, customer reviews, narrative summaries, email threads, or anything requiring semantic understanding and creativity. + +Use **Jinja2 templating** in prompts to reference other columns. Data Designer automatically manages dependencies and injects the referenced column values into the prompt. + + +Models that support extended thinking (chain-of-thought reasoning) can capture their reasoning process in a separate `{column_name}__reasoning_trace` column—useful for understanding *why* the model generated specific content. This column is automatically added to the dataset if the model and service provider parse and return reasoning content. + + +### 💻 LLM-Code Columns + +LLM-Code columns generate code in specific programming languages. They handle the prompting and parsing necessary to extract clean code from the LLM's response—automatically detecting and extracting code from markdown blocks. You provide the prompt and choose the model; the column handles the extraction. + +Supported languages: **Python, JavaScript, TypeScript, Java, Kotlin, Go, Rust, Ruby, Scala, Swift**, plus **SQL** dialects (SQLite, PostgreSQL, MySQL, T-SQL, BigQuery, ANSI SQL). + +### 🗂️ LLM-Structured Columns + +LLM-Structured columns generate JSON with a *guaranteed schema*. Define your structure using a Pydantic model or JSON schema, and Data Designer ensures the LLM output conforms—no parsing errors, no schema drift. + +Use for complex nested structures: API responses, configuration files, database records with multiple related fields, or any structured data where type safety matters. Schemas can be arbitrarily complex with nested objects, arrays, enums, and validation constraints, but success depends on the model's capabilities. + + +Flat schemas with simple fields are easier and more robustly produced across models. Deeply nested schemas with complex validation constraints are more sensitive to model choice—stronger models handle complexity better. If you're experiencing schema conformance issues, try simplifying the schema or switching to a more capable model. + + +### ⚖️ LLM-Judge Columns + +LLM-Judge columns score generated content across multiple quality dimensions using LLMs as evaluators. + +Define scoring rubrics (relevance, accuracy, fluency, helpfulness) and the judge model evaluates each record. 
Score rubrics specify criteria and scoring options (1-5 scales, categorical grades, etc.), producing quantified quality metrics for every data point. + +Use judge columns for data quality filtering (e.g., keep only 4+ rated responses), A/B testing generation strategies, and quality monitoring over time. + +### 🧬 Embedding Columns + +Embedding columns generate vector embeddings (numerical representations) for text content using embedding models. These embeddings capture semantic meaning, enabling similarity search, clustering, and semantic analysis. + +Specify a `target_column` containing text, and Data Designer generates embeddings for that content. The target column can contain either a single text string or a list of text strings in stringified JSON format. In the latter case, embeddings are generated for each text string in the list. + +Common use cases: + +- **Semantic search**: Generate embeddings for documents, then find similar content by vector similarity +- **Clustering**: Group similar texts based on embedding proximity +- **Recommendation systems**: Match content by semantic similarity +- **Anomaly detection**: Identify outliers in embedding space + + +Embedding columns require an embedding model configured with `EmbeddingInferenceParams`. These models differ from chat completion models—they output vectors rather than text. The generation type is automatically determined by the inference parameters type. + + +### 🧩 Expression Columns + +Expression columns handle simple transformations using **Jinja2 templates**—concatenate first and last names, calculate numerical totals, format date strings. No LLM overhead needed. + +Template capabilities: + +- **Variable substitution**: Pull values from any existing column +- **String filters**: Uppercase, lowercase, strip whitespace, replace patterns +- **Conditional logic**: if/elif/else support +- **Arithmetic**: Add, subtract, multiply, divide + +### 🔍 Validation Columns + +Validation columns check generated content against rules and return structured pass/fail results. + +Built-in validation types: + +**Code validation** runs Python or SQL code through a linter to validate the code. + +**Local callable validation** accepts a Python function directly when using Data Designer as a library. + +**Remote validation** sends data to HTTP endpoints for validation-as-a-service. Useful for linters, security scanners, or proprietary systems. + +### 🌱 Seed Dataset Columns + +Seed dataset columns bootstrap generation from existing data. Provide a real dataset, and those columns become available as context for generating new synthetic data. + +Typical pattern: use seed data for one part of your schema (real product names and categories), then generate synthetic fields around it (customer reviews, purchase histories, ratings). The seed data provides realism and constraints; generated columns add volume and variation. + +## Shared Column Properties + +Every column configuration inherits from `SingleColumnConfig` with these standard properties: + +### `name` + +The column's identifier—unique within your configuration, used in Jinja2 references, and becomes the column name in the output DataFrame. Choose descriptive names: `user_review` > `col_17`. + +### `drop` + +Boolean flag (default: `False`) controlling whether the column appears in final output. Setting `drop=True` generates the column (available as a dependency) but excludes it from final output. 
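+
+For example, an intermediate context column can be generated for prompting but kept out of the final dataset (a minimal sketch; it assumes a `config_builder`, a `topic` column, and the default `nvidia-text` alias are already defined):
+
+```python
+from data_designer.essentials import LLMTextColumnConfig
+
+# Generated and usable as a dependency by other columns, but excluded from the output
+config_builder.add_column(
+    LLMTextColumnConfig(
+        name="raw_context",
+        model_alias="nvidia-text",
+        prompt="Write three background facts about {{ topic }}.",
+        drop=True,
+    )
+)
+```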
+ +**When to drop columns:** + +- Intermediate calculations that feed expressions but aren't meaningful standalone +- Context columns used only for LLM prompt templates +- Validation results during development unwanted in production + +Dropped columns participate fully in generation and the dependency graph—just filtered out at the end. + +### `column_type` + +Literal string identifying the column type: `"sampler"`, `"llm-text"`, `"expression"`, etc. Set automatically by each configuration class and serves as Pydantic's discriminator for deserialization. + +You rarely set this manually—instantiating `LLMTextColumnConfig` automatically sets `column_type="llm-text"`. Serialization is reversible: save to YAML, load later, and Pydantic reconstructs the exact objects. + +### `required_columns` + +Computed property listing columns that must be generated before this one. The framework derives this automatically: + +- For LLM/Expression columns: extracted from Jinja2 template `{{ variables }}` +- For Validation columns: explicitly listed target columns +- For Sampler columns with conditional parameters: columns referenced in conditions + +You read this property for introspection but never set it—always computed from configuration details. + +### `side_effect_columns` + +Computed property listing columns created implicitly alongside the primary column. Currently, only LLM columns produce side effects (reasoning trace columns like `{name}__reasoning_trace` when models use extended thinking). + +For detailed information on each column type, refer to the [column configuration API reference](/api/column-configs). diff --git a/fern/pages/concepts/models/configure-with-cli.mdx b/fern/pages/concepts/models/configure-with-cli.mdx new file mode 100644 index 00000000..ecbbd435 --- /dev/null +++ b/fern/pages/concepts/models/configure-with-cli.mdx @@ -0,0 +1,150 @@ +--- +title: Configure with CLI +description: Use the Data Designer CLI to manage model providers and configurations. +--- + +# Configuring Model Settings Using The CLI + +The Data Designer CLI provides an interactive interface for creating and managing default model providers and model configurations stored in your Data Designer home directory (default: `~/.data-designer/`). + +## Configuration Files + +The CLI manages two YAML configuration files: + +- **`model_providers.yaml`**: Model provider configurations +- **`model_configs.yaml`**: Model configurations + + +If these configuration files don't already exist, the Data Designer library automatically creates them with default settings at runtime when first initialized. 
+ + + +You can customize the configuration directory location with the `DATA_DESIGNER_HOME` environment variable: + +```bash +export DATA_DESIGNER_HOME="/path/to/your/custom/directory" +``` + + +## CLI Commands + +The Data Designer CLI provides four main configuration commands: + +```bash +# Configure model providers +data-designer config providers + +# Configure models +data-designer config models + +# List current configurations +data-designer config list + +# Reset all configurations +data-designer config reset +``` + + +See available commands + +```bash +data-designer --help +``` + +See available sub-commands + +```bash +data-designer config --help +``` + + +## Managing Model Providers + +Run the interactive provider configuration command: + +```bash +data-designer config providers +``` + +### Available Operations + +**Add a new provider**: Define a new provider by entering its name, endpoint URL, provider type, and optionally an API key (as plain text or as an environment variable name). + +**Update an existing provider**: Modify an existing provider's settings. All fields are pre-filled with current values. + +**Delete a provider**: Remove a provider and its associated models. + +**Delete all providers**: Remove all providers and their associated models. + +**Change default provider**: Set which provider is used by default. This option is only available when multiple providers are configured. + +## Managing Model Configurations + +Run the interactive model configuration command: + +```bash +data-designer config models +``` + + +You need at least one provider configured before adding models. Run `data-designer config providers` first if none exist. + + +### Available Operations + +**Add a new model configuration** + +Create a new model configuration with the following fields: + +- **Alias**: A unique name for referencing this model in a column configuration. +- **Model ID**: The model identifier (e.g., `nvidia/nemotron-3-nano-30b-a3b`) +- **Provider**: Select from available providers (if multiple exist) +- **Temperature**: Sampling temperature (0.0 to 2.0) +- **Top P**: Nucleus sampling parameter (0.0 to 1.0) +- **Max Tokens**: Maximum output length (1 to 100000) + + +To configure additional inference parameter settings or use distribution-based inference parameters, edit the `model_configs.yaml` file directly. + + +**Update an existing model configuration**: Modify an existing model's configuration. All fields are pre-filled with current values. + +**Delete a model configuration**: Remove a single model configuration. + +**Delete all model configurations**: Remove all model configurations. The CLI will ask for confirmation before proceeding. + +## Listing Configurations + +View all current configurations: + +```bash +data-designer config list +``` + +This command displays: + +- **Model Providers**: All configured providers with their endpoints (API keys are masked) +- **Default Provider**: The currently selected default provider +- **Model Configurations**: All configured models with their settings + +## Resetting Configurations + +Delete all configuration files: + +```bash +data-designer config reset +``` + +The CLI will show which configuration files exist and ask for confirmation before deleting them. + + +This command permanently deletes all configuration files and resets to the default model providers and configurations. You'll need to reconfigure your custom configurations from scratch. 
+ + +## See Also + +- **[Default Model Settings](/docs/concepts/models/default-model-settings)**: Pre-configured providers and model settings included with Data Designer +- **[Custom Model Settings](/docs/concepts/models/custom-model-settings)**: Learn how to create custom providers and model configurations +- **[Model Providers](/docs/concepts/models/model-providers)**: Learn about the `ModelProvider` class and provider configuration +- **[Model Configurations](/docs/concepts/models/model-configs)**: Learn about `ModelConfig` +- **[Quick Start Guide](/docs/quick-start)**: Get started with a simple example diff --git a/fern/pages/concepts/models/custom-model-settings.mdx b/fern/pages/concepts/models/custom-model-settings.mdx new file mode 100644 index 00000000..99d97480 --- /dev/null +++ b/fern/pages/concepts/models/custom-model-settings.mdx @@ -0,0 +1,237 @@ +--- +title: Custom Model Settings +description: Create custom providers and model configurations for Data Designer. +--- + +# Custom Model Settings + +While Data Designer ships with pre-configured model providers and configurations, you can create custom configurations to use different models, adjust inference parameters, or connect to custom API endpoints. + +## When to Use Custom Settings + +Use custom model settings when you need to: + +- Use models not included in the defaults +- Adjust inference parameters (temperature, top_p, max_tokens) for specific use cases +- Add distribution-based inference parameters for variability +- Connect to self-hosted or custom model endpoints +- Create multiple variants of the same model with different settings + +## Creating and Using Custom Settings + +### Custom Models with Default Providers + +Create custom model configurations that use the default providers (no need to define providers yourself): + +```python +from data_designer.essentials import ( + CategorySamplerParams, + ChatCompletionInferenceParams, + DataDesigner, + DataDesignerConfigBuilder, + LLMTextColumnConfig, + ModelConfig, + SamplerColumnConfig, + SamplerType, +) + +# Create custom models using default providers +custom_models = [ + # High-temperature for more variability + ModelConfig( + alias="creative-writer", + model="nvidia/nemotron-3-nano-30b-a3b", + provider="nvidia", # Uses default NVIDIA provider + inference_parameters=ChatCompletionInferenceParams( + temperature=1.2, + top_p=0.98, + max_tokens=4096, + ), + ), + # Low-temperature for less variability + ModelConfig( + alias="fact-checker", + model="nvidia/nemotron-3-nano-30b-a3b", + provider="nvidia", # Uses default NVIDIA provider + inference_parameters=ChatCompletionInferenceParams( + temperature=0.1, + top_p=0.9, + max_tokens=2048, + ), + ), +] + +# Create DataDesigner (uses default providers) +data_designer = DataDesigner() + +# Pass custom models to config builder +config_builder = DataDesignerConfigBuilder(model_configs=custom_models) + +# Add a topic column using a categorical sampler +config_builder.add_column( + SamplerColumnConfig( + name="topic", + sampler_type=SamplerType.CATEGORY, + params=CategorySamplerParams( + values=["Artificial Intelligence", "Space Exploration", "Ancient History", "Climate Science"], + ), + ) +) + +# Use your custom models +config_builder.add_column( + LLMTextColumnConfig( + name="creative_story", + model_alias="creative-writer", + prompt="Write a creative short story about {{topic}}.", + ) +) + +config_builder.add_column( + LLMTextColumnConfig( + name="facts", + model_alias="fact-checker", + prompt="List 3 facts about {{topic}}.", + 
) +) + +# Preview your dataset +preview_result = data_designer.preview(config_builder=config_builder) +preview_result.display_sample_record() +``` + + +When you only specify `model_configs`, the default model providers (NVIDIA, OpenAI, and OpenRouter) are still available. You only need to create custom providers if you want to connect to different endpoints or modify provider settings. + + + +When you provide custom `model_configs` to `DataDesignerConfigBuilder`, they **replace** the defaults entirely. To use custom model configs in addition to the default configs, use the add_model_config method: + +```python +# Load defaults first +config_builder = DataDesignerConfigBuilder() + +# Add custom model to defaults +config_builder.add_model_config( + ModelConfig( + alias="my-custom-model", + model="nvidia/llama-3.3-nemotron-super-49b-v1.5", + provider="nvidia", # Uses default provider + inference_parameters=ChatCompletionInferenceParams( + temperature=0.6, + max_tokens=8192, + ), + ) +) + +# Now you can use both default and custom models +# Default: nvidia-text, nvidia-reasoning, nvidia-vision, etc. +# Custom: my-custom-model +``` + + +### Custom Providers with Custom Models + +Define both custom providers and custom model configurations when you need to connect to services not included in the defaults: + + +The custom provider endpoints must be reachable from where Data Designer runs. Ensure network connectivity, firewall rules, and any VPN requirements are properly configured. + + +```python +from data_designer.essentials import ( + CategorySamplerParams, + ChatCompletionInferenceParams, + DataDesigner, + DataDesignerConfigBuilder, + LLMTextColumnConfig, + ModelConfig, + ModelProvider, + SamplerColumnConfig, + SamplerType, +) + +# Step 1: Define custom providers +custom_providers = [ + ModelProvider( + name="my-custom-provider", + endpoint="https://api.my-llm-service.com/v1", + provider_type="openai", # OpenAI-compatible API + api_key="MY_SERVICE_API_KEY", # Environment variable name + ), + ModelProvider( + name="my-self-hosted-provider", + endpoint="https://my-org.internal.com/llm/v1", + provider_type="openai", + api_key="SELF_HOSTED_API_KEY", + ), +] + +# Step 2: Define custom models +custom_models = [ + ModelConfig( + alias="my-text-model", + model="openai/some-model-id", + provider="my-custom-provider", # References provider by name + inference_parameters=ChatCompletionInferenceParams( + temperature=0.85, + top_p=0.95, + max_tokens=2048, + ), + ), + ModelConfig( + alias="my-self-hosted-text-model", + model="openai/some-hosted-model-id", + provider="my-self-hosted-provider", + inference_parameters=ChatCompletionInferenceParams( + temperature=0.7, + top_p=0.9, + max_tokens=1024, + ), + ), +] + +# Step 3: Create DataDesigner with custom providers +data_designer = DataDesigner(model_providers=custom_providers) + +# Step 4: Create config builder with custom models +config_builder = DataDesignerConfigBuilder(model_configs=custom_models) + +# Step 5: Add a topic column using a categorical sampler +config_builder.add_column( + SamplerColumnConfig( + name="topic", + sampler_type=SamplerType.CATEGORY, + params=CategorySamplerParams( + values=["Technology", "Healthcare", "Finance", "Education"], + ), + ) +) + +# Step 6: Use your custom model by referencing its alias +config_builder.add_column( + LLMTextColumnConfig( + name="short_news_article", + model_alias="my-text-model", # Reference custom alias + prompt="Write a short news article about the '{{topic}}' topic in 10 sentences.", + ) +) + 
+config_builder.add_column( + LLMTextColumnConfig( + name="long_news_article", + model_alias="my-self-hosted-text-model", # Reference custom alias + prompt="Write a detailed news article about the '{{topic}}' topic.", + ) +) + +# Step 7: Preview your dataset +preview_result = data_designer.preview(config_builder=config_builder) +preview_result.display_sample_record() +``` + +## See Also + +- **[Default Model Settings](/docs/concepts/models/default-model-settings)**: Pre-configured providers and model settings +- **[Configure Model Settings With the CLI](/docs/concepts/models/configure-with-cli)**: CLI-based configuration +- **[Quick Start Guide](/docs/quick-start)**: Basic usage example diff --git a/fern/pages/concepts/models/default-model-settings.mdx b/fern/pages/concepts/models/default-model-settings.mdx new file mode 100644 index 00000000..030fca82 --- /dev/null +++ b/fern/pages/concepts/models/default-model-settings.mdx @@ -0,0 +1,132 @@ +--- +title: Default Model Settings +description: Pre-configured model providers and configurations included with Data Designer. +--- + +# Default Model Settings + +Data Designer ships with pre-configured model providers and model configurations that make it easy to start generating synthetic data without manual setup. + +## Model Providers + +Data Designer includes a few default model providers that are configured automatically: + +### NVIDIA Provider (`nvidia`) + +- **Endpoint**: `https://integrate.api.nvidia.com/v1` +- **API Key**: Set via `NVIDIA_API_KEY` environment variable +- **Models**: Access to NVIDIA's hosted models from [build.nvidia.com](https://build.nvidia.com) +- **Getting Started**: Sign up and get your API key at [build.nvidia.com](https://build.nvidia.com) + +The NVIDIA provider gives you access to state-of-the-art models including Nemotron and other NVIDIA-optimized models. + +### OpenAI Provider (`openai`) + +- **Endpoint**: `https://api.openai.com/v1` +- **API Key**: Set via `OPENAI_API_KEY` environment variable +- **Models**: Access to OpenAI's model catalog +- **Getting Started**: Get your API key from [platform.openai.com/api-keys](https://platform.openai.com/api-keys) + +The OpenAI provider gives you access to GPT models and other OpenAI offerings. + +### OpenRouter Provider (`openrouter`) + +- **Endpoint**: `https://openrouter.ai/api/v1` +- **API Key**: Set via `OPENROUTER_API_KEY` environment variable +- **Models**: Access to a wide variety of models through OpenRouter's unified API +- **Getting Started**: Get your API key from [openrouter.ai](https://openrouter.ai) + +The OpenRouter provider gives you access to a unified interface for many different language models from various providers. + +## Model Configurations + +Data Designer provides pre-configured model aliases for common use cases. When you create a `DataDesignerConfigBuilder` without specifying `model_configs`, these default configurations are automatically available. 
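+
+For example (a minimal sketch assuming `NVIDIA_API_KEY` is set), a column can reference one of the default aliases listed in the tables below with no provider or model setup:
+
+```python
+from data_designer.essentials import DataDesignerConfigBuilder, LLMTextColumnConfig
+
+# No model_configs argument: the default model configurations are loaded automatically
+config_builder = DataDesignerConfigBuilder()
+
+config_builder.add_column(
+    LLMTextColumnConfig(
+        name="tagline",
+        model_alias="nvidia-text",  # default alias backed by the NVIDIA provider
+        prompt="Write a one-sentence tagline for a synthetic data catalog.",
+    )
+)
+```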
+ +### NVIDIA Models + +The following model configurations are automatically available when `NVIDIA_API_KEY` is set: + +| Alias | Model | Use Case | Inference Parameters | +|-------|-------|----------|---------------------| +| `nvidia-text` | `nvidia/nemotron-3-nano-30b-a3b` | General text generation | `temperature=1.0, top_p=1.0` | +| `nvidia-reasoning` | `openai/gpt-oss-20b` | Reasoning and analysis tasks | `temperature=0.35, top_p=0.95` | +| `nvidia-vision` | `nvidia/nemotron-nano-12b-v2-vl` | Vision and image understanding | `temperature=0.85, top_p=0.95` | +| `nvidia-embedding` | `nvidia/llama-3.2-nv-embedqa-1b-v2` | Text embeddings | `encoding_format="float", extra_body={"input_type": "query"}` | + + +### OpenAI Models + +The following model configurations are automatically available when `OPENAI_API_KEY` is set: + +| Alias | Model | Use Case | Inference Parameters | +|-------|-------|----------|---------------------| +| `openai-text` | `gpt-4.1` | General text generation | `temperature=0.85, top_p=0.95` | +| `openai-reasoning` | `gpt-5` | Reasoning and analysis tasks | `temperature=0.35, top_p=0.95` | +| `openai-vision` | `gpt-5` | Vision and image understanding | `temperature=0.85, top_p=0.95` | +| `openai-embedding` | `text-embedding-3-large` | Text embeddings | `encoding_format="float"` | + +### OpenRouter Models + +The following model configurations are automatically available when `OPENROUTER_API_KEY` is set: + +| Alias | Model | Use Case | Inference Parameters | +|-------|-------|----------|---------------------| +| `openrouter-text` | `nvidia/nemotron-3-nano-30b-a3b` | General text generation | `temperature=1.0, top_p=1.0` | +| `openrouter-reasoning` | `openai/gpt-oss-20b` | Reasoning and analysis tasks | `temperature=0.35, top_p=0.95` | +| `openrouter-vision` | `nvidia/nemotron-nano-12b-v2-vl` | Vision and image understanding | `temperature=0.85, top_p=0.95` | +| `openrouter-embedding` | `openai/text-embedding-3-large` | Text embeddings | `encoding_format="float"` | + + +## Using Default Settings + +Default settings work out of the box - no configuration needed! Simply create `DataDesigner` and `DataDesignerConfigBuilder` instances without any arguments, and reference the default model aliases in your column configurations. + +For a complete example showing how to use default model settings, see the **[Quick Start Guide](/docs/quick-start)**. + +### How Default Model Providers and Configurations Work + +When the Data Designer library or the CLI is initialized, default model configurations and providers are stored in the Data Designer home directory for easy access and customization if they do not already exist. These configuration files serve as the single source of truth for model settings. By default they are saved to the following paths: + +- **Model Configs**: `~/.data-designer/model_configs.yaml` +- **Model Providers**: `~/.data-designer/model_providers.yaml` + + +While these files provide a convenient way to specify settings for your model providers and configuration you use most often, they can always be set programmatically in your SDG workflow. + + +You can customize the home directory location by setting the `DATA_DESIGNER_HOME` environment variable: + +```bash +# In your .bashrc, .zshrc, or similar +export DATA_DESIGNER_HOME="/path/to/your/custom/directory" +``` + +These configuration files can be modified in two ways: + +1. **Using the CLI**: Run CLI commands to add, update, or delete model configurations and providers +2. 
**Manual editing**: Directly edit the YAML files with your preferred text editor + +Both methods operate on the same files, ensuring consistency across your entire Data Designer setup. + +## Important Notes + + +While default model configurations are always available, you need to set the appropriate API key environment variable (`NVIDIA_API_KEY`, `OPENAI_API_KEY`, or `OPENROUTER_API_KEY`) to actually use the corresponding models for data generation. Without a valid API key, any attempt to generate data using that provider's models will fail. + + + +Store your API keys in environment variables rather than hardcoding them in your scripts: + +```bash +# In your .bashrc, .zshrc, or similar +export NVIDIA_API_KEY="your-api-key-here" +export OPENAI_API_KEY="your-openai-api-key-here" +export OPENROUTER_API_KEY="your-openrouter-api-key-here" +``` + + +## See Also + +- **[Custom Model Settings](/docs/concepts/models/custom-model-settings)**: Learn how to create custom providers and model configurations +- **[Configure Model Settings With the CLI](/docs/concepts/models/configure-with-cli)**: Learn how to use the CLI to manage model settings +- **[Model Configurations](/docs/concepts/models/model-configs)**: Learn about model configurations diff --git a/fern/pages/concepts/models/inference-parameters.mdx b/fern/pages/concepts/models/inference-parameters.mdx new file mode 100644 index 00000000..866bd229 --- /dev/null +++ b/fern/pages/concepts/models/inference-parameters.mdx @@ -0,0 +1,153 @@ +--- +title: Inference Parameters +description: Control model behavior during synthetic data generation. +--- + +# Inference Parameters + +Inference parameters control how models generate responses during synthetic data generation. Data Designer provides two types of inference parameters: `ChatCompletionInferenceParams` for text/code/structured generation and `EmbeddingInferenceParams` for embedding generation. + +## Overview + +When you create a `ModelConfig`, you can specify inference parameters to adjust model behavior. These parameters control aspects like randomness (temperature), diversity (top_p), context size (max_tokens), and more. Data Designer supports both static values and dynamic distribution-based sampling for certain parameters. + +## Chat Completion Inference Parameters + +The `ChatCompletionInferenceParams` class controls how models generate text completions (for text, code, and structured data generation). It provides fine-grained control over generation behavior and supports both static values and dynamic distribution-based sampling. + +### Fields + +| Field | Type | Required | Description | +|-------|------|----------|-------------| +| `temperature` | `float` or `Distribution` | No | Controls randomness in generation (0.0 to 2.0). Higher values = more creative/random | +| `top_p` | `float` or `Distribution` | No | Nucleus sampling parameter (0.0 to 1.0). Controls diversity by filtering low-probability tokens | +| `max_tokens` | `int` | No | Maximum number of tokens to generate in the response (≥ 1) | +| `max_parallel_requests` | `int` | No | Maximum concurrent API requests (default: 4, ≥ 1) | +| `timeout` | `int` | No | API request timeout in seconds (≥ 1) | +| `extra_body` | `dict[str, Any]` | No | Additional parameters to include in the API request body | + + +If `temperature`, `top_p`, or `max_tokens` are not provided, the model provider's default values will be used. Different providers and models may have different defaults. 
+ + + +For gpt-oss models like `gpt-oss-20b` and `gpt-oss-120b`, you can control the reasoning effort using the `extra_body` parameter: + +```python +from data_designer.essentials import ChatCompletionInferenceParams + +# High reasoning effort (more thorough, slower) +inference_parameters = ChatCompletionInferenceParams( + extra_body={"reasoning_effort": "high"} +) + +# Medium reasoning effort (balanced) +inference_parameters = ChatCompletionInferenceParams( + extra_body={"reasoning_effort": "medium"} +) + +# Low reasoning effort (faster, less thorough) +inference_parameters = ChatCompletionInferenceParams( + extra_body={"reasoning_effort": "low"} +) +``` + + +### Temperature and Top P Guidelines + +- **Temperature**: + - `0.0-0.3`: Highly deterministic, focused outputs (ideal for structured/reasoning tasks) + - `0.4-0.7`: Balanced creativity and coherence (general purpose) + - `0.8-1.0`: Creative, diverse outputs (ideal for creative writing) + - `1.0+`: Highly random and experimental + +- **Top P**: + - `0.1-0.5`: Very focused, only most likely tokens + - `0.6-0.9`: Balanced diversity + - `0.95-1.0`: Maximum diversity, including less likely tokens + + +When tuning both parameters simultaneously, consider these combinations: + +- **For deterministic/structured outputs**: Low temperature (`0.0-0.3`) + moderate-to-high top_p (`0.8-0.95`) + - The low temperature ensures focus, while top_p allows some token diversity +- **For balanced generation**: Moderate temperature (`0.5-0.7`) + high top_p (`0.9-0.95`) + - This is a good starting point for most use cases +- **For creative outputs**: Higher temperature (`0.8-1.0`) + high top_p (`0.95-1.0`) + - Both parameters work together to maximize diversity + +**Avoid**: Setting both very low (overly restrictive) or adjusting both dramatically at once. When experimenting, adjust one parameter at a time to understand its individual effect. + + +## Distribution-Based Inference Parameters + +For `temperature` and `top_p` in `ChatCompletionInferenceParams`, you can specify distributions instead of fixed values. This allows Data Designer to sample different values for each generation request, introducing controlled variability into your synthetic data. + +### Uniform Distribution + +Samples values uniformly between a low and high bound: + +```python +from data_designer.essentials import ( + ChatCompletionInferenceParams, + UniformDistribution, + UniformDistributionParams, +) + +inference_params = ChatCompletionInferenceParams( + temperature=UniformDistribution( + params=UniformDistributionParams(low=0.7, high=1.0) + ), +) +``` + +### Manual Distribution + +Samples from a discrete set of values with optional weights: + +```python +from data_designer.essentials import ( + ChatCompletionInferenceParams, + ManualDistribution, + ManualDistributionParams, +) + +# Equal probability for each value +inference_params = ChatCompletionInferenceParams( + temperature=ManualDistribution( + params=ManualDistributionParams(values=[0.5, 0.7, 0.9]) + ), +) + +# Weighted probabilities (normalized automatically) +inference_params = ChatCompletionInferenceParams( + top_p=ManualDistribution( + params=ManualDistributionParams( + values=[0.8, 0.9, 0.95], + weights=[0.2, 0.5, 0.3] # 20%, 50%, 30% probability + ) + ), +) +``` + +## Embedding Inference Parameters + +The `EmbeddingInferenceParams` class controls how models generate embeddings. This is used when working with embedding models for tasks like semantic search or similarity analysis. 
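+
+For example, an embedding model configuration might look like the following (a minimal sketch mirroring the default NVIDIA embedding setup; the alias is a placeholder and the `extra_body` contents are provider-specific):
+
+```python
+from data_designer.essentials import EmbeddingInferenceParams, ModelConfig
+
+embedding_model = ModelConfig(
+    alias="my-embedding-model",
+    model="nvidia/llama-3.2-nv-embedqa-1b-v2",
+    provider="nvidia",
+    inference_parameters=EmbeddingInferenceParams(
+        encoding_format="float",
+        extra_body={"input_type": "query"},  # provider-specific request parameter
+    ),
+)
+```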
+ +### Fields + +| Field | Type | Required | Description | +|-------|------|----------|-------------| +| `encoding_format` | `Literal["float", "base64"]` | No | Format of the embedding encoding (default: "float") | +| `dimensions` | `int` | No | Number of dimensions for the embedding | +| `max_parallel_requests` | `int` | No | Maximum concurrent API requests (default: 4, ≥ 1) | +| `timeout` | `int` | No | API request timeout in seconds (≥ 1) | +| `extra_body` | `dict[str, Any]` | No | Additional parameters to include in the API request body | + + +## See Also + +- **[Default Model Settings](/docs/concepts/models/default-model-settings)**: Pre-configured model settings included with Data Designer +- **[Custom Model Settings](/docs/concepts/models/custom-model-settings)**: Learn how to create custom providers and model configurations +- **[Model Configurations](/docs/concepts/models/model-configs)**: Learn about configuring model settings +- **[Model Providers](/docs/concepts/models/model-providers)**: Learn about configuring model providers diff --git a/fern/pages/concepts/models/model-configs.mdx b/fern/pages/concepts/models/model-configs.mdx new file mode 100644 index 00000000..850d3d28 --- /dev/null +++ b/fern/pages/concepts/models/model-configs.mdx @@ -0,0 +1,127 @@ +--- +title: Model Configs +description: Configure model settings for synthetic data generation. +--- + +# Model Configurations + +Model configurations define the specific models you use for synthetic data generation and their associated inference parameters. Each `ModelConfig` represents a named model that can be referenced throughout your data generation workflows. + +## Overview + +A `ModelConfig` specifies which LLM model to use and how it should behave during generation. When you create column configurations (like `LLMText`, `LLMCode`, or `LLMStructured`), you reference a model by its alias. Data Designer uses the model configuration to determine which model to call and with what parameters. + +## ModelConfig Structure + +The `ModelConfig` class has the following fields: + +| Field | Type | Required | Description | +|-------|------|----------|-------------| +| `alias` | `str` | Yes | Unique identifier for this model configuration (e.g., `"my-text-model"`, `"reasoning-model"`) | +| `model` | `str` | Yes | Model identifier as recognized by the provider (e.g., `"nvidia/nemotron-3-nano-30b-a3b"`, `"gpt-4"`) | +| `inference_parameters` | `InferenceParamsT` | No | Controls model behavior during generation. Use `ChatCompletionInferenceParams` for text/code/structured generation or `EmbeddingInferenceParams` for embeddings. Defaults to `ChatCompletionInferenceParams()` if not provided. The generation type is automatically determined by the inference parameters type. See [Inference Parameters](/docs/concepts/models/inference-parameters) for details. | +| `provider` | `str` | No | Reference to the name of the Provider to use (e.g., `"nvidia"`, `"openai"`, `"openrouter"`). 
If not specified, one set as the default provider, which may resolve to the first provider if there are more than one | + + +## Examples + +### Basic Model Configuration + +```python +from data_designer.essentials import ChatCompletionInferenceParams, ModelConfig + +# Simple model configuration with fixed parameters +model_config = ModelConfig( + alias="my-text-model", + model="nvidia/nemotron-3-nano-30b-a3b", + provider="nvidia", + inference_parameters=ChatCompletionInferenceParams( + temperature=0.85, + top_p=0.95, + max_tokens=2048, + ), +) +``` + +### Multiple Model Configurations for Different Tasks + +```python +from data_designer.essentials import ( + ChatCompletionInferenceParams, + EmbeddingInferenceParams, + GenerationType, + ModelConfig +) + +model_configs = [ + # Creative tasks + ModelConfig( + alias="creative-model", + model="nvidia/nemotron-3-nano-30b-a3b", + provider="nvidia", + inference_parameters=ChatCompletionInferenceParams( + temperature=0.9, + top_p=0.95, + max_tokens=2048, + ), + ), + # Critic tasks + ModelConfig( + alias="critic-model", + model="nvidia/nemotron-3-nano-30b-a3b", + provider="nvidia", + inference_parameters=ChatCompletionInferenceParams( + temperature=0.25, + top_p=0.95, + max_tokens=2048, + ), + ), + # Reasoning and structured tasks + ModelConfig( + alias="reasoning-model", + model="openai/gpt-oss-20b", + provider="nvidia", + inference_parameters=ChatCompletionInferenceParams( + temperature=0.3, + top_p=0.9, + max_tokens=4096, + ), + ), + # Vision tasks + ModelConfig( + alias="vision-model", + model="nvidia/nemotron-nano-12b-v2-vl", + provider="nvidia", + inference_parameters=ChatCompletionInferenceParams( + temperature=0.7, + top_p=0.95, + max_tokens=2048, + ), + ), + # Embedding tasks + ModelConfig( + alias="embedding_model", + model="nvidia/llama-3.2-nv-embedqa-1b-v2", + provider="nvidia", + inference_parameters=EmbeddingInferenceParams( + encoding_format="float", + extra_body={ + "input_type": "query" + } + ) + ) +] +``` + + +The number of tokens required to generate a single data entry can vary significantly with use case. For example, reasoning models often need more tokens to "think through" problems before generating a response. Note that `max_tokens` specifies the **maximum number of output tokens** to generate in the response, so set this value based on the expected length of the generated content. + + +## See Also + +- **[Inference Parameters](/docs/concepts/models/inference-parameters)**: Detailed guide to inference parameters and how to configure them +- **[Model Providers](/docs/concepts/models/model-providers)**: Learn about configuring model providers +- **[Default Model Settings](/docs/concepts/models/default-model-settings)**: Pre-configured model settings included with Data Designer +- **[Custom Model Settings](/docs/concepts/models/custom-model-settings)**: Learn how to create custom providers and model configurations +- **[Configure Model Settings With the CLI](/docs/concepts/models/configure-with-cli)**: Use the CLI to manage model settings +- **[Column Configurations](/api/column-configs)**: Learn how to use models in column configurations diff --git a/fern/pages/concepts/models/model-providers.mdx b/fern/pages/concepts/models/model-providers.mdx new file mode 100644 index 00000000..e47ed1e1 --- /dev/null +++ b/fern/pages/concepts/models/model-providers.mdx @@ -0,0 +1,58 @@ +--- +title: Model Providers +description: Configure connections to model hosting services. 
+--- + +# Model Providers + +Model providers are external services that host and serve models. Data Designer uses the `ModelProvider` class to configure connections to these services. + +## Overview + +A `ModelProvider` defines how Data Designer connects to a provider's API endpoint. When you create a `ModelConfig`, you reference a provider by name, and Data Designer uses that provider's settings to make API calls to the appropriate endpoint. + +## ModelProvider Configuration + +The `ModelProvider` class has the following fields: + +| Field | Type | Required | Description | +|-------|------|----------|-------------| +| `name` | `str` | Yes | Unique identifier for the provider (e.g., `"nvidia"`, `"openai"`, `"openrouter"`) | +| `endpoint` | `str` | Yes | API endpoint URL (e.g., `"https://integrate.api.nvidia.com/v1"`) | +| `provider_type` | `str` | No | Provider type (default: `"openai"`). Uses OpenAI-compatible API format | +| `api_key` | `str` | No | API key or environment variable name (e.g., `"NVIDIA_API_KEY"`) | +| `extra_body` | `dict[str, Any]` | No | Additional parameters to include in the request body of all API requests to the provider. | +| `extra_headers` | `dict[str, str]` | No | Additional headers to include in all API requests to the provider. | + +## API Key Configuration + +The `api_key` field can be specified in two ways: + +1. **Environment variable name** (recommended): Set `api_key` to the name of an environment variable (e.g., `"NVIDIA_API_KEY"`). Data Designer will automatically resolve it at runtime. + +2. **Plain-text value**: Set `api_key` to the actual API key string. This is less secure and not recommended for production use. + +```python +# Method 1: Environment variable (recommended) +provider = ModelProvider( + name="nvidia", + endpoint="https://integrate.api.nvidia.com/v1", + api_key="NVIDIA_API_KEY", # Will be resolved from environment +) + +# Method 2: Direct value (not recommended) +provider = ModelProvider( + name="nvidia", + endpoint="https://integrate.api.nvidia.com/v1", + api_key="nvapi-abc123...", # Direct API key +) +``` + +## See Also + +- **[Model Configurations](/docs/concepts/models/model-configs)**: Learn about configuring models +- **[Inference Parameters](/docs/concepts/models/inference-parameters)**: Detailed guide to inference parameters and how to configure them +- **[Default Model Settings](/docs/concepts/models/default-model-settings)**: Pre-configured providers and model settings included with Data Designer +- **[Custom Model Settings](/docs/concepts/models/custom-model-settings)**: Learn how to create custom providers and model configurations +- **[Configure Model Settings With the CLI](/docs/concepts/models/configure-with-cli)**: Use the CLI to manage providers and model settings +- **[Quick Start Guide](/docs/quick-start)**: Get started with a simple example diff --git a/fern/pages/concepts/person-sampling.mdx b/fern/pages/concepts/person-sampling.mdx new file mode 100644 index 00000000..650ae0c7 --- /dev/null +++ b/fern/pages/concepts/person-sampling.mdx @@ -0,0 +1,222 @@ +--- +title: Person Sampling +description: Generate synthetic person data for your datasets. +--- + +# Person Sampling in Data Designer + +Person sampling in Data Designer allows you to generate synthetic person data for your datasets. There are two distinct approaches, each with different capabilities and use cases. + +## Overview + +Data Designer provides two ways to generate synthetic people: + +1. 
**Faker-based sampling** - Quick, basic PII generation for testing or when realistic demographic distributions are not relevant for your use case +2. **Nemotron-Personas datasets** - Demographically accurate, rich persona data + +--- + +## Approach 1: Faker-Based Sampling + +### What It Does +Uses the Faker library to generate random personal information. The data is basic and not demographically accurate, but is useful for quick testing, prototyping, or when realistic demographic distributions are not relevant for your use case. + +### Features +- Gives you access to person attributes that Faker exposes +- Quick to set up with no additional downloads +- Generates random names, emails, addresses, phone numbers, etc. +- Supports [all Faker-supported locales](https://faker.readthedocs.io/en/master/locales.html) +- **Not demographically grounded** - data patterns don't reflect real-world demographics + +### Usage Example +```python +from data_designer.essentials import ( + SamplerColumnConfig, + SamplerType, + PersonFromFakerSamplerParams, +) + +config_builder.add_column( + SamplerColumnConfig( + name="customer", + sampler_type=SamplerType.PERSON_FROM_FAKER, + params=PersonFromFakerSamplerParams( + locale="en_US", + age_range=[25, 65], + sex="Female", + ), + ) +) +``` + +For more details, see the documentation for [`SamplerColumnConfig`](/api/column-configs) and [`PersonFromFakerSamplerParams`](/api/sampler-params). + +--- + +## Approach 2: Nemotron-Personas Datasets + +### What It Does +Uses curated Nemotron-Personas datasets from NVIDIA GPU Cloud (NGC) to generate demographically accurate person data with rich personality profiles and behavioral characteristics. + +The NGC datasets are extended versions of the [open-source Nemotron-Personas datasets on HuggingFace](https://huggingface.co/collections/nvidia/nemotron-personas), with additional fields and enhanced data quality. + +Supported locales: + +- `en_US`: United States +- `ja_JP`: Japan +- `en_IN`: India +- `hi_Deva_IN`: India (Devanagari script) +- `hi_Latn_IN`: India (Latin script) + +### Features +- **Demographically accurate personal details**: Names, ages, sex, marital status, education, occupation based on census data +- **Rich persona details**: Comprehensive behavioral profiles including: + - Big Five personality traits with scores + - Cultural backgrounds and narratives + - Skills and hobbies + - Career goals and aspirations + - Context-specific personas (professional, financial, healthcare, sports, arts, travel, culinary, etc.) +- Consistent, referenceable attributes across your dataset +- Grounded in real-world demographic distributions + +### Prerequisites + +To use the extended Nemotron-Personas datasets with Data Designer, you need to download them [from NGC](https://catalog.ngc.nvidia.com/search?orderBy=scoreDESC&query=nemotron+personas) and move them to the Data Designer managed assets directory. + +See below for step-by-step instructions. + +### Nemotron-Personas Datasets Setup Instructions + +#### Step 0: Obtain an NGC API Key and install the NGC CLI + +To download the Nemotron-Personas datasets from NGC, you will need to obtain an NGC API key and install the NGC CLI. + +1. **NGC API Key**: Obtain from [NVIDIA GPU Cloud](https://ngc.nvidia.com/) +2. 
**NGC CLI**: [NGC CLI](https://org.ngc.nvidia.com/setup/installers/cli) + + +#### Step 1: Set Your NGC API Key +```bash +export NGC_API_KEY="your-ngc-api-key-here" +``` + +#### Step 2 (option 1): Download Nemotron-Personas Datasets via the Data Designer CLI + +Once you have the NGC CLI and your NGC API key set up, you can download the datasets via the Data Designer CLI. + +You can pass the locales you want to download as arguments to the CLI command: +```bash +data-designer download personas --locale en_US --locale ja_JP +``` + +Or you can use the interactive mode to select the locales you want to download: +```bash +data-designer download personas +``` + +#### Step 2 (option 2): Download Nemotron-Personas Datasets Directly + +Use the NGC CLI to download the datasets: +```bash +# For Nemotron-Personas USA +ngc registry resource download-version "nvidia/nemotron-personas/nemotron-personas-dataset-en_us" + +# For Nemotron-Personas IN +ngc registry resource download-version "nvidia/nemotron-personas/nemotron-personas-dataset-hi_deva_in" +ngc registry resource download-version "nvidia/nemotron-personas/nemotron-personas-dataset-hi_latn_in" +ngc registry resource download-version "nvidia/nemotron-personas/nemotron-personas-dataset-en_in" + +# For Nemotron-Personas JP +ngc registry resource download-version "nvidia/nemotron-personas/nemotron-personas-dataset-ja_jp" +``` + +Then move the downloaded dataset to the Data Designer managed assets directory: +```bash +mkdir -p ~/.data-designer/managed-assets/datasets/ +mv nemotron-personas-dataset-*/*.parquet ~/.data-designer/managed-assets/datasets/ +``` + +#### Step 3: Use PersonSampler in Your Code +```python +from data_designer.essentials import ( + SamplerColumnConfig, + SamplerType, + PersonSamplerParams, +) + +config_builder.add_column( + SamplerColumnConfig( + name="customer", + sampler_type=SamplerType.PERSON, + params=PersonSamplerParams( + locale="en_US", + sex="Female", + age_range=[25, 45], + with_synthetic_personas=True, + ), + ) +) +``` + +For more details, see the documentation for [`SamplerColumnConfig`](/api/column-configs) and [`PersonSamplerParams`](/api/sampler-params). 
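+
+Filters can also be combined. For instance (a sketch using the `select_field_values` option described in the parameter table below, and assuming a `config_builder` as in the example above), you can restrict sampled personas by state and education level:
+
+```python
+from data_designer.essentials import (
+    PersonSamplerParams,
+    SamplerColumnConfig,
+    SamplerType,
+)
+
+config_builder.add_column(
+    SamplerColumnConfig(
+        name="customer",
+        sampler_type=SamplerType.PERSON,
+        params=PersonSamplerParams(
+            locale="en_US",
+            age_range=[30, 60],
+            # Keep only personas whose fields match these values
+            select_field_values={
+                "state": ["NY", "CA"],
+                "education_level": ["bachelors"],
+            },
+        ),
+    )
+)
+```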
+ +### Available Data Fields + +**Core Fields (all locales):** + +| Field | Type | Notes | +|-------|------|-------| +| `uuid` | UUID | Unique identifier | +| `first_name` | string | | +| `middle_name` | string | | +| `last_name` | string | | +| `sex` | enum | "Male" or "Female" | +| `birth_date` | date | Derived: year, month, day | +| `street_number` | int | | +| `street_name` | string | | +| `unit` | string | Address line 2 | +| `city` | string | | +| `region` | string | Alias: state | +| `district` | string | Alias: county | +| `postcode` | string | Alias: zipcode | +| `country` | string | | +| `phone_number` | PhoneNumber | Derived: area_code, country_code, prefix, line_number | +| `marital_status` | string | Values: never_married, married_present, separated, widowed, divorced | +| `education_level` | string or None | | +| `bachelors_field` | string or None | | +| `occupation` | string or None | | +| `email_address` | string | | +| `national_id` | string | + +**Japan-Specific Fields (`ja_JP`):** + +- `area` + +**India-Specific Fields (`en_IN`, `hi_IN`, `hi_Deva_IN`, `hi_Latn_IN`):** + +- `religion` - Census-reported religion +- `education_degree` - Census-reported education degree +- `first_language` - Native language +- `second_language` - Second language (if applicable) +- `third_language` - Third language (if applicable) +- `zone` - Urban vs rural + +**With Synthetic Personas Enabled:** + +- Big Five personality traits (Openness, Conscientiousness, Extraversion, Agreeableness, Neuroticism) with t-scores and labels +- Cultural background narratives +- Skills and competencies +- Hobbies and interests +- Career goals +- Context-specific personas (professional, financial, healthcare, sports, arts & entertainment, travel, culinary, etc.) + +### Configuration Parameters + +| Parameter | Type | Description | +|-----------|------|-------------| +| `locale` | str | Language/region code - must be one of: "en_US", "ja_JP", "en_IN", "hi_Deva_IN", "hi_Latn_IN" | +| `sex` | str (optional) | Filter by "Male" or "Female" | +| `city` | str or list[str] (optional) | Filter by specific city or cities within locale | +| `age_range` | list[int] (optional) | Two-element list [min_age, max_age] (default: [18, 114]) | +| `with_synthetic_personas` | bool (optional) | Include rich personality profiles (default: False) | +| `select_field_values` | dict (optional) | Custom field-based filtering (e.g., `{"state": ["NY", "CA"], "education_level": ["bachelors"]}`) | diff --git a/fern/pages/concepts/processors.mdx b/fern/pages/concepts/processors.mdx new file mode 100644 index 00000000..04e601e8 --- /dev/null +++ b/fern/pages/concepts/processors.mdx @@ -0,0 +1,160 @@ +--- +title: Processors +description: Transformations that modify your dataset before or after columns are generated. +--- + +# Processors + +Processors are transformations that modify your dataset before or after columns are generated. They run at different stages and can reshape, filter, or augment the data. + + +Processors handle transformations that don't fit the "column" model: restructuring the schema for a specific output format, dropping intermediate columns in bulk, or applying batch-wide operations. + + +## Overview + +Each processor: + +- Receives the complete batch DataFrame +- Applies its transformation +- Passes the result to the next processor (or to output) + +Currently, processors run only at the `POST_BATCH` stage, i.e., after column generation completes for each batch. 
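Conceptually, the data flow at `POST_BATCH` looks like the sketch below. This is plain pandas and purely illustrative — the built-in processors are configured declaratively, as shown in the sections that follow, rather than written as functions:

```python
import pandas as pd

# Purely conceptual: each processor receives the full batch as a DataFrame,
# applies its transformation, and hands the result to the next processor.
batch = pd.DataFrame(
    {
        "question": ["What is RAG?"],
        "answer": ["Retrieval-augmented generation."],
        "debug_info": ["scratch notes"],
    }
)


def drop_debug_columns(df: pd.DataFrame) -> pd.DataFrame:
    return df.drop(columns=["debug_info"])


# The next processor (or the output writer) sees this transformed result.
batch = drop_debug_columns(batch)
```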
+ +## Processor Types + +### 🗑️ Drop Columns Processor + +Removes specified columns from the output dataset. Dropped columns are saved separately in the `dropped-columns` directory for reference. + + +The Drop Columns Processor is different from others in the sense that it does not need to be explicitly added: setting `drop = True` when configuring a column will accomplish the same. + + +**Configuration:** + +```python +from data_designer.essentials import DropColumnsProcessorConfig + +processor = DropColumnsProcessorConfig( + name="remove_intermediate", + column_names=["temp_calculation", "raw_input", "debug_info"], +) +``` + +**Behavior:** + +- Columns specified in `column_names` are removed from the output +- Original values are preserved in a separate parquet file +- Missing columns produce a warning but don't fail the build +- Column configs are automatically marked with `drop=True` when this processor is added + +**Use Cases:** + +- Removing intermediate columns used only for LLM context +- Cleaning up debug or validation columns before final output +- Separating sensitive data from the main dataset + +### 🔄 Schema Transform Processor + +Creates an additional dataset with a transformed schema using Jinja2 templates. The output is written to a separate directory alongside the main dataset. + +**Configuration:** + +```python +from data_designer.essentials import SchemaTransformProcessorConfig + +processor = SchemaTransformProcessorConfig( + name="chat_format", + template={ + "messages": [ + {"role": "user", "content": "{{ question }}"}, + {"role": "assistant", "content": "{{ answer }}"}, + ], + "metadata": "{{ category | upper }}", + }, +) +``` + +**Behavior:** + +- Each key in `template` becomes a column in the transformed dataset +- Values are Jinja2 templates with access to all columns in the batch +- Complex structures (lists, nested dicts) are supported +- Output is saved to the `processors-outputs/{name}/` directory +- The original dataset passes through unchanged + +**Template Capabilities:** + +- **Variable substitution**: `{{ column_name }}` +- **Filters**: `{{ text | upper }}`, `{{ text | lower }}`, `{{ text | trim }}` +- **Nested structures**: Arbitrarily deep JSON structures +- **Lists**: `["{{ col1 }}", "{{ col2 }}"]` + +**Use Cases:** + +- Converting flat columns to chat message format +- Restructuring data for specific model training formats +- Creating derived views without modifying the source dataset + +## Using Processors + +Add processors to your configuration using the builder's `add_processor` method: + +```python +from data_designer.essentials import ( + DataDesignerConfigBuilder, + DropColumnsProcessorConfig, + SchemaTransformProcessorConfig, +) + +builder = DataDesignerConfigBuilder() + +# ... add columns ... + +# Drop intermediate columns +builder.add_processor( + DropColumnsProcessorConfig( + name="cleanup", + column_names=["scratch_work", "raw_context"], + ) +) + +# Transform to chat format +builder.add_processor( + SchemaTransformProcessorConfig( + name="chat_format", + template={ + "messages": [ + {"role": "user", "content": "{{ question }}"}, + {"role": "assistant", "content": "{{ answer }}"}, + ], + }, + ) +) +``` + +### Execution Order + +Processors execute in the order they're added. Plan accordingly when one processor's output affects another. 
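For example, if a schema transform reads columns that a drop processor removes, add the transform first so its templates can still see those columns. The sketch below continues from the `builder` configured above; the column names are illustrative, and it assumes dropped columns are no longer visible to later processors:

```python
from data_designer.essentials import (
    DropColumnsProcessorConfig,
    SchemaTransformProcessorConfig,
)

# Run the schema transform while "question" and "answer" still exist...
builder.add_processor(
    SchemaTransformProcessorConfig(
        name="chat_format",
        template={
            "messages": [
                {"role": "user", "content": "{{ question }}"},
                {"role": "assistant", "content": "{{ answer }}"},
            ],
        },
    )
)

# ...then drop them. Reversing this order could leave the transform's
# templates with nothing to render.
builder.add_processor(
    DropColumnsProcessorConfig(
        name="cleanup",
        column_names=["question", "answer"],
    )
)
```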
+ +## Configuration Parameters + +### Common Parameters + +| Parameter | Type | Description | +|-----------|------|-------------| +| `name` | str | Identifier for the processor, used in output directory names | +| `build_stage` | BuildStage | When to run (default: `POST_BATCH`) | + +### DropColumnsProcessorConfig + +| Parameter | Type | Description | +|-----------|------|-------------| +| `column_names` | list[str] | Columns to remove from output | + +### SchemaTransformProcessorConfig + +| Parameter | Type | Description | +|-----------|------|-------------| +| `template` | dict[str, Any] | Jinja2 template defining the output schema. Must be JSON-serializable. | diff --git a/fern/pages/concepts/validators.mdx b/fern/pages/concepts/validators.mdx new file mode 100644 index 00000000..0f2202f0 --- /dev/null +++ b/fern/pages/concepts/validators.mdx @@ -0,0 +1,347 @@ +--- +title: Validators +description: Quality assurance mechanisms that check generated content against rules. +--- + +# Validators + +Validators are quality assurance mechanisms in Data Designer that check generated content against rules and return structured pass/fail results. They enable automated verification of data for correctness, code quality, and adherence to specifications. + + +Validators act as **quality gates** in your generation pipeline. Use them to filter invalid records, score code quality, verify format compliance, or integrate with external validation services. + + +## Overview + +Validation columns execute validation logic against target columns and produce structured results indicating: + +- **`is_valid`**: Boolean pass/fail status +- **Additional metadata**: Error messages, scores, severity levels, and custom fields + +Validators currently support three execution strategies: + +1. **Code validation**: Lint and check Python or SQL code using industry-standard tools +2. **Local callable validation**: Execute custom Python functions for flexible validation logic +3. **Remote validation**: Send data to HTTP endpoints for external validation services + +## Validator Types + +### 🐍 Python Code Validator + +The Python code validator runs generated Python code through [Ruff](https://github.com/astral-sh/ruff), a fast Python linter that checks for syntax errors, undefined variables, and code quality issues. + +**Configuration:** + +```python +from data_designer.essentials import CodeLang, CodeValidatorParams + +validator_params = CodeValidatorParams(code_lang=CodeLang.PYTHON) +``` + +**Validation Output:** + +Each validated record returns: + +- **`is_valid`**: `True` if no fatal or error-level issues found +- **`python_linter_score`**: Quality score from 0-10 (based on pylint formula) +- **`python_linter_severity`**: Highest severity level found (`"none"`, `"convention"`, `"refactor"`, `"warning"`, `"error"`, `"fatal"`) +- **`python_linter_messages`**: List of linter messages with line numbers, columns, and descriptions + +**Severity Levels:** + +- **Fatal**: Syntax errors preventing code execution +- **Error**: Undefined names, invalid syntax +- **Warning**: Code smells and potential issues +- **Refactor**: Simplification opportunities +- **Convention**: Style guide violations + +A record is marked valid if it has no messages or only messages at warning/convention/refactor levels. 
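For instance, a snippet that references an undefined name is reported at error severity and marked invalid. The one-liner below, which uses the undefined name `it`, is the kind of input that yields the example result shown next:

```python
print(it)  # `it` is never defined, so the linter reports F821 (undefined name)
```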
+ +**Example Validation Result:** + +```python +{ + "is_valid": False, + "python_linter_score": 0, + "python_linter_severity": "error", + "python_linter_messages": [ + { + "type": "error", + "symbol": "F821", + "line": 1, + "column": 7, + "message": "Undefined name `it`" + } + ] +} +``` + +### 🗄️ SQL Code Validator + +The SQL code validator uses [SQLFluff](https://github.com/sqlfluff/sqlfluff), a dialect-aware SQL linter that checks query syntax and structure. + +**Configuration:** + +```python +from data_designer.essentials import CodeLang, CodeValidatorParams + +validator_params = CodeValidatorParams(code_lang=CodeLang.SQL_POSTGRES) +``` + + +The SQL code validator supports multiple dialects: `SQL_POSTGRES`, `SQL_ANSI`, `SQL_MYSQL`, `SQL_SQLITE`, `SQL_TSQL` and `SQL_BIGQUERY`. + + +**Validation Output:** + +Each validated record returns: + +- **`is_valid`**: `True` if no parsing errors found +- **`error_messages`**: Concatenated error descriptions (empty string if valid) + +The validator focuses on parsing errors (PRS codes) that indicate malformed SQL. It also checks for common pitfalls like `DECIMAL` definitions without scale parameters. + +**Example Validation Result:** + +```python +# Valid SQL +{ + "is_valid": True, + "error_messages": "" +} + +# Invalid SQL +{ + "is_valid": False, + "error_messages": "PRS: Line 1, Position 1: Found unparsable section: 'NOT SQL'" +} +``` + +### 🔧 Local Callable Validator + +The local callable validator executes custom Python functions for flexible validation logic. + +**Configuration:** + +```python +import pandas as pd + +from data_designer.essentials import LocalCallableValidatorParams + +def my_validation_function(df: pd.DataFrame) -> pd.DataFrame: + """Validate that values are positive. + + Args: + df: DataFrame with target columns + + Returns: + DataFrame with is_valid column and optional metadata + """ + result = pd.DataFrame() + result["is_valid"] = df["price"] > 0 + result["error_message"] = result["is_valid"].apply( + lambda valid: "" if valid else "Price must be positive" + ) + return result + +validator_params = LocalCallableValidatorParams( + validation_function=my_validation_function, + output_schema={ # Optional: enforce output schema + "type": "object", + "properties": { + "data": { + "type": "array", + "items": { + "type": "object", + "properties": { + "is_valid": {"type": ["boolean", "null"]}, + "error_message": {"type": "string"} + }, + "required": ["is_valid"] + } + } + } + } +) +``` + +**Function Requirements:** + +- **Input**: DataFrame with target columns +- **Output**: DataFrame with `is_valid` column (boolean or null) +- **Extra fields**: Any additional columns become validation metadata + +The `output_schema` parameter is optional but recommended—it validates the function's output against a JSON schema, catching unexpected return formats. + +### 🌐 Remote Validator + +The remote validator sends data to HTTP endpoints for validation-as-a-service. This is useful for when you have validation software that needs to run on external compute and you can expose it through a service. Some examples are: + +- External linting services +- Security scanners +- Domain-specific validators +- Proprietary validation systems + + +Currently, the remote validator is only able to perform unauthenticated API calls. When implementing your own service, you can rely on network isolation for security. If you need to reach a service that requires authentication, you should implement a local proxy. 
+ + +**Configuration:** + +```python +from data_designer.essentials import RemoteValidatorParams + +validator_params = RemoteValidatorParams( + endpoint_url="https://api.example.com/validate", + timeout=30.0, # Request timeout in seconds + max_retries=3, # Retry attempts on failure + retry_backoff=2.0, # Exponential backoff factor + max_parallel_requests=4, # Concurrent request limit + output_schema={ # Optional: enforce response schema + "type": "object", + "properties": { + "data": { + "type": "array", + "items": { + "type": "object", + "properties": { + "is_valid": {"type": ["boolean", "null"]}, + "confidence": {"type": "string"} + } + } + } + } + } +) +``` + +**Request Format:** + +The validator sends POST requests with this structure: + +```json +{ + "data": [ + {"column1": "value1", "column2": "value2"}, + {"column1": "value3", "column2": "value4"} + ] +} +``` + +**Expected Response Format:** + +The endpoint must return: + +```json +{ + "data": [ + { + "is_valid": true, + "custom_field": "any additional metadata" + }, + { + "is_valid": false, + "custom_field": "more metadata" + } + ] +} +``` + +**Retry Behavior:** + +The validator automatically retries on: + +- Network errors +- HTTP status codes: 429 (rate limit), 500, 502, 503, 504 + +Failed requests use exponential backoff: `delay = retry_backoff^attempt`. + +**Parallelization:** + +Set `max_parallel_requests` to control concurrency. Higher values improve throughput but increase server load. The validator batches requests according to the `batch_size` parameter in the validation column configuration. + +## Using Validators in Columns + +Add validation columns to your configuration using the builder's `add_column` method: + +```python +from data_designer.essentials import ( + CodeValidatorParams, + CodeLang, + DataDesignerConfigBuilder, + LLMCodeColumnConfig, + ValidationColumnConfig, +) + +builder = DataDesignerConfigBuilder() + +# Generate Python code +builder.add_column( + LLMCodeColumnConfig( + name="sorting_algorithm", + prompt="Write a Python function to sort a list using bubble sort.", + code_lang="python", + model_alias="my-model" + ) +) + +# Validate the generated code +builder.add_column( + ValidationColumnConfig( + name="code_validation", + target_columns=["sorting_algorithm"], + validator_type="code", + validator_params=CodeValidatorParams(code_lang=CodeLang.PYTHON), + batch_size=10, + drop=False, + ) +) +``` + +The `target_columns` parameter specifies which columns to validate. All target columns are passed to the validator together (except for code validators, which process each column separately). + +### Configuration Parameters + +See more about parameters used to instantiate `ValidationColumnConfig` in the [API reference](/api/column-configs). + +### Batch Size Considerations + +Larger batch sizes improve efficiency but consume more memory: + +- **Code validators**: 5-20 records (file I/O overhead) +- **Local callable**: 10-50 records (depends on function complexity) +- **Remote validators**: 1-10 records (network latency, server capacity) + +Adjust based on: + +- Validator computational cost +- Available memory +- Network bandwidth (for remote validators) +- Server rate limits + +If the validation logic uses information from other samples, only samples in the batch will be considered. 
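To make the remote validator's request/response contract described above concrete, here is a minimal sketch of a compatible endpoint. It assumes FastAPI is available; the endpoint path, the `price` check, and the `detail` field are purely illustrative:

```python
from typing import Any

from fastapi import FastAPI
from pydantic import BaseModel

app = FastAPI()


class ValidationRequest(BaseModel):
    data: list[dict[str, Any]]


@app.post("/validate")
def validate(request: ValidationRequest) -> dict[str, list[dict[str, Any]]]:
    # Return exactly one result object per input record, each with an `is_valid` field.
    results = []
    for record in request.data:
        price_ok = float(record.get("price", 0)) > 0
        results.append(
            {"is_valid": price_ok, "detail": "" if price_ok else "Price must be positive"}
        )
    return {"data": results}
```

A `RemoteValidatorParams(endpoint_url=...)` pointed at wherever this service is hosted would then receive one result object per submitted record, as described above.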
+ +### Multiple Column Validation + +Validate multiple columns simultaneously: + +```python +from data_designer.essentials import RemoteValidatorParams, ValidationColumnConfig + +builder.add_column( + ValidationColumnConfig( + name="multi_column_validation", + target_columns=["column_a", "column_b", "column_c"], + validator_type="remote", + validator_params=RemoteValidatorParams( + endpoint_url="https://api.example.com/validate" + ) + ) +) +``` + +**Note**: Code validators always process each target column separately, even when multiple columns are specified. Local callable and remote validators receive all target columns together. + +## See Also + +- [Validator Parameters Reference](/api/validator-params): Configuration object schemas diff --git a/fern/pages/contributing.mdx b/fern/pages/contributing.mdx new file mode 100644 index 00000000..ea6d80b3 --- /dev/null +++ b/fern/pages/contributing.mdx @@ -0,0 +1,241 @@ +--- +title: Contributing +description: How to contribute to NeMo Data Designer +--- + +# 🎨✨ Contributing to NeMo Data Designer 🎨✨ + +Thank you for your interest in contributing to Data Designer! + +We welcome contributions from the community and sincerely appreciate your efforts to improve the project. Whether you're fixing a typo, reporting a bug, proposing a new feature, or implementing a major enhancement, your work helps make Data Designer better for everyone 🎉. + +This guide will help you get started with the contribution process. + +## Table of Contents + +- [Getting Started](#getting-started) +- [Ways to Contribute](#ways-to-contribute) +- [Feature Requests](#feature-requests) +- [Development Guide](#development-guide) +- [Submitting Changes](#submitting-changes) +- [Code of Conduct](#code-of-conduct) +- [Signing off on your work](#signing-off-on-your-work) + + +## Getting Started +👋 Welcome to the Data Designer community! We're excited to have you here. + +Whether you're new to the project or ready to dive in, the resources below will help you get oriented and productive quickly: + +1. **[README.md](https://github.com/NVIDIA-NeMo/DataDesigner/blob/main/README.md)** – best place to start to learn the basics of the project + +2. **[AGENTS.md](https://github.com/NVIDIA-NeMo/DataDesigner/blob/main/AGENTS.md)** – context and instructions to help AI coding agents work on Data Designer (it's also useful for human developers!) + +3. **[Documentation](https://nvidia-nemo.github.io/DataDesigner/)** – detailed documentation on Data Designer's capabilities and usage + +## Ways to Contribute + +There are many ways to contribute to Data Designer: + +### 🐛 Bug Fixes + +Found a bug? Before reporting, please +1. Verify you're using the latest version: `uv pip install --upgrade data-designer` +2. Search for duplicates in the [issue tracker](https://github.com/NVIDIA-NeMo/DataDesigner/issues) + +When [creating a bug report](https://github.com/NVIDIA-NeMo/DataDesigner/issues/new), please include: +- Data Designer version +- Python version and operating system +- Minimal reproducible example +- Expected vs. actual behavior +- Full error messages and stack traces + +If you are interested in fixing the bug yourself, that's AWESOME! Please follow the [development guide](#development-guide) to get started. + +### ✨ Feature Implementation +Want to add new functionality? Great! Please review [our development approach](#feature-requests) and open a feature request to discuss the idea and get feedback before investing significant time on the implementation. 
+ +### 📖 Documentation Improvements +Documentation is crucial for user adoption. Contributions that clarify usage, add examples, or fix typos are highly valued. + +### 💡 Examples and Tutorials +Share your use cases! Example notebooks and tutorials help others understand how to leverage Data Designer effectively. + +### 🧪 Test Coverage +Help us improve test coverage by adding tests for untested code paths or edge cases. + +## Feature Requests +Data Designer is designed to be as flexible and extensible as possible, and we welcome your ideas for pushing its capabilities even further! To keep the core library maintainable, while also supporting innovation, we take an incremental approach when adding new features – we explore what's already possible, extend through plugins when needed, and integrate the most broadly useful features into the core library: + +### How We Grow Data Designer +1. 🧗 **Explore what's possible**: Can your use case be achieved with current features? We've designed Data Designer to be composable – sometimes creative combinations of existing tools can accomplish what you need. Check out our examples or open an issue if you'd like help exploring this! + +2. 🔌 **Extend through plugins**: If existing features aren't quite enough, consider implementing your idea as a plugin that extends the core library. Plugins let you experiment and share functionality while keeping the core library focused. + +3. ⚙️ **Integrate into the core library**: If your feature or plugin proves broadly useful and aligns with Data Designer's goals, we'd love to integrate it into the core library! We're happy to discuss whether it's a good fit and how to move forward together. + +This approach helps us grow thoughtfully while keeping Data Designer focused and maintainable. + +### Submitting a Feature Request +Open a [new issue](https://github.com/NVIDIA-NeMo/DataDesigner/issues/new) with: + +- **Clear title**: Concise description of the feature +- **Use case**: Explain what problem this solves and why it's important +- **Proposed solution**: Describe how you envision the feature working +- **Alternatives considered**: Other approaches you've thought about +- **Examples**: Code examples or mockups of how users would interact with the feature +- **Willingness to implement**: Are you interested in implementing this yourself? + +## Development Guide +Data Designer uses [`uv`](https://github.com/astral-sh/uv) for dependency management. If you don't have uv installed, follow their [installation instructions](https://docs.astral.sh/uv/getting-started/installation/). + +### Initial Setup +0. **Create or find an issue** + + Before starting work, ensure there's an issue tracking your contribution: + + - For bug fixes: Search [existing issues](https://github.com/NVIDIA-NeMo/DataDesigner/issues) or [create a new one](https://github.com/NVIDIA-NeMo/DataDesigner/issues/new) + - For new features: Open a [feature request](#feature-requests) to discuss the approach first + - Comment on the issue to let maintainers know you're working on it + +1. **Fork and clone the repository** + + Start by [forking the Data Designer repository](https://github.com/NVIDIA-NeMo/DataDesigner/fork), then clone your fork and add the upstream remote: + + ```bash + git clone https://github.com/YOUR_GITHUB_USERNAME/DataDesigner.git + + cd DataDesigner + + git remote add upstream https://github.com/NVIDIA-NeMo/DataDesigner.git + ``` + +2. 
**Install dependencies** + + ```bash + # Install project with dev dependencies + make install-dev + + # Or, if you use Jupyter / IPython for development + make install-dev-notebooks + ``` + +3. **Verify your setup** + + ```bash + make test && make check-all + ``` + + If no errors are reported, you're ready to develop 🚀 + +### Making Changes + +1. **Create a feature branch** + + ```bash + git checkout main + git pull upstream main + git checkout -b //- + ``` + + Example types of change: + + - `feat` for new features + - `fix` for bug fixes + - `docs` for documentation updates + - `test` for testing changes + - `refactor` for code refactoring + - `chore` for chore tasks + - `style` for style changes + - `perf` for performance improvements + + Example branch name: + + - `johnnygreco/feat/123-add-xyz-generator` for a new feature by @johnnygreco, addressing issue #123 + +2. **Develop your changes** + + Please follow the patterns and conventions used throughout the codebase, as well as those outlined in [AGENTS.md](https://github.com/NVIDIA-NeMo/DataDesigner/blob/main/AGENTS.md). + +3. **Test and validate** + + ```bash + make check-all-fix # Format code and fix linting issues + make test # Run all tests + make coverage # Check test coverage (must be >90%) + ``` + + **Writing tests**: Place tests in [tests/](https://github.com/NVIDIA-NeMo/DataDesigner/blob/main/tests/) mirroring the source structure. Use fixtures from [tests/conftest.py](https://github.com/NVIDIA-NeMo/DataDesigner/blob/main/tests/conftest.py), mock external services with `unittest.mock` or `pytest-httpx`, and test both success and failure cases. See [AGENTS.md](https://github.com/NVIDIA-NeMo/DataDesigner/blob/main/AGENTS.md) for patterns and examples. + +4. **Commit your work** + + Write clear, descriptive commit messages, optionally including a brief summary (50 characters or less) and reference issue numbers when applicable (e.g., "Fixes #123"). + + ```bash + git commit -m "Add XYZ generator for synthetic data" -m "Fixes #123" + ``` + +5. **Stay up to date** + + Regularly sync your branch with upstream changes: + + ```bash + git fetch upstream + git merge upstream/main + ``` + +## Submitting Changes + +### Before Submitting + +Ensure your changes meet the following criteria: + +- All tests pass (`make test`) +- Code is formatted and linted (`make check-all-fix`) +- New functionality includes tests +- Documentation is updated (README, docstrings, examples) +- License headers are present on all new files +- Commit messages are clear and descriptive + +### Creating a Pull Request + +1. **Push your changes** to your fork: + + ```bash + git push origin //- + ``` + +2. **Open a pull request** on GitHub from your fork to the main repository + +3. **Respond to review feedback** update your PR as needed + +### Pull Request Review Process + +- Maintainers will review your PR and may request changes +- Address feedback by pushing additional commits to your branch +- Reply to the feedback comment with a link to the commit that addresses it. +- Once approved, a maintainer will merge your PR +- Your contribution will be included in the next release! + +## Code of Conduct +Data Designer follows the Contributor Covenant Code of Conduct. We are committed to providing a welcoming and inclusive environment for all contributors. + +**Please read our complete [Code of Conduct](https://github.com/NVIDIA-NeMo/DataDesigner/blob/main/CODE_OF_CONDUCT.md)** for full details on our standards and expectations. 
+ +### License File Headers +All code files that are added to this repository must include the appropriate NVIDIA copyright header: + +```python +# SPDX-FileCopyrightText: Copyright (c) {YEAR} NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +``` + +Use `make update-license-headers` to add headers automatically. + +## Signing off on your work + +When contributing to this project, you must agree that you have authored 100% of the content, that you have the necessary rights to the content and that the content you contribute may be provided under the project license. All contributors are asked to sign the Data Designer [Developer Certificate of Origin (DCO)](https://github.com/NVIDIA-NeMo/DataDesigner/blob/main/DCO) when submitting their first pull request. The process is automated by a bot that will comment on the pull request. Our DCO is the same as the Linux Foundation requires its contributors to sign. + +--- + +Thank you for contributing to NeMo Data Designer! Your efforts help make synthetic data generation more accessible and powerful for everyone. 🎨✨ diff --git a/fern/pages/index.mdx b/fern/pages/index.mdx new file mode 100644 index 00000000..c8fd5ced --- /dev/null +++ b/fern/pages/index.mdx @@ -0,0 +1,53 @@ +--- +title: Welcome to NeMo Data Designer +description: A general framework for generating high-quality synthetic data from scratch or using seed data. +--- + +# 🎨 NeMo Data Designer Library + +[![GitHub](https://img.shields.io/badge/github-repo-952fc6?logo=github)](https://github.com/NVIDIA-NeMo/DataDesigner) [![License](https://img.shields.io/badge/License-Apache_2.0-0074df.svg)](https://opensource.org/licenses/Apache-2.0) [![NeMo Microservices](https://img.shields.io/badge/NeMo-Microservices-76b900)](https://docs.nvidia.com/nemo/microservices/latest/index.html) + +👋 Welcome to the Data Designer community! We're excited to have you here. + +Data Designer is a **general framework** for generating **high-quality** synthetic data **from scratch** or using your own **seed data** as a starting point for domain-grounded data generation. + +## Why Data Designer? + +Generating high-quality synthetic data requires much more than iteratively calling an LLM. + +Data Designer is **purpose-built** to support large-scale, high-quality data generation, including + + * **Diversity** – statistical distributions and variety that reflect real-world data patterns, not repetitive LLM outputs + * **Correlations** – meaningful relationships between fields that LLMs cannot maintain across independent calls + * **Steerability** – flexible control over data characteristics throughout the generation process + * **Validation** – automated quality checks and verification that data meets specifications + * **Reproducibility** – shareable and reproducible generation workflows + +## How does it work? + +Data Designer helps you create datasets through an intuitive, **iterative** process: + +1. **⚙️ Configure** your model settings + - Bring your own OpenAI-compatible model providers and models + - Or use the default model providers and models to get started quickly + - Learn more by reading the [model docs](/docs/concepts/models/default-model-settings) +2. **🏗️ Design** your dataset + - Iteratively design your dataset, column by column + - Leverage tools like statistical samplers and LLMs to generate a variety of data types + - Learn more by reading the [column docs](/docs/concepts/columns) + +3. 
**🔁 Preview** your results and iterate + - Generate a preview dataset stored in memory for fast iteration + - Inspect sample records and analysis results to refine your configuration + - Try for yourself by running the [tutorial notebooks](/docs/tutorials/overview) +4. **🖼️ Create** your dataset + - Generate your full dataset and save results to disk + - Access the generated dataset and associated artifacts for downstream use + - Give it a try by running the [tutorial notebooks](/docs/tutorials/overview) + +## Library and Microservice + +Data Designer is available as both an open-source library and a NeMo microservice. + + * **Open-source Library**: Purpose-built for flexibility and customization, prioritizing UX excellence, modularity, and extensibility. + * **NeMo Microservice**: An enterprise-grade solution that offers a seamless transition from the library, allowing you to leverage other NeMo microservices and generate datasets at scale. See the [microservice docs](https://docs.nvidia.com/nemo/microservices/latest/design-synthetic-data-from-scratch-or-seeds/index.html) for more details. diff --git a/fern/pages/installation.mdx b/fern/pages/installation.mdx new file mode 100644 index 00000000..ff7bbd6e --- /dev/null +++ b/fern/pages/installation.mdx @@ -0,0 +1,36 @@ +--- +title: Installation +description: How to install Data Designer +--- + +Installing Data Designer is as simple as: + + + + ```bash + pip install data-designer + ``` + + + ```bash + uv add data-designer + ``` + + + +## Development Installation + +To install the latest development version from the GitHub repository: + + + + ```bash + pip install 'git+https://github.com/NVIDIA-NeMo/DataDesigner@main' + ``` + + + ```bash + uv add 'git+https://github.com/NVIDIA-NeMo/DataDesigner@main' + ``` + + diff --git a/fern/pages/plugins/available.mdx b/fern/pages/plugins/available.mdx new file mode 100644 index 00000000..be2dae69 --- /dev/null +++ b/fern/pages/plugins/available.mdx @@ -0,0 +1,8 @@ +--- +title: Available Plugins +description: List of available Data Designer plugins. +--- + +# 🚧 Coming Soon + +This page will list available Data Designer plugins. Stay tuned! diff --git a/fern/pages/plugins/example.mdx b/fern/pages/plugins/example.mdx new file mode 100644 index 00000000..c15f6b76 --- /dev/null +++ b/fern/pages/plugins/example.mdx @@ -0,0 +1,312 @@ +--- +title: Example Plugin +description: A complete walkthrough for creating a Data Designer plugin. +--- + + +The plugin system is currently **experimental** and under active development. The documentation, examples, and plugin interface are subject to significant changes in future releases. If you encounter any issues, have questions, or have ideas for improvement, please consider starting [a discussion on GitHub](https://github.com/NVIDIA-NeMo/DataDesigner/discussions). + + +# Example Plugin: Index Multiplier + +In this guide, we will build a simple plugin that generates values by multiplying the row index by a user-specified multiplier. Admittedly, not the most useful plugin, but it demonstrates the required steps 😜. + +A Data Designer plugin is implemented as a Python package with three main components: + +1. **Configuration Class**: Defines the parameters users can configure +2. **Task Class**: Contains the core implementation of the plugin +3. **Plugin Object**: Connects the config and task classes to make the plugin discoverable + +Let's build the `data-designer-index-multiplier` plugin step by step. 
+ +## Step 1: Create a Python package + +Data Designer plugins are implemented as Python packages. We recommend using a standard structure for your plugin package. + +For example, here is the structure of a `data-designer-index-multiplier` plugin: + +``` +data-designer-index-multiplier/ +├── pyproject.toml +└── src/ + └── data_designer_index_multiplier/ + ├── __init__.py + └── plugin.py +``` + +## Step 2: Create the config class + +The configuration class defines what parameters users can set when using your plugin. For column generator plugins, it must inherit from `SingleColumnConfig` and include a [discriminator field](https://docs.pydantic.dev/latest/concepts/unions/#discriminated-unions). + +```python +from typing import Literal +from data_designer.config.column_configs import SingleColumnConfig + +class IndexMultiplierColumnConfig(SingleColumnConfig): + """Configuration for the index multiplier column generator.""" + + # Configurable parameter for this plugin + multiplier: int = 2 + + # Required: discriminator field with a unique Literal type + # This value identifies your plugin and becomes its column_type + column_type: Literal["index-multiplier"] = "index-multiplier" +``` + +**Key points:** + +- The `column_type` field must be a `Literal` type with a string default +- This value uniquely identifies your plugin (use kebab-case) +- Add any custom parameters your plugin needs (here: `multiplier`) +- `SingleColumnConfig` is a Pydantic model, so you can leverage all of Pydantic's validation features + +## Step 3: Create the implementation class + +The implementation class defines the actual business logic of the plugin. For column generator plugins, it inherits from `ColumnGenerator` and must implement a `metadata` static method and `generate` method: + + +```python +import logging +import pandas as pd + +from data_designer.engine.column_generators.generators.base import ( + ColumnGenerator, + GenerationStrategy, + GeneratorMetadata, +) + +# Data Designer uses the standard Python logging module for logging +logger = logging.getLogger(__name__) + +class IndexMultiplierColumnGenerator(ColumnGenerator[IndexMultiplierColumnConfig]): + @staticmethod + def metadata() -> GeneratorMetadata: + """Define metadata about this generator.""" + return GeneratorMetadata( + name="index-multiplier", + description="Generates values by multiplying the row index by a user-specified multiplier", + generation_strategy=GenerationStrategy.FULL_COLUMN, + ) + + def generate(self, data: pd.DataFrame) -> pd.DataFrame: + """Generate the column data. + + Args: + data: The current DataFrame being built + + Returns: + The DataFrame with the new column added + """ + logger.info( + f"Generating column {self.config.name} " + f"with multiplier {self.config.multiplier}" + ) + + # Access config via self.config + data[self.config.name] = data.index * self.config.multiplier + + return data +``` + +**Key points:** + +- Generic type `ColumnGenerator[IndexMultiplierColumnConfig]` connects the task to its config +- `metadata()` describes your generator and its requirements +- `generation_strategy` can be `FULL_COLUMN`, `CELL_BY_CELL` +- You have access to the configuration parameters via `self.config` + + +The `generation_strategy` specifies how the column generator will generate data. 
+ +- **`FULL_COLUMN`**: Generates the full column (at the batch level) in a single call to `generate` + - `generate` must take as input a `pd.DataFrame` with all previous columns and return a `pd.DataFrame` with the generated column appended + +- **`CELL_BY_CELL`**: Generates one cell at a time + - `generate` must take as input a `dict` with key/value pairs for all previous columns and return a `dict` with an additional key/value for the generated cell + - Supports concurrent workers via a `max_parallel_requests` parameter on the configuration + + +## Step 4: Create the plugin object + +Create a `Plugin` object that makes the plugin discoverable and connects the task and config classes. + +```python +from data_designer.plugins import Plugin, PluginType + +# Plugin instance - this is what gets loaded via entry point +plugin = Plugin( + impl_qualified_name="data_designer_index_multiplier.plugin.IndexMultiplierColumnGenerator", + config_qualified_name="data_designer_index_multiplier.plugin.IndexMultiplierColumnConfig", + plugin_type=PluginType.COLUMN_GENERATOR, + emoji="🔌", +) +``` + +### Complete plugin code + +Pulling it all together, here is the complete plugin code for `src/data_designer_index_multiplier/plugin.py`: + +```python +import logging +from typing import Literal + +import pandas as pd + +from data_designer.config.column_configs import SingleColumnConfig +from data_designer.engine.column_generators.generators.base import ( + ColumnGenerator, + GenerationStrategy, + GeneratorMetadata, +) +from data_designer.plugins import Plugin, PluginType + +# Data Designer uses the standard Python logging module for logging +logger = logging.getLogger(__name__) + + +class IndexMultiplierColumnConfig(SingleColumnConfig): + """Configuration for the index multiplier column generator.""" + + # Configurable parameter for this plugin + multiplier: int = 2 + + # Required: discriminator field with a unique Literal type + # This value identifies your plugin and becomes its column_type + column_type: Literal["index-multiplier"] = "index-multiplier" + + +class IndexMultiplierColumnGenerator(ColumnGenerator[IndexMultiplierColumnConfig]): + @staticmethod + def metadata() -> GeneratorMetadata: + """Define metadata about this generator.""" + return GeneratorMetadata( + name="index-multiplier", + description="Generates values by multiplying the row index by a user-specified multiplier", + generation_strategy=GenerationStrategy.FULL_COLUMN, + ) + + def generate(self, data: pd.DataFrame) -> pd.DataFrame: + """Generate the column data. 
+ + Args: + data: The current DataFrame being built + + Returns: + The DataFrame with the new column added + """ + logger.info( + f"Generating column {self.config.name} " + f"with multiplier {self.config.multiplier}" + ) + + # Access config via self.config + data[self.config.name] = data.index * self.config.multiplier + + return data + + +# Plugin instance - this is what gets loaded via entry point +plugin = Plugin( + impl_qualified_name="data_designer_index_multiplier.plugin.IndexMultiplierColumnGenerator", + config_qualified_name="data_designer_index_multiplier.plugin.IndexMultiplierColumnConfig", + plugin_type=PluginType.COLUMN_GENERATOR, + emoji="🔌", +) +``` + +## Step 5: Package your plugin + +Create a `pyproject.toml` file to define your package and register the entry point: + +```toml +[project] +name = "data-designer-index-multiplier" +version = "1.0.0" +description = "Data Designer index multiplier plugin" +requires-python = ">=3.10" +dependencies = [ + "data-designer", +] + +# Register this plugin via entry points +[project.entry-points."data_designer.plugins"] +index-multiplier = "data_designer_index_multiplier.plugin:plugin" + +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[tool.hatch.build.targets.wheel] +packages = ["src/data_designer_index_multiplier"] +``` + + +Plugins are discovered automatically using [Python entry points](https://packaging.python.org/en/latest/guides/creating-and-discovering-plugins/#using-package-metadata). It is important to register your plugin as an entry point under the `data_designer.plugins` group. + +The entry point format is: + +```toml +[project.entry-points."data_designer.plugins"] + = ":" +``` + + +## Step 6: Use your plugin + +Install your plugin in editable mode for testing: + +```bash +# From the plugin directory +uv pip install -e . +``` + +Once installed, your plugin works just like built-in column types: + +```python +from data_designer_index_multiplier.plugin import IndexMultiplierColumnConfig + +from data_designer.essentials import ( + CategorySamplerParams, + DataDesigner, + DataDesignerConfigBuilder, + SamplerColumnConfig, +) + +data_designer = DataDesigner() +builder = DataDesignerConfigBuilder() + +# Add a regular column +builder.add_column( + SamplerColumnConfig( + name="category", + sampler_type="category", + params=CategorySamplerParams(values=["A", "B", "C"]), + ) +) + +# Add your custom plugin column +builder.add_column( + IndexMultiplierColumnConfig( + name="v", + multiplier=5, + ) +) + +# Generate data +results = data_designer.create(builder, num_records=10) +print(results.load_dataset()) +``` + +Output: + +``` + category multiplied-index +0 B 0 +1 A 5 +2 C 10 +3 A 15 +4 B 20 +... +``` + +That's it! You have now created and used your first Data Designer plugin. The last step is to package your plugin and share it with the community 🚀 diff --git a/fern/pages/plugins/overview.mdx b/fern/pages/plugins/overview.mdx new file mode 100644 index 00000000..eddcc0be --- /dev/null +++ b/fern/pages/plugins/overview.mdx @@ -0,0 +1,51 @@ +--- +title: Plugins Overview +description: Extend Data Designer's capabilities with custom plugins. +--- + +# Data Designer Plugins + + +The plugin system is currently **experimental** and under active development. The documentation, examples, and plugin interface are subject to significant changes in future releases. 
If you encounter any issues, have questions, or have ideas for improvement, please consider starting [a discussion on GitHub](https://github.com/NVIDIA-NeMo/DataDesigner/discussions). + + +## What are plugins? + +Plugins are Python packages that extend Data Designer's capabilities without modifying the core library. Similar to [VS Code extensions](https://marketplace.visualstudio.com/vscode) and [Pytest plugins](https://docs.pytest.org/en/stable/reference/plugin_list.html), the plugin system empowers you to build specialized extensions for your specific use cases and share them with the community. + +**Current capabilities**: Data Designer currently supports plugins for column generators (the column types you pass to the config builder's `add_column` method). + +**Coming soon**: Plugin support for processors, validators, and more! + +## How do you use plugins? + +A Data Designer plugin is just a Python package configured with an [entry point](https://packaging.python.org/en/latest/guides/creating-and-discovering-plugins/#using-package-metadata) that points to a Data Designer `Plugin` object. Using a plugin is as simple as installing the package: + +```bash +pip install data-designer-{plugin-name} +``` + +Once installed, plugins are automatically discovered and ready to use. See the [example plugin](/docs/plugins/example) for a complete walkthrough. + +## How do you create plugins? + +Creating a plugin involves three main steps: + +### 1. Implement the Plugin Components + +- Create a task class inheriting from `ColumnGenerator` +- Create a config class inheriting from `SingleColumnConfig` +- Instantiate a `Plugin` object connecting them + +### 2. Package Your Plugin + +- Set up a Python package with `pyproject.toml` +- Register your plugin using entry points +- Define dependencies (including `data-designer`) + +### 3. Share Your Plugin + +- Publish to PyPI or another package index +- Share with the community! + +**Ready to get started?** See the [Example Plugin](/docs/plugins/example) for a complete walkthrough! diff --git a/fern/pages/quick-start.mdx b/fern/pages/quick-start.mdx new file mode 100644 index 00000000..58402ebd --- /dev/null +++ b/fern/pages/quick-start.mdx @@ -0,0 +1,93 @@ +--- +title: Quick Start +description: Get started with Data Designer using default model providers and configurations. +--- + +# Quick Start + +Get started with Data Designer using the default model providers and configurations. Data Designer ships with built-in model providers and configurations that make it easy to start generating synthetic data immediately. + +## Prerequisites + +Before you begin, you'll need an API key from one of the default providers: + +- **NVIDIA API Key**: Get yours from [build.nvidia.com](https://build.nvidia.com) +- **OpenAI API Key** (optional): Get yours from [platform.openai.com](https://platform.openai.com/api-keys) +- **OpenRouter API Key** (optional): Get yours from [openrouter.ai](https://openrouter.ai) + +Set your API key as an environment variable: + +```bash +export NVIDIA_API_KEY="your-api-key-here" +# Or for OpenAI +export OPENAI_API_KEY="your-openai-api-key-here" +# Or for OpenRouter +export OPENROUTER_API_KEY="your-openrouter-api-key-here" +``` + +## Example + +Below we'll construct a simple Data Designer workflow that generates multilingual greetings. 
+ +```python +import os + +from data_designer.essentials import ( + CategorySamplerParams, + DataDesigner, + DataDesignerConfigBuilder, + InfoType, + LLMTextColumnConfig, + SamplerColumnConfig, + SamplerType, +) + +# Set your API key from build.nvidia.com +# Skip this step if you've already exported your key to the environemnt variable +os.environ["NVIDIA_API_KEY"] = "your-api-key-here" + +# Create a DataDesigner instance +# This automatically configures the default model providers +data_designer = DataDesigner() + +# Print out all the model providers available +data_designer.info.display(InfoType.MODEL_PROVIDERS) + +# Create a config builder +# This automatically loads the default model configurations +config_builder = DataDesignerConfigBuilder() + +# Print out all the model configurations available +config_builder.info.display(InfoType.MODEL_CONFIGS) + +# Add a sampler column to randomly select a language +config_builder.add_column( + SamplerColumnConfig( + name="language", + sampler_type=SamplerType.CATEGORY, + params=CategorySamplerParams( + values=["English", "Spanish", "French", "German", "Italian"], + ), + ) +) + +# Add an LLM text generation column +# We'll use the built-in 'nvidia-text' model alias +config_builder.add_column( + LLMTextColumnConfig( + name="greetings", + model_alias="nvidia-text", + prompt="""Write a casual and formal greeting in '{{language}}' language.""", + ) +) + +# Run a preview to generate sample records +preview_results = data_designer.preview(config_builder=config_builder) + +# Display a sample record +preview_results.display_sample_record() +``` + +🎉 Congratulations, you successfully ran one iteration designing your synthetic data. Follow along to learn more. + +To learn more about the default providers and model configurations available, see the [Default Model Settings](/docs/concepts/models/default-model-settings) guide. diff --git a/fern/pages/recipes/code-generation/text-to-python.mdx b/fern/pages/recipes/code-generation/text-to-python.mdx new file mode 100644 index 00000000..d7c7241d --- /dev/null +++ b/fern/pages/recipes/code-generation/text-to-python.mdx @@ -0,0 +1,294 @@ +--- +title: Text to Python +description: Generate Python code from natural language descriptions. 
+--- + +# Text to Python + + +[Download the complete recipe script](https://github.com/NVIDIA-NeMo/DataDesigner/blob/main/docs/assets/recipes/code_generation/text_to_python.py) + + +```python +from pathlib import Path + +from data_designer.essentials import ( + CategorySamplerParams, + CodeLang, + CodeValidatorParams, + DataDesigner, + DataDesignerConfigBuilder, + LLMCodeColumnConfig, + LLMJudgeColumnConfig, + LLMTextColumnConfig, + SamplerColumnConfig, + SamplerType, + Score, + SubcategorySamplerParams, + ValidationColumnConfig, + ValidatorType, +) +from data_designer.interface.results import DatasetCreationResults + + +def build_config(model_alias: str) -> DataDesignerConfigBuilder: + config_builder = DataDesignerConfigBuilder() + + config_builder.add_column( + SamplerColumnConfig( + name="industry_sector", + sampler_type=SamplerType.CATEGORY, + params=CategorySamplerParams( + values=[ + "Healthcare", + "Finance", + "Technology", + ], + ), + ), + ) + + config_builder.add_column( + SamplerColumnConfig( + name="topic", + sampler_type=SamplerType.SUBCATEGORY, + params=SubcategorySamplerParams( + category="industry_sector", + values={ + "Healthcare": [ + "Electronic Health Records (EHR) Systems", + "Telemedicine Platforms", + "AI-Powered Diagnostic Tools", + ], + "Finance": [ + "Fraud Detection Software", + "Automated Trading Systems", + "Personal Finance Apps", + ], + "Technology": [ + "Cloud Computing Platforms", + "Artificial Intelligence and Machine Learning Platforms", + "DevOps and CI/CD Tools", + ], + }, + ), + ), + ) + + config_builder.add_column( + SamplerColumnConfig( + name="code_complexity", + sampler_type=SamplerType.CATEGORY, + params=CategorySamplerParams( + values=[ + "Beginner", + "Intermediate", + "Advanced", + ], + ), + ), + ) + + config_builder.add_column( + SamplerColumnConfig( + name="code_concept", + sampler_type=SamplerType.SUBCATEGORY, + params=SubcategorySamplerParams( + category="code_complexity", + values={ + "Beginner": [ + "Variables", + "Data Types", + "Functions", + "Loops", + "Classes", + ], + "Intermediate": [ + "List Comprehensions", + "Object-oriented programming", + "Lambda Functions", + "Web frameworks", + "Pandas", + ], + "Advanced": [ + "Multithreading", + "Context Managers", + "Generators", + ], + }, + ), + ), + ) + + config_builder.add_column( + SamplerColumnConfig( + name="instruction_phrase", + sampler_type=SamplerType.CATEGORY, + params=CategorySamplerParams( + values=[ + "Write a function that", + "Create a class that", + "Implement a script", + "Can you create a function", + "Develop a module that", + ], + ), + ), + ) + + config_builder.add_column( + LLMTextColumnConfig( + name="instruction", + model_alias=model_alias, + system_prompt=("You are an expert at generating clear and specific programming tasks."), + prompt=( + "Generate an instruction to create Python code that solves a specific problem.\n" + "Each instruction should begin with one of the following phrases: {{ instruction_phrase }}.\n\n" + "Important Guidelines:\n" + "* Industry Relevance: Ensure the instruction pertains to the {{ industry_sector }} sector and {{ topic }} topic.\n" + "* Code Complexity: Tailor the instruction to the {{ code_complexity }} level. Utilize relevant {{ code_concept }} where appropriate to match the complexity level.\n" + "* Clarity and Specificity: Make the problem statement clear and unambiguous. 
Provide sufficient context to understand the requirements without being overly verbose.\n" + "* Response Formatting: Do not include any markers such as ### Response ### in the instruction.\n" + ), + ) + ) + + config_builder.add_column( + LLMCodeColumnConfig( + name="code_implementation", + model_alias=model_alias, + code_lang=CodeLang.PYTHON, + system_prompt=( + "You are an expert Python programmer who writes clean, efficient, and well-documented code." + ), + prompt=( + "Write Python code for the following instruction:\n" + "Instruction: {{ instruction }}\n\n" + "Important Guidelines:\n" + "* Code Quality: Your code should be clean, complete, self-contained, and accurate.\n" + "* Code Validity: Please ensure that your Python code is executable and does not contain any errors.\n" + "* Packages: Remember to import any necessary libraries, and to use all libraries you import.\n" + "* Complexity & Concepts: The code should be written at a {{ code_complexity }} level, making use of concepts such as {{ code_concept }}.\n" + ), + ) + ) + + config_builder.add_column( + LLMJudgeColumnConfig( + name="code_judge_result", + model_alias=model_alias, + prompt=TEXT_TO_PYTHON_JUDGE_TEMPLATE, + scores=python_scoring, + ) + ) + + config_builder.add_column( + ValidationColumnConfig( + name="code_validity_result", + validator_type=ValidatorType.CODE, + target_columns=["code_implementation"], + validator_params=CodeValidatorParams( + code_lang=CodeLang.PYTHON, + ), + batch_size=100, + ) + ) + + return config_builder + + +def create_dataset( + config_builder: DataDesignerConfigBuilder, + num_records: int, + artifact_path: Path | str | None = None, +) -> DatasetCreationResults: + data_designer = DataDesigner(artifact_path=artifact_path) + results = data_designer.create(config_builder, num_records=num_records) + return results + + +TEXT_TO_PYTHON_JUDGE_TEMPLATE = """\ +You are an expert in Python programming, with specialized knowledge in software engineering, data science, and algorithmic problem-solving. + +You think about potential flaws and errors in the code. You are a tough critic, but a fair one. + +Take a deep breath and use the Python Code Quality Rubric below to score the **Generated Python Code** based on the INSTRUCTIONS. 
+ +#### INSTRUCTIONS +The Generated Python Code should be a valid response to the Natural Language Prompt below + +Natural Language Prompt: +{{ instruction }} + +Generated Python Code +{{ code_implementation }} +""" + + +python_scoring = [ + Score( + name="Relevance", + description="Adherence to INSTRUCTIONS and CONTEXT", + options={ + 4: "Perfectly meets all specified requirements.", + 3: "Meets most requirements with minor deviations.", + 2: "Moderate deviation from the instructions.", + 1: "Significant deviations from the instructions.", + 0: "Does not adhere to the instructions.", + }, + ), + Score( + name="Pythonic", + description="Pythonic Code and Best Practices (Does the code follow Python conventions and best practices?)", + options={ + 4: "The code exemplifies Pythonic principles, making excellent use of Python-specific constructs, standard library modules and programming idioms; follows all relevant PEPs.", + 3: "The code closely follows Python conventions and adheres to many best practices; good use of Python-specific constructs, standard library modules and programming idioms.", + 2: "The code generally follows Python conventions but has room for better alignment with Pythonic practices.", + 1: "The code loosely follows Python conventions, with several deviations from best practices.", + 0: "The code does not follow Python conventions or best practices, using non-Pythonic approaches.", + }, + ), + Score( + name="Readability", + description="Readability and Maintainability (Is the Python code easy to understand and maintain?)", + options={ + 4: ( + "The code is excellently formatted, follows PEP 8 guidelines, is elegantly concise and clear, uses meaningful variable names, " + "ensuring high readability and ease of maintenance; organizes complex logic well. Docstrings are given in a Google Docstring format." 
+ ), + 3: "The code is well-formatted in the sense of code-as-documentation, making it relatively easy to understand and maintain; uses descriptive names and organizes logic clearly.", + 2: "The code is somewhat readable with basic formatting and some comments, but improvements are needed; needs better use of descriptive names and organization.", + 1: "The code has minimal formatting, making it hard to understand; lacks meaningful names and organization.", + 0: "The code is unreadable, with no attempt at formatting or description.", + }, + ), + Score( + name="Efficiency", + description="Efficiency and Performance (Is the code optimized for performance?)", + options={ + 4: "The solution is highly efficient, using appropriate data structures and algorithms; avoids unnecessary computations and optimizes for both time and space complexity.", + 3: "The solution is efficient, with good use of Python's built-in functions and libraries; minor areas for optimization.", + 2: "The solution is moderately efficient, but misses some opportunities for optimization; uses some inefficient patterns.", + 1: "The solution shows poor efficiency, with notable performance issues; lacks effective optimization techniques.", + 0: "The solution is highly inefficient; overlooks fundamental optimization practices, resulting in significant performance issues.", + }, + ), +] + + +if __name__ == "__main__": + from argparse import ArgumentParser + + parser = ArgumentParser() + parser.add_argument("--model-alias", type=str, default="openai-text") + parser.add_argument("--num-records", type=int, default=5) + parser.add_argument("--artifact-path", type=str, default=None) + args = parser.parse_args() + + config_builder = build_config(model_alias=args.model_alias) + results = create_dataset(config_builder, num_records=args.num_records, artifact_path=args.artifact_path) + + print(f"Dataset saved to: {results.artifact_storage.final_dataset_path}") + + results.load_analysis().to_report() +``` diff --git a/fern/pages/recipes/code-generation/text-to-sql.mdx b/fern/pages/recipes/code-generation/text-to-sql.mdx new file mode 100644 index 00000000..9d0d33e0 --- /dev/null +++ b/fern/pages/recipes/code-generation/text-to-sql.mdx @@ -0,0 +1,336 @@ +--- +title: Text to SQL +description: Generate SQL queries from natural language descriptions. 
+--- + +# Text to SQL + + +[Download the complete recipe script](https://github.com/NVIDIA-NeMo/DataDesigner/blob/main/docs/assets/recipes/code_generation/text_to_sql.py) + + +```python +from pathlib import Path + +from data_designer.essentials import ( + CategorySamplerParams, + CodeLang, + CodeValidatorParams, + DataDesigner, + DataDesignerConfigBuilder, + LLMCodeColumnConfig, + LLMJudgeColumnConfig, + LLMTextColumnConfig, + SamplerColumnConfig, + SamplerType, + Score, + SubcategorySamplerParams, + ValidationColumnConfig, + ValidatorType, +) +from data_designer.interface.results import DatasetCreationResults + + +def build_config(model_alias: str) -> DataDesignerConfigBuilder: + config_builder = DataDesignerConfigBuilder() + + config_builder.add_column( + SamplerColumnConfig( + name="industry_sector", + sampler_type=SamplerType.CATEGORY, + params=CategorySamplerParams( + values=["Healthcare", "Finance", "Technology"], + ), + ) + ) + + config_builder.add_column( + SamplerColumnConfig( + name="topic", + sampler_type=SamplerType.SUBCATEGORY, + params=SubcategorySamplerParams( + category="industry_sector", + values={ + "Healthcare": [ + "Electronic Health Records (EHR) Systems", + "Telemedicine Platforms", + "AI-Powered Diagnostic Tools", + ], + "Finance": [ + "Fraud Detection Software", + "Automated Trading Systems", + "Personal Finance Apps", + ], + "Technology": [ + "Cloud Computing Platforms", + "Artificial Intelligence and Machine Learning Platforms", + "DevOps and CI/CD Tools", + ], + }, + ), + ) + ) + + config_builder.add_column( + SamplerColumnConfig( + name="sql_complexity", + sampler_type=SamplerType.CATEGORY, + params=CategorySamplerParams( + values=["Beginner", "Intermediate", "Advanced"], + ), + ) + ) + + config_builder.add_column( + SamplerColumnConfig( + name="sql_concept", + sampler_type=SamplerType.SUBCATEGORY, + params=SubcategorySamplerParams( + category="sql_complexity", + values={ + "Beginner": [ + "Basic SELECT Statements", + "WHERE Clauses", + "Basic JOINs", + "INSERT, UPDATE, DELETE", + ], + "Intermediate": [ + "Aggregation Functions", + "Multiple JOINs", + "Subqueries", + "Views", + ], + "Advanced": [ + "Window Functions", + "Common Table Expressions (CTEs)", + "Stored Procedures", + "Query Optimization", + ], + }, + ), + ) + ) + + config_builder.add_column( + SamplerColumnConfig( + name="sql_task_type", + sampler_type=SamplerType.CATEGORY, + params=CategorySamplerParams( + values=[ + "Data Retrieval", + "Data Manipulation", + "Analytics and Reporting", + "Data Transformation", + ], + ), + ) + ) + + config_builder.add_column( + SamplerColumnConfig( + name="instruction_phrase", + sampler_type=SamplerType.CATEGORY, + params=CategorySamplerParams( + values=[ + "Write an SQL query that", + "Create an SQL statement to", + "Develop an SQL query to", + "Can you write SQL that", + "Formulate an SQL query that", + ], + ), + ) + ) + + config_builder.add_column( + LLMTextColumnConfig( + name="sql_prompt", + model_alias=model_alias, + system_prompt="You are an expert at generating clear and specific SQL tasks.", + prompt=SQL_PROMPT_TEXT, + ) + ) + + config_builder.add_column( + LLMCodeColumnConfig( + name="sql_context", + model_alias=model_alias, + code_lang=CodeLang.SQL_ANSI, + system_prompt=( + "You are an expert SQL database designer who creates clean, efficient, and " + "well-structured database schemas." 
+ ), + prompt=SQL_CONTEXT_TEXT, + ) + ) + + config_builder.add_column( + LLMCodeColumnConfig( + name="sql", + model_alias=model_alias, + code_lang=CodeLang.SQL_ANSI, + system_prompt="You are an expert SQL programmer who writes clean, efficient, and well-structured queries.", + prompt=SQL_CODE_TEXT, + ) + ) + + config_builder.add_column( + ValidationColumnConfig( + name="code_validity_result", + validator_type=ValidatorType.CODE, + target_columns=["sql"], + validator_params=CodeValidatorParams( + code_lang=CodeLang.SQL_ANSI, + ), + batch_size=100, + ) + ) + + config_builder.add_column( + LLMJudgeColumnConfig( + name="code_judge_result", + model_alias=model_alias, + prompt=TEXT_TO_SQL_JUDGE_TEMPLATE, + scores=sql_scoring, + ) + ) + + return config_builder + + +def create_dataset( + config_builder: DataDesignerConfigBuilder, + num_records: int, + artifact_path: Path | str | None = None, +) -> DatasetCreationResults: + data_designer = DataDesigner(artifact_path=artifact_path) + results = data_designer.create(config_builder, num_records=num_records) + return results + + +SQL_PROMPT_TEXT = ( + "Generate an instruction to create SQL code that solves a specific problem.\n" + "Each instruction should begin with one of the following phrases: {{instruction_phrase}}.\n\n" + "Important Guidelines:\n" + "* Industry Relevance: Ensure the instruction pertains to the {{industry_sector}} sector and {{topic}} topic.\n" + "* SQL Complexity: Tailor the instruction to the {{sql_complexity}} level. Utilize relevant {{sql_concept}} " + "where appropriate to match the complexity level.\n" + "* Task Type: The instruction should involve a {{sql_task_type}} task.\n" + "* Clarity and Specificity: Make the problem statement clear and unambiguous. Provide sufficient context to " + "understand the requirements without being overly verbose.\n" + "* Response Formatting: Do not include any markers such as ### Response ### in the instruction.\n" +) + +SQL_CONTEXT_TEXT = ( + "Generate the SQL for creating database tables that would be relevant for the following instruction:\n" + "Instruction: {{sql_prompt}}\n\n" + "Important Guidelines:\n" + "* Relevance: Ensure all tables are directly related to the {{industry_sector}} sector and {{topic}} topic.\n" + "* Completeness: Include all essential columns with appropriate data types, primary/foreign keys, and necessary constraints.\n" + "* Realism: Use realistic table structures typical for the specified industry.\n" + "* Executable SQL: Provide complete CREATE TABLE statements that can be run without modification.\n" + "* Consistency: Use consistent naming conventions (e.g., snake_case for table and column names).\n" + "* Sample Data: Include INSERT statements with sample data that makes sense for the tables (at least 5-10 rows per table)." +) + +SQL_CODE_TEXT = ( + "Write SQL code for the following instruction based on the provided database context:\n" + "Instruction: {{sql_prompt}}\n\n" + "Database Context:\n" + "{{sql_context}}\n\n" + "Important Guidelines:\n" + "* Code Quality: Your SQL should be clean, complete, self-contained and accurate.\n" + "* Code Validity: Please ensure that your SQL code is executable and does not contain any errors.\n" + "* Context: Base your query on the provided database context. 
Only reference tables and columns that " + "exist in the context.\n" + "* Complexity & Concepts: The SQL should be written at a {{sql_complexity}} level, making use of " + "concepts such as {{sql_concept}}.\n" + "* Task Type: Ensure your solution implements the appropriate {{sql_task_type}} operation.\n" + "* Comments: Include brief comments explaining the key parts of your query.\n" +) + + +TEXT_TO_SQL_JUDGE_TEMPLATE = """\ +You are an expert in SQL with deep knowledge of relational modeling, query semantics, +and performance tuning across common dialects (e.g., PostgreSQL, MySQL, SQLite, SQL Server). +You think critically about correctness, readability, and efficiency. + +Use the SQL Query Quality Rubric below to score the **Generated SQL Query** based on the INSTRUCTIONS. + +#### INSTRUCTIONS +The Generated SQL Query should be a valid response to the Natural Language Prompt below + +Natural Language Prompt: +{{ sql_prompt }} + +Database Context: +{{ sql_context }} + +Generated SQL Query +{{ sql }} +""" + + +sql_scoring = [ + Score( + name="Relevance", + description="Adherence to INSTRUCTIONS and CONTEXT", + options={ + 4: "Perfectly meets all specified requirements.", + 3: "Meets most requirements with minor deviations.", + 2: "Moderate deviation from the instructions.", + 1: "Significant deviations from the instructions.", + 0: "Does not adhere to the instructions.", + }, + ), + Score( + name="SQL Correctness", + description="Syntax and semantic correctness; returns the intended result", + options={ + 4: "Valid SQL with correct joins, filters, grouping/aggregation, and NULL handling; produces the intended result set under the stated/implicit dialect.", + 3: "Generally correct with minor issues (e.g., edge-case NULLs, minor grouping detail) but still likely yields the intended result.", + 2: "Partially correct; noticeable semantic mistakes (joins, grouping, filters) that may change results or fail in edge cases.", + 1: "Largely incorrect; major semantic or syntactic errors likely causing failure or wrong results.", + 0: "Invalid SQL or unrelated to the task; will not run or cannot produce a meaningful result.", + }, + ), + Score( + name="Readability", + description="Formatting, clarity, and maintainability", + options={ + 4: "Cleanly formatted (keywords/clauses consistently styled), clear structure (CTEs/subqueries where helpful), meaningful table/column aliases, and concise.", + 3: "Generally readable with consistent formatting and understandable aliases; could be organized slightly better.", + 2: "Somewhat readable but inconsistent formatting or confusing aliasing; structure is harder to follow.", + 1: "Poorly formatted and hard to read; unclear structure and aliasing.", + 0: "Unreadable or chaotic; no meaningful structure or styling.", + }, + ), + Score( + name="Efficiency", + description="Query performance best practices", + options={ + 4: "Uses sargable predicates, appropriate joins, selective filters early, avoids SELECT *, unnecessary DISTINCT, and wasteful subqueries; likely to use indexes effectively.", + 3: "Mostly efficient; minor opportunities for improvement (e.g., simplifying expressions, reducing data early).", + 2: "Moderate inefficiencies (e.g., non-sargable filters, unnecessary nested subqueries, broad SELECT *).", + 1: "Notably inefficient patterns likely causing large scans or poor plans.", + 0: "Highly inefficient; ignores basic best practices and likely to perform very poorly.", + }, + ), +] + +if __name__ == "__main__": + from argparse import ArgumentParser + + 
parser = ArgumentParser() + parser.add_argument("--model-alias", type=str, default="openai-text") + parser.add_argument("--num-records", type=int, default=5) + parser.add_argument("--artifact-path", type=str, default=None) + args = parser.parse_args() + + config_builder = build_config(model_alias=args.model_alias) + results = create_dataset(config_builder, num_records=args.num_records, artifact_path=args.artifact_path) + + print(f"Dataset saved to: {results.artifact_storage.final_dataset_path}") + + results.load_analysis().to_report() +``` diff --git a/fern/pages/recipes/index.mdx b/fern/pages/recipes/index.mdx new file mode 100644 index 00000000..c77cee15 --- /dev/null +++ b/fern/pages/recipes/index.mdx @@ -0,0 +1,72 @@ +--- +title: Use Case Recipes +description: Ready-to-use code examples for common Data Designer use cases. +--- + +# Use Case Recipes + +Recipes are a collection of code examples that demonstrate how to leverage Data Designer in specific use cases. +Each recipe is a self-contained example that can be run independently. + + +Recipes provide working code for specific use cases without detailed explanations. If you're learning Data Designer for the first time, we recommend starting with our [tutorial notebooks](/docs/tutorials/overview), which offer step-by-step guidance and explain core concepts. Once you're familiar with the basics, return here for practical, ready-to-use implementations. + + + + + Generate a dataset of natural language instructions paired with Python code implementations, with varying complexity levels and industry focuses. + + **Demonstrates:** + - Python code generation + - Python code validation + - LLM-as-judge + + [Download Code](https://github.com/NVIDIA-NeMo/DataDesigner/blob/main/docs/assets/recipes/code_generation/text_to_python.py) + + + Generate a dataset of natural language instructions paired with SQL code implementations, with varying complexity levels and industry focuses. + + **Demonstrates:** + - SQL code generation + - SQL code validation + - LLM-as-judge + + [Download Code](https://github.com/NVIDIA-NeMo/DataDesigner/blob/main/docs/assets/recipes/code_generation/text_to_sql.py) + + + Generate a dataset that contains information about products and associated question/answer pairs. + + **Demonstrates:** + - Structured outputs + - Expression columns + - LLM-as-judge + + [Download Code](https://github.com/NVIDIA-NeMo/DataDesigner/blob/main/docs/assets/recipes/qa_and_chat/product_info_qa.py) + + + Generate a dataset of multi-turn chat conversations between a user and an AI assistant. + + **Demonstrates:** + - Structured outputs + - Expression columns + - LLM-as-judge + + [Download Code](https://github.com/NVIDIA-NeMo/DataDesigner/blob/main/docs/assets/recipes/qa_and_chat/multi_turn_chat.py) + + diff --git a/fern/pages/recipes/qa-and-chat/multi-turn-chat.mdx b/fern/pages/recipes/qa-and-chat/multi-turn-chat.mdx new file mode 100644 index 00000000..85015f6e --- /dev/null +++ b/fern/pages/recipes/qa-and-chat/multi-turn-chat.mdx @@ -0,0 +1,217 @@ +--- +title: Multi-Turn Chat +description: Generate multi-turn conversational dialogues. 
+--- + +# Multi-Turn Chat + + +[Download the complete recipe script](https://github.com/NVIDIA-NeMo/DataDesigner/blob/main/docs/assets/recipes/qa_and_chat/multi_turn_chat.py) + + +```python +from pathlib import Path +from typing import Literal + +from pydantic import BaseModel, Field + +from data_designer.essentials import ( + CategorySamplerParams, + DataDesigner, + DataDesignerConfigBuilder, + LLMJudgeColumnConfig, + LLMStructuredColumnConfig, + LLMTextColumnConfig, + SamplerColumnConfig, + SamplerType, + Score, + SubcategorySamplerParams, +) +from data_designer.interface.results import DatasetCreationResults + + +def build_config(model_alias: str) -> DataDesignerConfigBuilder: + config_builder = DataDesignerConfigBuilder() + + config_builder.add_column( + SamplerColumnConfig( + name="domain", + sampler_type=SamplerType.CATEGORY, + params=CategorySamplerParams(values=["Tech Support", "Personal Finances", "Educational Guidance"]), + ) + ) + + config_builder.add_column( + SamplerColumnConfig( + name="topic", + sampler_type=SamplerType.SUBCATEGORY, + params=SubcategorySamplerParams( + category="domain", + values={ + "Tech Support": [ + "Troubleshooting a Laptop", + "Setting Up a Home Wi-Fi Network", + "Installing Software Updates", + ], + "Personal Finances": [ + "Budgeting Advice", + "Understanding Taxes", + "Investment Strategies", + ], + "Educational Guidance": [ + "Choosing a College Major", + "Effective Studying Techniques", + "Learning a New Language", + ], + }, + ), + ) + ) + + config_builder.add_column( + SamplerColumnConfig( + name="complexity", + sampler_type=SamplerType.CATEGORY, + params=CategorySamplerParams(values=["Basic", "Intermediate", "Advanced"]), + ) + ) + + config_builder.add_column( + SamplerColumnConfig( + name="conversation_length", + sampler_type=SamplerType.CATEGORY, + params=CategorySamplerParams(values=[2, 4, 6, 8]), + ) + ) + + config_builder.add_column( + SamplerColumnConfig( + name="user_mood", + sampler_type=SamplerType.CATEGORY, + params=CategorySamplerParams(values=["happy", "silly", "sarcastic", "combative", "disappointed", "toxic"]), + ) + ) + + config_builder.add_column( + LLMTextColumnConfig( + name="assistant_system_prompt", + prompt=( + "Write a reasonable system prompt for a helpful AI assistant with expertise in " + "{{domain}} and {{topic}}. The AI assistant must not engage in harmful behaviors." + ), + model_alias=model_alias, + ) + ) + + config_builder.add_column( + LLMTextColumnConfig( + name="user_task", + prompt="Define a simple task related to {{topic}} of {{complexity}} complexity for the user.", + model_alias=model_alias, + ) + ) + + config_builder.add_column( + LLMStructuredColumnConfig( + name="conversation", + prompt=( + "\n{{user_task}}\n\n\n" + "{{assistant_system_prompt}}\n\n" + "Generate a conversation between a user and an AI assistant with about .\n" + "User is asking the assistant for advice and is in a {{user_mood}} mood.\n" + "The conversation must be {{conversation_length}} messages in length.\n" + "The conversation must come to a natural end in {{conversation_length}} messages and if the assistant is unable " + "to solve the user's needs by then, they should offer to continue the conversation later or redirect them to additional resources.\n" + "The conversation must be realistic and natural, incorporating details from both the user's and assistant's profiles.\n" + "The AI assistant role has the system prompt defined in . 
All of its responses must conform to :\n" + ), + output_format=ChatConversation, + model_alias=model_alias, + ) + ) + + config_builder.add_column( + LLMJudgeColumnConfig( + name="toxicity_evaluation", + prompt=TOXICITY_PROMPT_TEMPLATE, + scores=[toxicity_score], + model_alias=model_alias, + ) + ) + + return config_builder + + +def create_dataset( + config_builder: DataDesignerConfigBuilder, + num_records: int, + artifact_path: Path | str | None = None, +) -> DatasetCreationResults: + data_designer = DataDesigner(artifact_path=artifact_path) + results = data_designer.create(config_builder, num_records=num_records) + return results + + +toxicity_score = Score( + name="Toxicity", + description="Evaluation of harmful, harassing, or hateful language in conversations", + options={ + "4": "No toxicity detected; conversation is respectful and appropriate.", + "3": "Mild toxicity; slightly rude or sarcastic but not hateful or harmful.", + "2": "Moderate toxicity; contains disrespectful or harassing language.", + "1": "Severe toxicity; contains overt hate, harassment, or harmful content.", + "0": "Extreme toxicity; contains dangerous, threatening, or abusive content.", + }, +) + +TOXICITY_PROMPT_TEMPLATE = ( + "You are an expert in content moderation and digital communication ethics. " + "You are evaluating the toxicity level in user messages.\n" + "Take a deep breath and carefully analyze the conversation provided. \n" + "Use the rubric to score the level of toxicity in the USER'S messages only.\n" + "Consider:\n" + "- Focus primarily on harassment, hate, or harmful language\n" + "- Evaluate the full context of the conversation\n" + "- Be objective and consistent in your assessment\n" + "- If any single message exhibits toxicity, it should influence the final score\n" + "## CONVERSATION\n" + "{{ conversation }}" +) + + +class Message(BaseModel): + """A single message turn in the conversation.""" + + role: Literal["user", "assistant"] = Field(..., description="Which role is writing the message.") + content: str = Field(..., description="Message contents.") + + +class ChatConversation(BaseModel): + """A chat conversation between a specific user and an AI assistant. + * All conversations are initiated by the user role. + * The assistant role always responds to the user message. + * Turns alternate between user and assistant roles. + * The last message is always from the assistant role. + * Message content can be long or short. + * All assistant messages are faithful responses and must be answered fully. 
+ """ + + conversation: list[Message] = Field(..., description="List of all messages in the conversation.") + + +if __name__ == "__main__": + from argparse import ArgumentParser + + parser = ArgumentParser() + parser.add_argument("--model-alias", type=str, default="openai-text") + parser.add_argument("--num-records", type=int, default=5) + parser.add_argument("--artifact-path", type=str, default=None) + args = parser.parse_args() + + config_builder = build_config(model_alias=args.model_alias) + results = create_dataset(config_builder, num_records=args.num_records, artifact_path=args.artifact_path) + + print(f"Dataset saved to: {results.artifact_storage.final_dataset_path}") + + results.load_analysis().to_report() +``` diff --git a/fern/pages/recipes/qa-and-chat/product-info-qa.mdx b/fern/pages/recipes/qa-and-chat/product-info-qa.mdx new file mode 100644 index 00000000..59d2df7e --- /dev/null +++ b/fern/pages/recipes/qa-and-chat/product-info-qa.mdx @@ -0,0 +1,237 @@ +--- +title: Product Info QA +description: Generate question-answer pairs for product information. +--- + +# Product Info QA + + +[Download the complete recipe script](https://github.com/NVIDIA-NeMo/DataDesigner/blob/main/docs/assets/recipes/qa_and_chat/product_info_qa.py) + + +```python +import string +from pathlib import Path + +from pydantic import BaseModel, Field + +from data_designer.essentials import ( + BernoulliSamplerParams, + CategorySamplerParams, + DataDesigner, + DataDesignerConfigBuilder, + ExpressionColumnConfig, + LLMJudgeColumnConfig, + LLMStructuredColumnConfig, + LLMTextColumnConfig, + SamplerColumnConfig, + SamplerType, + Score, + UniformSamplerParams, +) +from data_designer.interface.results import DatasetCreationResults + + +def build_config(model_alias: str) -> DataDesignerConfigBuilder: + config_builder = DataDesignerConfigBuilder() + config_builder.add_column( + SamplerColumnConfig( + name="category", + sampler_type=SamplerType.CATEGORY, + params=CategorySamplerParams( + values=[ + "Electronics", + "Clothing", + "Home Appliances", + "Groceries", + "Toiletries", + "Sports Equipment", + "Toys", + "Books", + "Pet Supplies", + "Tools & Home Improvement", + "Beauty", + "Health & Wellness", + "Outdoor Gear", + "Automotive", + "Jewelry", + "Watches", + "Office Supplies", + "Gifts", + "Arts & Crafts", + "Baby & Kids", + "Music", + "Video Games", + "Movies", + "Software", + "Tech Devices", + ] + ), + ) + ) + + config_builder.add_column( + SamplerColumnConfig( + name="price_tens_of_dollars", + sampler_type=SamplerType.UNIFORM, + params=UniformSamplerParams(low=1, high=200), + ) + ) + + config_builder.add_column( + ExpressionColumnConfig( + name="product_price", + expr="{{ (price_tens_of_dollars * 10) - 0.01 | round(2) }}", + dtype="float", + ) + ) + + config_builder.add_column( + SamplerColumnConfig( + name="first_letter", + sampler_type=SamplerType.CATEGORY, + params=CategorySamplerParams(values=list(string.ascii_uppercase)), + ) + ) + + config_builder.add_column( + SamplerColumnConfig( + name="is_hallucination", + sampler_type=SamplerType.BERNOULLI, + params=BernoulliSamplerParams(p=0.5), + ) + ) + + config_builder.add_column( + LLMStructuredColumnConfig( + name="product_info", + model_alias=model_alias, + prompt=( + "Generate a realistic product description for a product in the {{ category }} " + "category that costs {{ product_price }}.\n" + "The name of the product MUST start with the letter {{ first_letter }}.\n" + ), + output_format=ProductInfo, + ) + ) + + config_builder.add_column( + LLMTextColumnConfig( 
+ name="question", + model_alias=model_alias, + prompt=("Ask a question about the following product:\n\n {{ product_info }}"), + ) + ) + + config_builder.add_column( + LLMTextColumnConfig( + name="answer", + model_alias=model_alias, + prompt=( + "{%- if is_hallucination == 0 -%}\n" + "\n" + "{{ product_info }}\n" + "\n" + "{%- endif -%}\n" + "User Question: {{ question }}\n" + "Directly and succinctly answer the user's question.\n" + "{%- if is_hallucination == 1 -%}\n" + "Make up whatever information you need to in order to answer the user's request.\n" + "{%- endif -%}" + ), + ) + ) + + # Evaluate answer quality + config_builder.add_column( + LLMJudgeColumnConfig( + name="llm_answer_metrics", + model_alias=model_alias, + prompt=( + "\n" + "{{ product_info }}\n" + "\n" + "User Question: {{question }}\n" + "AI Assistant Answer: {{ answer }}\n" + "Judge the AI assistant's response to the user's question about the product described in ." + ), + scores=answer_quality_scores, + ) + ) + + config_builder.add_column( + ExpressionColumnConfig( + name="completeness_result", + expr="{{ llm_answer_metrics.Completeness.score }}", + ) + ) + + config_builder.add_column( + ExpressionColumnConfig( + name="accuracy_result", + expr="{{ llm_answer_metrics.Accuracy.score }}", + ) + ) + + return config_builder + + +def create_dataset( + config_builder: DataDesignerConfigBuilder, + num_records: int, + artifact_path: Path | str | None = None, +) -> DatasetCreationResults: + data_designer = DataDesigner(artifact_path=artifact_path) + results = data_designer.create(config_builder, num_records=num_records) + return results + + +class ProductInfo(BaseModel): + product_name: str = Field(..., description="A realistic product name for the market.") + key_features: list[str] = Field(..., min_length=1, max_length=3, description="Key product features.") + description: str = Field( + ..., + description="A short, engaging description of what the product does, highlighting a unique but believable feature.", + ) + price_usd: float = Field(..., description="The price of the product", ge=10, le=1000, decimal_places=2) + + +completeness_score = Score( + name="Completeness", + description="Evaluation of AI assistant's thoroughness in addressing all aspects of the user's query.", + options={ + "Complete": "The response thoroughly covers all key points requested in the question, providing sufficient detail to satisfy the user's information needs.", + "PartiallyComplete": "The response addresses the core question but omits certain important details or fails to elaborate on relevant aspects that were requested.", + "Incomplete": "The response significantly lacks necessary information, missing major components of what was asked and leaving the query largely unanswered.", + }, +) + +accuracy_score = Score( + name="Accuracy", + description="Evaluation of how factually correct the AI assistant's response is relative to the product information.", + options={ + "Accurate": "The information provided aligns perfectly with the product specifications without introducing any misleading or incorrect details.", + "PartiallyAccurate": "While some information is correctly stated, the response contains minor factual errors or potentially misleading statements about the product.", + "Inaccurate": "The response presents significantly wrong information about the product, with claims that contradict the actual product details.", + }, +) + +answer_quality_scores = [completeness_score, accuracy_score] + + +if __name__ == "__main__": + from argparse import 
ArgumentParser + + parser = ArgumentParser() + parser.add_argument("--model-alias", type=str, default="openai-text") + parser.add_argument("--num-records", type=int, default=5) + parser.add_argument("--artifact-path", type=str, default=None) + args = parser.parse_args() + + config_builder = build_config(model_alias=args.model_alias) + results = create_dataset(config_builder, num_records=args.num_records, artifact_path=args.artifact_path) + + print(f"Dataset saved to: {results.artifact_storage.final_dataset_path}") + + results.load_analysis().to_report() +``` diff --git a/fern/pages/tutorials/images-as-context.mdx b/fern/pages/tutorials/images-as-context.mdx new file mode 100644 index 00000000..29ce8eb3 --- /dev/null +++ b/fern/pages/tutorials/images-as-context.mdx @@ -0,0 +1,282 @@ +--- +title: Images as Context +--- + + +Run this tutorial interactively in [Google Colab](https://colab.research.google.com/github/NVIDIA-NeMo/DataDesigner/blob/main/docs/colab_notebooks/4-providing-images-as-context.ipynb). + + +# 🎨 Data Designer Tutorial: Providing Images as Context for Vision-Based Data Generation + +#### 📚 What you'll learn + +This notebook demonstrates how to provide images as context to generate text descriptions using vision-language models. + +- ✨ **Visual Document Processing**: Converting images to chat-ready format for model consumption +- 🔍 **Vision-Language Generation**: Using vision models to generate detailed summaries from images + +If this is your first time using Data Designer, we recommend starting with the [first tutorial](/docs/tutorials/the-basics) in this series. + +### 📦 Import the essentials + +- The `essentials` module provides quick access to the most commonly used objects. + +```python +# Standard library imports +import base64 +import io +import uuid + +# Third-party imports +import pandas as pd +import rich +from datasets import load_dataset +from IPython.display import display +from rich.panel import Panel + +# Data Designer imports +from data_designer.essentials import ( + ChatCompletionInferenceParams, + DataDesigner, + DataDesignerConfigBuilder, + DataFrameSeedSource, + ImageContext, + ImageFormat, + LLMTextColumnConfig, + ModalityDataType, + ModelConfig, +) +``` + +### ⚙️ Initialize the Data Designer interface + +- `DataDesigner` is the main object is responsible for managing the data generation process. +- When initialized without arguments, the [default model providers](/docs/concepts/models/default-model-settings) are used. + +```python +data_designer = DataDesigner() +``` + +### 🎛️ Define model configurations + +- Each `ModelConfig` defines a model that can be used during the generation process. +- The "model alias" is used to reference the model in the Data Designer config (as we will see below). +- The "model provider" is the external service that hosts the model (see the [model config](/docs/concepts/models/default-model-settings) docs for more details). +- By default, we use [build.nvidia.com](https://build.nvidia.com/models) as the model provider. + +```python +# This name is set in the model provider configuration. +MODEL_PROVIDER = "nvidia" + +model_configs = [ + ModelConfig( + alias="vision", + model="meta/llama-4-scout-17b-16e-instruct", + provider=MODEL_PROVIDER, + inference_parameters=ChatCompletionInferenceParams( + temperature=0.60, + top_p=0.95, + max_tokens=2048, + ), + ), +] +``` + +### 🏗️ Initialize the Data Designer Config Builder + +- The Data Designer config defines the dataset schema and generation process. 
+- The config builder provides an intuitive interface for building this configuration. +- The list of model configs is provided to the builder at initialization. + +```python +config_builder = DataDesignerConfigBuilder(model_configs=model_configs) +``` + +### 🌱 Seed Dataset Creation + +In this section, we'll prepare our visual documents as a seed dataset for summarization: + +- **Loading Visual Documents**: We use the ColPali dataset containing document images +- **Image Processing**: Convert images to base64 format for vision model consumption +- **Metadata Extraction**: Preserve relevant document information (filename, page number, source, etc.) + +The seed dataset will be used to generate detailed text summaries of each document image. + +```python +# Dataset processing configuration +IMG_COUNT = 512 # Number of images to process +BASE64_IMAGE_HEIGHT = 512 # Standardized height for model input + +# Load ColPali dataset for visual documents +img_dataset_cfg = {"path": "vidore/colpali_train_set", "split": "train", "streaming": True} +``` + +```python +def resize_image(image, height: int): + """ + Resize image while maintaining aspect ratio. + + Args: + image: PIL Image object + height: Target height in pixels + + Returns: + Resized PIL Image object + """ + original_width, original_height = image.size + width = int(original_width * (height / original_height)) + return image.resize((width, height)) + + +def convert_image_to_chat_format(record, height: int) -> dict: + """ + Convert PIL image to base64 format for chat template usage. + + Args: + record: Dataset record containing image and metadata + height: Target height for image resizing + + Returns: + Updated record with base64_image and uuid fields + """ + # Resize image for consistent processing + image = resize_image(record["image"], height) + + # Convert to base64 string + img_buffer = io.BytesIO() + image.save(img_buffer, format="PNG") + byte_data = img_buffer.getvalue() + base64_encoded_data = base64.b64encode(byte_data) + base64_string = base64_encoded_data.decode("utf-8") + + # Return updated record + return record | {"base64_image": base64_string, "uuid": str(uuid.uuid4())} +``` + +```python +# Load and process the visual document dataset +print("📥 Loading and processing document images...") + +img_dataset_iter = iter( + load_dataset(**img_dataset_cfg).map(convert_image_to_chat_format, fn_kwargs={"height": BASE64_IMAGE_HEIGHT}) +) +img_dataset = pd.DataFrame([next(img_dataset_iter) for _ in range(IMG_COUNT)]) + +print(f"✅ Loaded {len(img_dataset)} images with columns: {list(img_dataset.columns)}") +``` + +```python +img_dataset.head() +``` + +```python +# Add the seed dataset containing our processed images +df_seed = pd.DataFrame(img_dataset)[["uuid", "image_filename", "base64_image", "page", "options", "source"]] +config_builder.with_seed_dataset(DataFrameSeedSource(df=df_seed)) +``` + +```python +# Add a column to generate detailed document summaries +config_builder.add_column( + LLMTextColumnConfig( + name="summary", + model_alias="vision", + prompt=( + "Provide a detailed summary of the content in this image in Markdown format. " + "Start from the top of the image and then describe it from top to bottom. " + "Place a summary at the bottom." + ), + multi_modal_context=[ + ImageContext( + column_name="base64_image", + data_type=ModalityDataType.BASE64, + image_format=ImageFormat.PNG, + ) + ], + ) +) +``` + +### 🔁 Iteration is key – preview the dataset! + +1. Use the `preview` method to generate a sample of records quickly. +2. 
Inspect the results for quality and format issues. +3. Adjust column configurations, prompts, or parameters as needed. +4. Re-run the preview until satisfied. + +```python +preview = data_designer.preview(config_builder, num_records=2) +``` + +```python +# Run this cell multiple times to cycle through the 2 preview records. +preview.display_sample_record() +``` + +```python +# The preview dataset is available as a pandas DataFrame. +preview.dataset +``` + +### 📊 Analyze the generated data + +- Data Designer automatically generates a basic statistical analysis of the generated data. +- This analysis is available via the `analysis` property of generation result objects. + +```python +# Print the analysis as a table. +preview.analysis.to_report() +``` + +### 🔎 Visual Inspection + +Let's compare the original document image with the generated summary to validate quality: + +```python +# Compare original document with generated summary +index = 0 # Change this to view different examples + +# Merge preview data with original images for comparison +comparison_dataset = preview.dataset.merge(pd.DataFrame(img_dataset)[["uuid", "image"]], how="left", on="uuid") + +# Extract the record for display +record = comparison_dataset.iloc[index] + +print("📄 Original Document Image:") +display(resize_image(record.image, BASE64_IMAGE_HEIGHT)) + +print("\n📝 Generated Summary:") +rich.print(Panel(record.summary, title="Document Summary", title_align="left")) +``` + +### 🆙 Scale up! + +- Happy with your preview data? +- Use the `create` method to submit larger Data Designer generation jobs. + +```python +results = data_designer.create(config_builder, num_records=10, dataset_name="tutorial-4") +``` + +```python +# Load the generated dataset as a pandas DataFrame. +dataset = results.load_dataset() + +dataset.head() +``` + +```python +# Load the analysis results into memory. +analysis = results.load_analysis() + +analysis.to_report() +``` + +## ⏭️ Next Steps + +Now that you've learned how to use visual context for image summarization in Data Designer, explore more: + +- Experiment with different vision models for specific document types +- Try different prompt variations to generate specialized descriptions (e.g., technical details, key findings) +- Combine vision-based summaries with other column types for multi-modal workflows +- Apply this pattern to other vision tasks like image captioning, OCR validation, or visual question answering diff --git a/fern/pages/tutorials/overview.mdx b/fern/pages/tutorials/overview.mdx new file mode 100644 index 00000000..907d9c80 --- /dev/null +++ b/fern/pages/tutorials/overview.mdx @@ -0,0 +1,87 @@ +--- +title: Tutorials Overview +description: Step-by-step tutorials for learning Data Designer. +--- + +# 📓 Tutorials + +Welcome to the Data Designer tutorials! These interactive notebooks guide you through the core concepts and features of Data Designer. + +## Getting Started + +Each tutorial builds on the previous one, so we recommend following them in order: + + + + Learn the fundamentals of Data Designer by generating a simple product review dataset. + + **Topics covered:** + - Sampler columns for categorical and numerical data + - LLM-generated text columns + - Previewing and iterating on your dataset + + + Learn advanced data generation using structured outputs and Jinja expressions. 
+ + **Topics covered:** + - Pydantic models for structured output schemas + - Expression columns with Jinja2 templates + - Conditional logic in prompts + + + Bootstrap generation from existing data to create domain-grounded synthetic datasets. + + **Topics covered:** + - Loading seed datasets from local files + - Referencing seed data in prompts + - Combining real and synthetic data + + + Use vision-language models to generate text descriptions from images. + + **Topics covered:** + - Processing images for model input + - Vision model configuration + - Document summarization workflows + + + +## Running the Tutorials + +Each tutorial is available as an interactive Jupyter notebook that you can run in Google Colab. Click the "Open in Colab" badge at the top of each tutorial to launch it directly in your browser. + +### Prerequisites + +Before running the tutorials, make sure you have: + +1. **An API key** from one of the supported providers: + - [NVIDIA API Key](https://build.nvidia.com) (recommended) + - [OpenAI API Key](https://platform.openai.com/api-keys) + - [OpenRouter API Key](https://openrouter.ai) + +2. **Set your API key** as an environment variable or in the notebook: + ```bash + export NVIDIA_API_KEY="your-api-key-here" + ``` + +## Additional Resources + +- **[Quick Start Guide](/docs/quick-start)**: A condensed introduction to Data Designer +- **[Use Case Recipes](/docs/recipes)**: Complete working examples for specific use cases +- **[API Reference](/api/models)**: Detailed documentation for all configuration options diff --git a/fern/pages/tutorials/seeding-with-dataset.mdx b/fern/pages/tutorials/seeding-with-dataset.mdx new file mode 100644 index 00000000..3d5d7c59 --- /dev/null +++ b/fern/pages/tutorials/seeding-with-dataset.mdx @@ -0,0 +1,257 @@ +--- +title: Seeding with a Dataset +--- + + +Run this tutorial interactively in [Google Colab](https://colab.research.google.com/github/NVIDIA-NeMo/DataDesigner/blob/main/docs/colab_notebooks/3-seeding-with-a-dataset.ipynb). + + +# 🎨 Data Designer Tutorial: Seeding Synthetic Data Generation with an External Dataset + +#### 📚 What you'll learn + +In this notebook, we will demonstrate how to seed synthetic data generation in Data Designer with an external dataset. + +If this is your first time using Data Designer, we recommend starting with the [first tutorial](/docs/tutorials/the-basics) in this series. + +### 📦 Import the essentials + +- The `essentials` module provides quick access to the most commonly used objects. + +```python +from data_designer.essentials import ( + ChatCompletionInferenceParams, + DataDesigner, + DataDesignerConfigBuilder, + LocalFileSeedSource, + ModelConfig, +) +``` + +### ⚙️ Initialize the Data Designer interface + +- `DataDesigner` is the main object is responsible for managing the data generation process. +- When initialized without arguments, the [default model providers](/docs/concepts/models/default-model-settings) are used. + +```python +data_designer = DataDesigner() +``` + +### 🎛️ Define model configurations + +- Each `ModelConfig` defines a model that can be used during the generation process. +- The "model alias" is used to reference the model in the Data Designer config (as we will see below). +- The "model provider" is the external service that hosts the model (see the [model config](/docs/concepts/models/default-model-settings) docs for more details). +- By default, we use [build.nvidia.com](https://build.nvidia.com/models) as the model provider. 
+ +```python +# This name is set in the model provider configuration. +MODEL_PROVIDER = "nvidia" + +# The model ID is from build.nvidia.com. +MODEL_ID = "nvidia/nemotron-3-nano-30b-a3b" + +# We choose this alias to be descriptive for our use case. +MODEL_ALIAS = "nemotron-nano-v3" + +model_configs = [ + ModelConfig( + alias=MODEL_ALIAS, + model=MODEL_ID, + provider=MODEL_PROVIDER, + inference_parameters=ChatCompletionInferenceParams( + temperature=1.0, + top_p=1.0, + max_tokens=2048, + extra_body={"chat_template_kwargs": {"enable_thinking": False}}, + ), + ) +] +``` + +### 🏗️ Initialize the Data Designer Config Builder + +- The Data Designer config defines the dataset schema and generation process. +- The config builder provides an intuitive interface for building this configuration. +- The list of model configs is provided to the builder at initialization. + +```python +config_builder = DataDesignerConfigBuilder(model_configs=model_configs) +``` + +## 🏥 Prepare a seed dataset + +- For this notebook, we'll create a synthetic dataset of patient notes. +- We will _seed_ the generation process with a [symptom-to-diagnosis dataset](https://huggingface.co/datasets/gretelai/symptom_to_diagnosis). + + +- Seed datasets let you steer the generation process by providing context that is specific to your use case. +- Seed datasets are also an excellent way to inject real-world diversity into your synthetic data. +- During generation, prompt templates can reference any of the seed dataset fields. + + +```python +# Download sample dataset from Github +import urllib.request + +url = "https://raw.githubusercontent.com/NVIDIA/GenerativeAIExamples/refs/heads/main/nemo/NeMo-Data-Designer/data/gretelai_symptom_to_diagnosis.csv" +local_filename, _ = urllib.request.urlretrieve(url, "gretelai_symptom_to_diagnosis.csv") + +# Seed datasets are passed as reference objects to the config builder. +seed_source = LocalFileSeedSource(path=local_filename) + +config_builder.with_seed_dataset(seed_source) +``` + +## 🎨 Designing our synthetic patient notes dataset + +- Here we use `add_column` with keyword arguments (rather than imported config objects). +- Generally, we recommend using concrete objects, but this is a convenient shorthand. 
+- **Note**: The prompt template can reference fields from our seed dataset: + - `{{ diagnosis }}` - the medical diagnosis from the seed data + - `{{ patient_summary }}` - the symptom description from the seed data + +```python +config_builder.add_column( + name="patient_sampler", + column_type="sampler", + sampler_type="person_from_faker", +) + +config_builder.add_column( + name="doctor_sampler", + column_type="sampler", + sampler_type="person_from_faker", +) + +config_builder.add_column( + name="patient_id", + column_type="sampler", + sampler_type="uuid", + params={ + "prefix": "PT-", + "short_form": True, + "uppercase": True, + }, +) + +config_builder.add_column( + name="first_name", + column_type="expression", + expr="{{ patient_sampler.first_name}}", +) + +config_builder.add_column( + name="last_name", + column_type="expression", + expr="{{ patient_sampler.last_name }}", +) + + +config_builder.add_column( + name="dob", + column_type="expression", + expr="{{ patient_sampler.birth_date }}", +) + +config_builder.add_column( + name="symptom_onset_date", + column_type="sampler", + sampler_type="datetime", + params={"start": "2024-01-01", "end": "2024-12-31"}, +) + +config_builder.add_column( + name="date_of_visit", + column_type="sampler", + sampler_type="timedelta", + params={"dt_min": 1, "dt_max": 30, "reference_column_name": "symptom_onset_date"}, +) + +config_builder.add_column( + name="physician", + column_type="expression", + expr="Dr. {{ doctor_sampler.last_name }}", +) + +config_builder.add_column( + name="physician_notes", + column_type="llm-text", + prompt="""\ +You are a primary-care physician who just had an appointment with {{ first_name }} {{ last_name }}, +who has been struggling with symptoms from {{ diagnosis }} since {{ symptom_onset_date }}. +The date of today's visit is {{ date_of_visit }}. + +{{ patient_summary }} + +Write careful notes about your visit with {{ first_name }}, +as Dr. {{ doctor_sampler.first_name }} {{ doctor_sampler.last_name }}. + +Format the notes as a busy doctor might. +Respond with only the notes, no other text. +""", + model_alias=MODEL_ALIAS, +) + +data_designer.validate(config_builder) +``` + +### 🔁 Iteration is key – preview the dataset! + +1. Use the `preview` method to generate a sample of records quickly. +2. Inspect the results for quality and format issues. +3. Adjust column configurations, prompts, or parameters as needed. +4. Re-run the preview until satisfied. + +```python +preview = data_designer.preview(config_builder, num_records=2) +``` + +```python +# Run this cell multiple times to cycle through the 2 preview records. +preview.display_sample_record() +``` + +```python +# The preview dataset is available as a pandas DataFrame. +preview.dataset +``` + +### 📊 Analyze the generated data + +- Data Designer automatically generates a basic statistical analysis of the generated data. +- This analysis is available via the `analysis` property of generation result objects. + +```python +# Print the analysis as a table. +preview.analysis.to_report() +``` + +### 🆙 Scale up! + +- Happy with your preview data? +- Use the `create` method to submit larger Data Designer generation jobs. + +```python +results = data_designer.create(config_builder, num_records=10, dataset_name="tutorial-3") +``` + +```python +# Load the generated dataset as a pandas DataFrame. +dataset = results.load_dataset() + +dataset.head() +``` + +```python +# Load the analysis results into memory. 
+analysis = results.load_analysis() + +analysis.to_report() +``` + +## ⏭️ Next Steps + +Check out the following tutorial to learn more about: + +- [Providing images as context](/docs/tutorials/images-as-context) diff --git a/fern/pages/tutorials/structured-outputs.mdx b/fern/pages/tutorials/structured-outputs.mdx new file mode 100644 index 00000000..cf184fef --- /dev/null +++ b/fern/pages/tutorials/structured-outputs.mdx @@ -0,0 +1,316 @@ +--- +title: Structured Outputs and Jinja Expressions +--- + + +Run this tutorial interactively in [Google Colab](https://colab.research.google.com/github/NVIDIA-NeMo/DataDesigner/blob/main/docs/colab_notebooks/2-structured-outputs-and-jinja-expressions.ipynb). + + +# 🎨 Data Designer Tutorial: Structured Outputs and Jinja Expressions + +#### 📚 What you'll learn + +In this notebook, we will continue our exploration of Data Designer, demonstrating more advanced data generation using structured outputs and Jinja expressions. + +If this is your first time using Data Designer, we recommend starting with the [first tutorial](/docs/tutorials/the-basics) in this series. + +### 📦 Import the essentials + +- The `essentials` module provides quick access to the most commonly used objects. + +```python +from data_designer.essentials import ( + CategorySamplerParams, + ChatCompletionInferenceParams, + DataDesigner, + DataDesignerConfigBuilder, + ExpressionColumnConfig, + LLMStructuredColumnConfig, + ModelConfig, + PersonFromFakerSamplerParams, + SamplerColumnConfig, + SamplerType, + SubcategorySamplerParams, +) +``` + +### ⚙️ Initialize the Data Designer interface + +- `DataDesigner` is the main object that is used to interface with the library. +- When initialized without arguments, the [default model providers](/docs/concepts/models/default-model-settings) are used. + +```python +data_designer = DataDesigner() +``` + +### 🎛️ Define model configurations + +- Each `ModelConfig` defines a model that can be used during the generation process. +- The "model alias" is used to reference the model in the Data Designer config (as we will see below). +- The "model provider" is the external service that hosts the model (see the [model config](/docs/concepts/models/default-model-settings) docs for more details). +- By default, we use [build.nvidia.com](https://build.nvidia.com/models) as the model provider. + +```python +# This name is set in the model provider configuration. +MODEL_PROVIDER = "nvidia" + +# The model ID is from build.nvidia.com. +MODEL_ID = "nvidia/nemotron-3-nano-30b-a3b" + +# We choose this alias to be descriptive for our use case. +MODEL_ALIAS = "nemotron-nano-v3" + +model_configs = [ + ModelConfig( + alias=MODEL_ALIAS, + model=MODEL_ID, + provider=MODEL_PROVIDER, + inference_parameters=ChatCompletionInferenceParams( + temperature=1.0, + top_p=1.0, + max_tokens=2048, + extra_body={"chat_template_kwargs": {"enable_thinking": False}}, + ), + ) +] +``` + +### 🏗️ Initialize the Data Designer Config Builder + +- The Data Designer config defines the dataset schema and generation process. +- The config builder provides an intuitive interface for building this configuration. +- The list of model configs is provided to the builder at initialization. + +```python +config_builder = DataDesignerConfigBuilder(model_configs=model_configs) +``` + +### 🧑‍🎨 Designing our data + +- We will again create a product review dataset, but this time we will use structured outputs and Jinja expressions. 
+- Structured outputs let you specify the exact schema of the data you want to generate. +- Data Designer supports schemas specified using either JSON schema or Pydantic data models (recommended). + +We'll define our structured outputs using [Pydantic](https://docs.pydantic.dev/latest/) data models. + + +- Pydantic models provide better IDE support and type validation. +- They are more Pythonic than raw JSON schemas. +- They integrate seamlessly with Data Designer's structured output system. + + +```python +from decimal import Decimal +from typing import Literal + +from pydantic import BaseModel, Field + + +# We define a Product schema so that the name, description, and price are generated +# in one go, with the types and constraints specified. +class Product(BaseModel): + name: str = Field(description="The name of the product") + description: str = Field(description="A description of the product") + price: Decimal = Field(description="The price of the product", ge=10, le=1000, decimal_places=2) + + +class ProductReview(BaseModel): + rating: int = Field(description="The rating of the product", ge=1, le=5) + customer_mood: Literal["irritated", "mad", "happy", "neutral", "excited"] = Field( + description="The mood of the customer" + ) + review: str = Field(description="A review of the product") +``` + +Next, let's design our product review dataset using a few more tricks compared to the previous notebook. + +```python +# Since we often only want a few attributes from Person objects, we can +# set drop=True in the column config to drop the column from the final dataset. +config_builder.add_column( + SamplerColumnConfig( + name="customer", + sampler_type=SamplerType.PERSON_FROM_FAKER, + params=PersonFromFakerSamplerParams(), + drop=True, + ) +) + +config_builder.add_column( + SamplerColumnConfig( + name="product_category", + sampler_type=SamplerType.CATEGORY, + params=CategorySamplerParams( + values=[ + "Electronics", + "Clothing", + "Home & Kitchen", + "Books", + "Home Office", + ], + ), + ) +) + +config_builder.add_column( + SamplerColumnConfig( + name="product_subcategory", + sampler_type=SamplerType.SUBCATEGORY, + params=SubcategorySamplerParams( + category="product_category", + values={ + "Electronics": ["Smartphones", "Laptops", "Headphones", "Cameras", "Accessories"], + "Clothing": ["Men's Clothing", "Women's Clothing", "Winter Coats", "Activewear", "Accessories"], + "Home & Kitchen": ["Appliances", "Cookware", "Furniture", "Decor", "Organization"], + "Books": ["Fiction", "Non-Fiction", "Self-Help", "Textbooks", "Classics"], + "Home Office": ["Desks", "Chairs", "Storage", "Office Supplies", "Lighting"], + }, + ), + ) +) + +config_builder.add_column( + SamplerColumnConfig( + name="target_age_range", + sampler_type=SamplerType.CATEGORY, + params=CategorySamplerParams(values=["18-25", "25-35", "35-50", "50-65", "65+"]), + ) +) + +# Sampler columns support conditional params, which are used if the condition is met. +# In this example, we set the review style to rambling if the target age range is 18-25. +# Note conditional parameters are only supported for Sampler column types. 
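+# The condition key below references the target_age_range column defined above; rows that do
+# not match the condition simply fall back to the default values and weights configured here.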
+config_builder.add_column( + SamplerColumnConfig( + name="review_style", + sampler_type=SamplerType.CATEGORY, + params=CategorySamplerParams( + values=["rambling", "brief", "detailed", "structured with bullet points"], + weights=[1, 2, 2, 1], + ), + conditional_params={ + "target_age_range == '18-25'": CategorySamplerParams(values=["rambling"]), + }, + ) +) + +# Optionally validate that the columns are configured correctly. +data_designer.validate(config_builder) +``` + +Next, we will use more advanced Jinja expressions to create new columns. + +Jinja expressions let you: + +- Access nested attributes: `{{ customer.first_name }}` +- Combine values: `{{ customer.first_name }} {{ customer.last_name }}` +- Use conditional logic: `{% if condition %}...{% endif %}` + +```python +# We can create new columns using Jinja expressions that reference +# existing columns, including attributes of nested objects. +config_builder.add_column( + ExpressionColumnConfig(name="customer_name", expr="{{ customer.first_name }} {{ customer.last_name }}") +) + +config_builder.add_column(ExpressionColumnConfig(name="customer_age", expr="{{ customer.age }}")) + +config_builder.add_column( + LLMStructuredColumnConfig( + name="product", + prompt=( + "Create a product in the '{{ product_category }}' category, focusing on products " + "related to '{{ product_subcategory }}'. The target age range of the ideal customer is " + "{{ target_age_range }} years old. The product should be priced between $10 and $1000." + ), + output_format=Product, + model_alias=MODEL_ALIAS, + ) +) + +# We can even use if/else logic in our Jinja expressions to create more complex prompt patterns. +config_builder.add_column( + LLMStructuredColumnConfig( + name="customer_review", + prompt=( + "Your task is to write a review for the following product:\n\n" + "Product Name: {{ product.name }}\n" + "Product Description: {{ product.description }}\n" + "Price: {{ product.price }}\n\n" + "Imagine your name is {{ customer_name }} and you are from {{ customer.city }}, {{ customer.state }}. " + "Write the review in a style that is '{{ review_style }}'." + "{% if target_age_range == '18-25' %}" + "Make sure the review is more informal and conversational.\n" + "{% else %}" + "Make sure the review is more formal and structured.\n" + "{% endif %}" + "The review field should contain only the review, no other text." + ), + output_format=ProductReview, + model_alias=MODEL_ALIAS, + ) +) + +data_designer.validate(config_builder) +``` + +### 🔁 Iteration is key – preview the dataset! + +1. Use the `preview` method to generate a sample of records quickly. +2. Inspect the results for quality and format issues. +3. Adjust column configurations, prompts, or parameters as needed. +4. Re-run the preview until satisfied. + +```python +preview = data_designer.preview(config_builder, num_records=2) +``` + +```python +# Run this cell multiple times to cycle through the 2 preview records. +preview.display_sample_record() +``` + +```python +# The preview dataset is available as a pandas DataFrame. +preview.dataset +``` + +### 📊 Analyze the generated data + +- Data Designer automatically generates a basic statistical analysis of the generated data. +- This analysis is available via the `analysis` property of generation result objects. + +```python +# Print the analysis as a table. +preview.analysis.to_report() +``` + +### 🆙 Scale up! + +- Happy with your preview data? +- Use the `create` method to submit larger Data Designer generation jobs. 
+ +```python +results = data_designer.create(config_builder, num_records=10, dataset_name="tutorial-2") +``` + +```python +# Load the generated dataset as a pandas DataFrame. +dataset = results.load_dataset() + +dataset.head() +``` + +```python +# Load the analysis results into memory. +analysis = results.load_analysis() + +analysis.to_report() +``` + +## ⏭️ Next Steps + +Check out the following tutorials to learn more about: + +- [Seeding synthetic data generation with an external dataset](/docs/tutorials/seeding-with-dataset) +- [Providing images as context](/docs/tutorials/images-as-context) diff --git a/fern/pages/tutorials/the-basics.mdx b/fern/pages/tutorials/the-basics.mdx new file mode 100644 index 00000000..f03886cb --- /dev/null +++ b/fern/pages/tutorials/the-basics.mdx @@ -0,0 +1,323 @@ +--- +title: "Tutorial: The Basics" +description: Learn the fundamentals of Data Designer by generating a simple product review dataset. +--- + +# 🎨 Data Designer Tutorial: The Basics + + +Run this tutorial interactively in [Google Colab](https://colab.research.google.com/github/NVIDIA-NeMo/DataDesigner/blob/main/docs/colab_notebooks/1-the-basics.ipynb). + + +#### 📚 What you'll learn + +This notebook demonstrates the basics of Data Designer by generating a simple product review dataset. + +### 📦 Import the essentials + +- The `essentials` module provides quick access to the most commonly used objects. + +```python +from data_designer.essentials import ( + CategorySamplerParams, + ChatCompletionInferenceParams, + DataDesigner, + DataDesignerConfigBuilder, + LLMTextColumnConfig, + ModelConfig, + PersonFromFakerSamplerParams, + SamplerColumnConfig, + SamplerType, + SubcategorySamplerParams, + UniformSamplerParams, +) +``` + +### ⚙️ Initialize the Data Designer interface + +- `DataDesigner` is the main object is responsible for managing the data generation process. + +- When initialized without arguments, the [default model providers](/docs/concepts/models/default-model-settings) are used. + +```python +data_designer = DataDesigner() +``` + +### 🎛️ Define model configurations + +- Each `ModelConfig` defines a model that can be used during the generation process. + +- The "model alias" is used to reference the model in the Data Designer config (as we will see below). + +- The "model provider" is the external service that hosts the model (see the [model config](/docs/concepts/models/default-model-settings) docs for more details). + +- By default, we use [build.nvidia.com](https://build.nvidia.com/models) as the model provider. + +```python +# This name is set in the model provider configuration. +MODEL_PROVIDER = "nvidia" + +# The model ID is from build.nvidia.com. +MODEL_ID = "nvidia/nemotron-3-nano-30b-a3b" + +# We choose this alias to be descriptive for our use case. +MODEL_ALIAS = "nemotron-nano-v3" + +model_configs = [ + ModelConfig( + alias=MODEL_ALIAS, + model=MODEL_ID, + provider=MODEL_PROVIDER, + inference_parameters=ChatCompletionInferenceParams( + temperature=1.0, + top_p=1.0, + max_tokens=2048, + extra_body={"chat_template_kwargs": {"enable_thinking": False}}, + ), + ) +] +``` + +### 🏗️ Initialize the Data Designer Config Builder + +- The Data Designer config defines the dataset schema and generation process. + +- The config builder provides an intuitive interface for building this configuration. + +- The list of model configs is provided to the builder at initialization. 
+ +```python +config_builder = DataDesignerConfigBuilder(model_configs=model_configs) +``` + +## 🎲 Getting started with sampler columns + +- Sampler columns offer non-LLM based generation of synthetic data. + +- They are particularly useful for **steering the diversity** of the generated data, as we demonstrate below. + +You can view available samplers using the config builder's `info` property: + +```python +config_builder.info.display("samplers") +``` + +Let's start designing our product review dataset by adding product category and subcategory columns. + +```python +config_builder.add_column( + SamplerColumnConfig( + name="product_category", + sampler_type=SamplerType.CATEGORY, + params=CategorySamplerParams( + values=[ + "Electronics", + "Clothing", + "Home & Kitchen", + "Books", + "Home Office", + ], + ), + ) +) + +config_builder.add_column( + SamplerColumnConfig( + name="product_subcategory", + sampler_type=SamplerType.SUBCATEGORY, + params=SubcategorySamplerParams( + category="product_category", + values={ + "Electronics": [ + "Smartphones", + "Laptops", + "Headphones", + "Cameras", + "Accessories", + ], + "Clothing": [ + "Men's Clothing", + "Women's Clothing", + "Winter Coats", + "Activewear", + "Accessories", + ], + "Home & Kitchen": [ + "Appliances", + "Cookware", + "Furniture", + "Decor", + "Organization", + ], + "Books": [ + "Fiction", + "Non-Fiction", + "Self-Help", + "Textbooks", + "Classics", + ], + "Home Office": [ + "Desks", + "Chairs", + "Storage", + "Office Supplies", + "Lighting", + ], + }, + ), + ) +) + +config_builder.add_column( + SamplerColumnConfig( + name="target_age_range", + sampler_type=SamplerType.CATEGORY, + params=CategorySamplerParams(values=["18-25", "25-35", "35-50", "50-65", "65+"]), + ) +) + +# Optionally validate that the columns are configured correctly. +data_designer.validate(config_builder) +``` + +Next, let's add samplers to generate data related to the customer and their review. + +```python +config_builder.add_column( + SamplerColumnConfig( + name="customer", + sampler_type=SamplerType.PERSON_FROM_FAKER, + params=PersonFromFakerSamplerParams(age_range=[18, 70], locale="en_US"), + ) +) + +config_builder.add_column( + SamplerColumnConfig( + name="number_of_stars", + sampler_type=SamplerType.UNIFORM, + params=UniformSamplerParams(low=1, high=5), + convert_to="int", # Convert the sampled float to an integer. + ) +) + +config_builder.add_column( + SamplerColumnConfig( + name="review_style", + sampler_type=SamplerType.CATEGORY, + params=CategorySamplerParams( + values=["rambling", "brief", "detailed", "structured with bullet points"], + weights=[1, 2, 2, 1], + ), + ) +) + +data_designer.validate(config_builder) +``` + +## 🦜 LLM-generated columns + +- The real power of Data Designer comes from leveraging LLMs to generate text, code, and structured data. + +- When prompting the LLM, we can use Jinja templating to reference other columns in the dataset. + +- As we see below, nested json fields can be accessed using dot notation. + +```python +config_builder.add_column( + LLMTextColumnConfig( + name="product_name", + prompt=( + "You are a helpful assistant that generates product names. DO NOT add quotes around the product name.\n\n" + "Come up with a creative product name for a product in the '{{ product_category }}' category, focusing " + "on products related to '{{ product_subcategory }}'. The target age range of the ideal customer is " + "{{ target_age_range }} years old. Respond with only the product name, no other text." 
+ ), + model_alias=MODEL_ALIAS, + ) +) + +config_builder.add_column( + LLMTextColumnConfig( + name="customer_review", + prompt=( + "You are a customer named {{ customer.first_name }} from {{ customer.city }}, {{ customer.state }}. " + "You are {{ customer.age }} years old and recently purchased a product called {{ product_name }}. " + "Write a review of this product, which you gave a rating of {{ number_of_stars }} stars. " + "The style of the review should be '{{ review_style }}'. " + "Respond with only the review, no other text." + ), + model_alias=MODEL_ALIAS, + ) +) + +data_designer.validate(config_builder) +``` + +### 🔁 Iteration is key – preview the dataset! + +1. Use the `preview` method to generate a sample of records quickly. + +2. Inspect the results for quality and format issues. + +3. Adjust column configurations, prompts, or parameters as needed. + +4. Re-run the preview until satisfied. + +```python +preview = data_designer.preview(config_builder, num_records=2) +``` + +```python +# Run this cell multiple times to cycle through the 2 preview records. +preview.display_sample_record() +``` + +```python +# The preview dataset is available as a pandas DataFrame. +preview.dataset +``` + +### 📊 Analyze the generated data + +- Data Designer automatically generates a basic statistical analysis of the generated data. + +- This analysis is available via the `analysis` property of generation result objects. + +```python +# Print the analysis as a table. +preview.analysis.to_report() +``` + +### 🆙 Scale up! + +- Happy with your preview data? + +- Use the `create` method to submit larger Data Designer generation jobs. + +```python +results = data_designer.create(config_builder, num_records=10, dataset_name="tutorial-1") +``` + +```python +# Load the generated dataset as a pandas DataFrame. +dataset = results.load_dataset() + +dataset.head() +``` + +```python +# Load the analysis results into memory. +analysis = results.load_analysis() + +analysis.to_report() +``` + +## ⏭️ Next Steps + +Now that you've seen the basics of Data Designer, check out the following tutorials to learn more about: + +- [Structured outputs and jinja expressions](/docs/tutorials/structured-outputs) + +- [Seeding synthetic data generation with an external dataset](/docs/tutorials/seeding-with-dataset) + +- [Providing images as context](/docs/tutorials/images-as-context) diff --git a/scripts/fern_migration/convert_admonitions.py b/scripts/fern_migration/convert_admonitions.py new file mode 100644 index 00000000..bbfab359 --- /dev/null +++ b/scripts/fern_migration/convert_admonitions.py @@ -0,0 +1,43 @@ +#!/usr/bin/env python3 +# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +"""Convert MkDocs admonitions to Fern callouts.""" +import re +import sys + +ADMONITION_MAP = { + "note": "Note", + "tip": "Tip", + "info": "Info", + "warning": "Warning", + "danger": "Warning", + "question": "Info", + "example": "Info", + "abstract": "Note", + "success": "Tip", + "failure": "Warning", + "bug": "Warning", +} + + +def convert_admonitions(content: str) -> str: + """Convert !!! admonitions to components.""" + pattern = r'!!! 
(\w+)(?: "([^"]*)")?\n((?:    .*\n?)*)'
+
+    def replace(match: re.Match) -> str:
+        admon_type = match.group(1).lower()
+        title = match.group(2) or ""
+        body = match.group(3)
+        # Remove 4-space indent from body
+        body = re.sub(r"^    ", "", body, flags=re.MULTILINE).strip()
+        fern_type = ADMONITION_MAP.get(admon_type, "Note")
+        if title:
+            return f'<{fern_type} title="{title}">\n{body}\n</{fern_type}>\n'
+        return f"<{fern_type}>\n{body}\n</{fern_type}>\n"
+
+    return re.sub(pattern, replace, content)
+
+
+if __name__ == "__main__":
+    content = sys.stdin.read()
+    print(convert_admonitions(content))
diff --git a/scripts/fern_migration/convert_tabs.py b/scripts/fern_migration/convert_tabs.py
new file mode 100644
index 00000000..f5a7dcca
--- /dev/null
+++ b/scripts/fern_migration/convert_tabs.py
@@ -0,0 +1,34 @@
+#!/usr/bin/env python3
+# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+"""Convert MkDocs tabs to Fern Tabs components."""
+import re
+import sys
+
+
+def convert_tabs(content: str) -> str:
+    """Convert === tabs to Fern Tabs components."""
+    # Match tab groups
+    pattern = r'((?:=== "([^"]+)"\n((?:    .*\n?)*)\n?)+)'
+
+    def replace_group(match: re.Match) -> str:
+        group = match.group(0)
+        tabs = re.findall(r'=== "([^"]+)"\n((?:    .*\n?)*)', group)
+        result = ["<Tabs>"]
+        for title, body in tabs:
+            body = re.sub(r"^    ", "", body, flags=re.MULTILINE).strip()
+            # Indent the body content properly
+            body_lines = body.split("\n")
+            indented_body = "\n".join(["    " + line if line.strip() else "" for line in body_lines])
+            result.append(f'  <Tab title="{title}">')
+            result.append(indented_body)
+            result.append("  </Tab>")
+        result.append("</Tabs>")
+        return "\n".join(result) + "\n"
+
+    return re.sub(pattern, replace_group, content)
+
+
+if __name__ == "__main__":
+    content = sys.stdin.read()
+    print(convert_tabs(content))
diff --git a/scripts/fern_migration/notebook_to_mdx.py b/scripts/fern_migration/notebook_to_mdx.py
new file mode 100644
index 00000000..88ab37bd
--- /dev/null
+++ b/scripts/fern_migration/notebook_to_mdx.py
@@ -0,0 +1,91 @@
+#!/usr/bin/env python3
+# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+"""Convert Jupyter notebook source (.py format) to MDX."""
+import re
+import sys
+from pathlib import Path
+
+
+def notebook_py_to_mdx(notebook_path: str, colab_url: str, title: str | None = None) -> str:
+    """Convert a Jupyter notebook source file (.py with Jupytext format) to MDX format."""
+    with open(notebook_path) as f:
+        content = f.read()
+
+    # Extract title from the notebook if not provided
+    if title is None:
+        title_match = re.search(r"# # (.+)", content)
+        if title_match:
+            title = title_match.group(1).strip()
+            # Remove emoji if present
+            title = re.sub(r"^[🎨📓🏥]\s*", "", title)
+        else:
+            title = Path(notebook_path).stem.replace("-", " ").title()
+
+    lines = [
+        "---",
+        f"title: {title}",
+        "---",
+        "",
+        "<Note>",
+        f"Run this tutorial interactively in [Google Colab]({colab_url}).",
+        "</Note>",
+        "",
+    ]
+
+    # Process the notebook content
+    in_markdown_block = False
+    in_code_block = False
+    current_content = []
+
+    for line in content.split("\n"):
+        # Skip Jupytext header lines (everything before the first cell marker)
+        if not in_markdown_block and not in_code_block and line not in ("# %% [markdown]", "# %%"):
+            continue
+
+        # Markdown cell marker
+        if line == "# %% [markdown]":
+            if in_code_block:
+                lines.append("```")
+                lines.append("")
+                in_code_block = False
+            in_markdown_block = True
+            continue
+
+        # Code cell marker
+        if line == "# %%":
+            if in_markdown_block:
+                in_markdown_block = False
+            if in_code_block:
+                lines.append("```")
+                lines.append("")
+            lines.append("```python")
+            in_code_block = True
+            continue
+
+        # Process content
+        if in_markdown_block:
+            # Remove the '# ' prefix from markdown lines
+            if line.startswith("# "):
+                lines.append(line[2:])
+            elif line == "#":
+                lines.append("")
+            else:
+                lines.append(line)
+        elif in_code_block:
+            lines.append(line)
+
+    # Close any open code block
+    if in_code_block:
+        lines.append("```")
+        lines.append("")
+
+    return "\n".join(lines)
+
+
+if __name__ == "__main__":
+    if len(sys.argv) < 3:
+        print("Usage: notebook_to_mdx.py <notebook_path> <colab_url> [title]")
+        sys.exit(1)
+    title = sys.argv[3] if len(sys.argv) > 3 else None
+    print(notebook_py_to_mdx(sys.argv[1], sys.argv[2], title))
From 2d9281cda7d837ca36d97c661ac2f42c2804f997 Mon Sep 17 00:00:00 2001
From: Lawrence Lane
Date: Wed, 14 Jan 2026 12:46:54 -0500
Subject: [PATCH 2/8] diff fixes

Signed-off-by: Lawrence Lane

---
 fern/pages/api-reference/analysis.mdx | 2 --
 fern/pages/api-reference/column-configs.mdx | 4 +---
 fern/pages/api-reference/config-builder.mdx | 4 +---
 fern/pages/api-reference/data-designer-config.mdx | 4 +---
 fern/pages/api-reference/models.mdx | 2 --
 fern/pages/api-reference/processors.mdx | 2 --
 fern/pages/api-reference/sampler-params.mdx | 4 +---
 fern/pages/api-reference/validator-params.mdx | 4 +---
 fern/pages/concepts/columns.mdx | 2 --
 fern/pages/concepts/models/configure-with-cli.mdx | 4 +---
 fern/pages/concepts/models/custom-model-settings.mdx | 2 --
 fern/pages/concepts/models/default-model-settings.mdx | 2 --
 fern/pages/concepts/models/inference-parameters.mdx | 2 --
 fern/pages/concepts/models/model-configs.mdx | 4 +---
 fern/pages/concepts/models/model-providers.mdx | 2 --
 fern/pages/concepts/person-sampling.mdx | 4 +---
 fern/pages/concepts/processors.mdx | 2 --
 fern/pages/concepts/validators.mdx | 2 --
 fern/pages/contributing.mdx | 4 +---
 fern/pages/index.mdx | 4 +---
 fern/pages/plugins/available.mdx | 4 +---
 fern/pages/plugins/example.mdx | 4 +---
 fern/pages/plugins/overview.mdx | 4 +---
 fern/pages/quick-start.mdx | 2 --
 fern/pages/recipes/code-generation/text-to-python.mdx | 2 --
fern/pages/recipes/code-generation/text-to-sql.mdx | 2 -- fern/pages/recipes/index.mdx | 2 -- fern/pages/recipes/qa-and-chat/multi-turn-chat.mdx | 2 -- fern/pages/recipes/qa-and-chat/product-info-qa.mdx | 2 -- fern/pages/tutorials/images-as-context.mdx | 4 +--- fern/pages/tutorials/overview.mdx | 4 +--- fern/pages/tutorials/seeding-with-dataset.mdx | 4 +--- fern/pages/tutorials/structured-outputs.mdx | 4 +--- fern/pages/tutorials/the-basics.mdx | 4 +--- 34 files changed, 18 insertions(+), 86 deletions(-) diff --git a/fern/pages/api-reference/analysis.mdx b/fern/pages/api-reference/analysis.mdx index f340b73d..a912d2b9 100644 --- a/fern/pages/api-reference/analysis.mdx +++ b/fern/pages/api-reference/analysis.mdx @@ -3,8 +3,6 @@ title: Analysis description: API reference for dataset analysis and profiling. --- -# Analysis - The `analysis` modules provide tools for profiling and analyzing generated datasets. It includes statistics tracking, column profiling, and reporting capabilities. ## Column Statistics diff --git a/fern/pages/api-reference/column-configs.mdx b/fern/pages/api-reference/column-configs.mdx index dde8b350..a692166f 100644 --- a/fern/pages/api-reference/column-configs.mdx +++ b/fern/pages/api-reference/column-configs.mdx @@ -1,10 +1,8 @@ --- -title: Column Configs +title: Column Configurations description: API reference for column configuration objects. --- -# Column Configurations - The `column_configs` module defines configuration objects for all Data Designer column types. Each configuration inherits from `SingleColumnConfig`, which provides shared arguments like the column `name`, whether to `drop` the column after generation, and the `column_type`. diff --git a/fern/pages/api-reference/config-builder.mdx b/fern/pages/api-reference/config-builder.mdx index 80e61341..a301e35b 100644 --- a/fern/pages/api-reference/config-builder.mdx +++ b/fern/pages/api-reference/config-builder.mdx @@ -1,10 +1,8 @@ --- -title: Config Builder +title: Data Designer's Config Builder description: API reference for the DataDesignerConfigBuilder. --- -# Data Designer's Config Builder - The `config_builder` module provides a high-level interface for constructing Data Designer configurations through the `DataDesignerConfigBuilder` class, enabling programmatic creation of `DataDesignerConfig` objects by incrementally adding column configurations, constraints, processors, and profilers. You can use the builder to create Data Designer configurations from scratch or from existing configurations stored in YAML/JSON files via `from_config()`. The builder includes validation capabilities to catch configuration errors early and can work with seed datasets from local sources or external datastores. Once configured, use `build()` to generate the final configuration object or `write_config()` to serialize it to disk. diff --git a/fern/pages/api-reference/data-designer-config.mdx b/fern/pages/api-reference/data-designer-config.mdx index 19d5a9be..94d7c01d 100644 --- a/fern/pages/api-reference/data-designer-config.mdx +++ b/fern/pages/api-reference/data-designer-config.mdx @@ -1,10 +1,8 @@ --- -title: Data Designer Config +title: Data Designer Configuration description: API reference for the DataDesignerConfig object. --- -# Data Designer Configuration - `DataDesignerConfig` is the main configuration object for building datasets with Data Designer. 
It is a declarative configuration for defining the dataset you want to generate column-by-column, including options for dataset post-processing, validation, and profiling. Generally, you should use the [DataDesignerConfigBuilder](/api/config-builder) to build your configuration, but you can also build it manually by instantiating the `DataDesignerConfig` class directly. diff --git a/fern/pages/api-reference/models.mdx b/fern/pages/api-reference/models.mdx index c472a5a9..831f780b 100644 --- a/fern/pages/api-reference/models.mdx +++ b/fern/pages/api-reference/models.mdx @@ -3,8 +3,6 @@ title: Models description: API reference for model configuration objects. --- -# Models - The `models` module defines configuration objects for model-based generation. `ModelProvider` specifies connection and authentication details for custom providers. `ModelConfig` encapsulates model details including the model alias, identifier, and inference parameters. [Inference Parameters](/docs/concepts/models/inference-parameters) controls model behavior through settings like `temperature`, `top_p`, and `max_tokens`, with support for both fixed values and distribution-based sampling. The module includes `ImageContext` for providing image inputs to multimodal models. For more information on how they are used, see below: diff --git a/fern/pages/api-reference/processors.mdx b/fern/pages/api-reference/processors.mdx index 644ea0f9..e1ce9ca4 100644 --- a/fern/pages/api-reference/processors.mdx +++ b/fern/pages/api-reference/processors.mdx @@ -3,8 +3,6 @@ title: Processors description: API reference for processor configuration objects. --- -# Processors - The `processors` module defines configuration objects for post-generation data transformations. Processors run after column generation and can modify the dataset schema or content before output. ## DropColumnsProcessorConfig diff --git a/fern/pages/api-reference/sampler-params.mdx b/fern/pages/api-reference/sampler-params.mdx index ecba1214..7858fd81 100644 --- a/fern/pages/api-reference/sampler-params.mdx +++ b/fern/pages/api-reference/sampler-params.mdx @@ -1,10 +1,8 @@ --- -title: Sampler Params +title: Sampler Parameters description: API reference for sampler parameter configuration objects. --- -# Sampler Parameters - The `sampler_params` module defines parameter configuration objects for all Data Designer sampler types. Sampler parameters are used within the `SamplerColumnConfig` to specify how values should be generated for sampled columns. diff --git a/fern/pages/api-reference/validator-params.mdx b/fern/pages/api-reference/validator-params.mdx index 57a0faf3..3308e224 100644 --- a/fern/pages/api-reference/validator-params.mdx +++ b/fern/pages/api-reference/validator-params.mdx @@ -1,10 +1,8 @@ --- -title: Validator Params +title: Validator Parameters description: API reference for validator parameter configuration objects. --- -# Validator Parameters - When creating a `ValidationColumnConfig`, two parameters are used to define the validator: `validator_type` and `validator_params`. The `validator_type` parameter can be set to either `code`, `local_callable` or `remote`. The `validator_params` accompanying each of these is described below. diff --git a/fern/pages/concepts/columns.mdx b/fern/pages/concepts/columns.mdx index a3d293d1..ac13079a 100644 --- a/fern/pages/concepts/columns.mdx +++ b/fern/pages/concepts/columns.mdx @@ -3,8 +3,6 @@ title: Columns description: The fundamental building blocks in Data Designer for defining dataset fields. 
--- -# Columns - Columns are the fundamental building blocks in Data Designer. Each column represents a field in your dataset and defines how to generate it—whether that's sampling from a distribution, calling an LLM, or applying a transformation. diff --git a/fern/pages/concepts/models/configure-with-cli.mdx b/fern/pages/concepts/models/configure-with-cli.mdx index ecbbd435..90b58bf1 100644 --- a/fern/pages/concepts/models/configure-with-cli.mdx +++ b/fern/pages/concepts/models/configure-with-cli.mdx @@ -1,10 +1,8 @@ --- -title: Configure with CLI +title: Configuring Model Settings Using The CLI description: Use the Data Designer CLI to manage model providers and configurations. --- -# Configuring Model Settings Using The CLI - The Data Designer CLI provides an interactive interface for creating and managing default model providers and model configurations stored in your Data Designer home directory (default: `~/.data-designer/`). ## Configuration Files diff --git a/fern/pages/concepts/models/custom-model-settings.mdx b/fern/pages/concepts/models/custom-model-settings.mdx index 99d97480..bf713956 100644 --- a/fern/pages/concepts/models/custom-model-settings.mdx +++ b/fern/pages/concepts/models/custom-model-settings.mdx @@ -3,8 +3,6 @@ title: Custom Model Settings description: Create custom providers and model configurations for Data Designer. --- -# Custom Model Settings - While Data Designer ships with pre-configured model providers and configurations, you can create custom configurations to use different models, adjust inference parameters, or connect to custom API endpoints. ## When to Use Custom Settings diff --git a/fern/pages/concepts/models/default-model-settings.mdx b/fern/pages/concepts/models/default-model-settings.mdx index 030fca82..cedcc521 100644 --- a/fern/pages/concepts/models/default-model-settings.mdx +++ b/fern/pages/concepts/models/default-model-settings.mdx @@ -3,8 +3,6 @@ title: Default Model Settings description: Pre-configured model providers and configurations included with Data Designer. --- -# Default Model Settings - Data Designer ships with pre-configured model providers and model configurations that make it easy to start generating synthetic data without manual setup. ## Model Providers diff --git a/fern/pages/concepts/models/inference-parameters.mdx b/fern/pages/concepts/models/inference-parameters.mdx index 866bd229..49046245 100644 --- a/fern/pages/concepts/models/inference-parameters.mdx +++ b/fern/pages/concepts/models/inference-parameters.mdx @@ -3,8 +3,6 @@ title: Inference Parameters description: Control model behavior during synthetic data generation. --- -# Inference Parameters - Inference parameters control how models generate responses during synthetic data generation. Data Designer provides two types of inference parameters: `ChatCompletionInferenceParams` for text/code/structured generation and `EmbeddingInferenceParams` for embedding generation. ## Overview diff --git a/fern/pages/concepts/models/model-configs.mdx b/fern/pages/concepts/models/model-configs.mdx index 850d3d28..fc4cace5 100644 --- a/fern/pages/concepts/models/model-configs.mdx +++ b/fern/pages/concepts/models/model-configs.mdx @@ -1,10 +1,8 @@ --- -title: Model Configs +title: Model Configurations description: Configure model settings for synthetic data generation. --- -# Model Configurations - Model configurations define the specific models you use for synthetic data generation and their associated inference parameters. 
Each `ModelConfig` represents a named model that can be referenced throughout your data generation workflows. ## Overview diff --git a/fern/pages/concepts/models/model-providers.mdx b/fern/pages/concepts/models/model-providers.mdx index e47ed1e1..efc877f3 100644 --- a/fern/pages/concepts/models/model-providers.mdx +++ b/fern/pages/concepts/models/model-providers.mdx @@ -3,8 +3,6 @@ title: Model Providers description: Configure connections to model hosting services. --- -# Model Providers - Model providers are external services that host and serve models. Data Designer uses the `ModelProvider` class to configure connections to these services. ## Overview diff --git a/fern/pages/concepts/person-sampling.mdx b/fern/pages/concepts/person-sampling.mdx index 650ae0c7..0bb23ce3 100644 --- a/fern/pages/concepts/person-sampling.mdx +++ b/fern/pages/concepts/person-sampling.mdx @@ -1,10 +1,8 @@ --- -title: Person Sampling +title: Person Sampling in Data Designer description: Generate synthetic person data for your datasets. --- -# Person Sampling in Data Designer - Person sampling in Data Designer allows you to generate synthetic person data for your datasets. There are two distinct approaches, each with different capabilities and use cases. ## Overview diff --git a/fern/pages/concepts/processors.mdx b/fern/pages/concepts/processors.mdx index 04e601e8..2efcd1c7 100644 --- a/fern/pages/concepts/processors.mdx +++ b/fern/pages/concepts/processors.mdx @@ -3,8 +3,6 @@ title: Processors description: Transformations that modify your dataset before or after columns are generated. --- -# Processors - Processors are transformations that modify your dataset before or after columns are generated. They run at different stages and can reshape, filter, or augment the data. diff --git a/fern/pages/concepts/validators.mdx b/fern/pages/concepts/validators.mdx index 0f2202f0..7b50d618 100644 --- a/fern/pages/concepts/validators.mdx +++ b/fern/pages/concepts/validators.mdx @@ -3,8 +3,6 @@ title: Validators description: Quality assurance mechanisms that check generated content against rules. --- -# Validators - Validators are quality assurance mechanisms in Data Designer that check generated content against rules and return structured pass/fail results. They enable automated verification of data for correctness, code quality, and adherence to specifications. diff --git a/fern/pages/contributing.mdx b/fern/pages/contributing.mdx index ea6d80b3..b317a75a 100644 --- a/fern/pages/contributing.mdx +++ b/fern/pages/contributing.mdx @@ -1,10 +1,8 @@ --- -title: Contributing +title: 🎨✨ Contributing to NeMo Data Designer 🎨✨ description: How to contribute to NeMo Data Designer --- -# 🎨✨ Contributing to NeMo Data Designer 🎨✨ - Thank you for your interest in contributing to Data Designer! We welcome contributions from the community and sincerely appreciate your efforts to improve the project. Whether you're fixing a typo, reporting a bug, proposing a new feature, or implementing a major enhancement, your work helps make Data Designer better for everyone 🎉. diff --git a/fern/pages/index.mdx b/fern/pages/index.mdx index c8fd5ced..218beadf 100644 --- a/fern/pages/index.mdx +++ b/fern/pages/index.mdx @@ -1,10 +1,8 @@ --- -title: Welcome to NeMo Data Designer +title: 🎨 NeMo Data Designer Library description: A general framework for generating high-quality synthetic data from scratch or using seed data. 
--- -# 🎨 NeMo Data Designer Library - [![GitHub](https://img.shields.io/badge/github-repo-952fc6?logo=github)](https://github.com/NVIDIA-NeMo/DataDesigner) [![License](https://img.shields.io/badge/License-Apache_2.0-0074df.svg)](https://opensource.org/licenses/Apache-2.0) [![NeMo Microservices](https://img.shields.io/badge/NeMo-Microservices-76b900)](https://docs.nvidia.com/nemo/microservices/latest/index.html) 👋 Welcome to the Data Designer community! We're excited to have you here. diff --git a/fern/pages/plugins/available.mdx b/fern/pages/plugins/available.mdx index be2dae69..594e53ac 100644 --- a/fern/pages/plugins/available.mdx +++ b/fern/pages/plugins/available.mdx @@ -1,8 +1,6 @@ --- -title: Available Plugins +title: "🚧 Available Plugins: Coming Soon" description: List of available Data Designer plugins. --- -# 🚧 Coming Soon - This page will list available Data Designer plugins. Stay tuned! diff --git a/fern/pages/plugins/example.mdx b/fern/pages/plugins/example.mdx index c15f6b76..0be568cb 100644 --- a/fern/pages/plugins/example.mdx +++ b/fern/pages/plugins/example.mdx @@ -1,5 +1,5 @@ --- -title: Example Plugin +title: "Example Plugin: Index Multiplier" description: A complete walkthrough for creating a Data Designer plugin. --- @@ -7,8 +7,6 @@ description: A complete walkthrough for creating a Data Designer plugin. The plugin system is currently **experimental** and under active development. The documentation, examples, and plugin interface are subject to significant changes in future releases. If you encounter any issues, have questions, or have ideas for improvement, please consider starting [a discussion on GitHub](https://github.com/NVIDIA-NeMo/DataDesigner/discussions).
-# Example Plugin: Index Multiplier - In this guide, we will build a simple plugin that generates values by multiplying the row index by a user-specified multiplier. Admittedly, not the most useful plugin, but it demonstrates the required steps 😜. A Data Designer plugin is implemented as a Python package with three main components: diff --git a/fern/pages/plugins/overview.mdx b/fern/pages/plugins/overview.mdx index eddcc0be..613f3b3f 100644 --- a/fern/pages/plugins/overview.mdx +++ b/fern/pages/plugins/overview.mdx @@ -1,10 +1,8 @@ --- -title: Plugins Overview +title: Data Designer Plugins description: Extend Data Designer's capabilities with custom plugins. --- -# Data Designer Plugins - The plugin system is currently **experimental** and under active development. The documentation, examples, and plugin interface are subject to significant changes in future releases. If you encounter any issues, have questions, or have ideas for improvement, please consider starting [a discussion on GitHub](https://github.com/NVIDIA-NeMo/DataDesigner/discussions). diff --git a/fern/pages/quick-start.mdx b/fern/pages/quick-start.mdx index 58402ebd..e52b3c02 100644 --- a/fern/pages/quick-start.mdx +++ b/fern/pages/quick-start.mdx @@ -3,8 +3,6 @@ title: Quick Start description: Get started with Data Designer using default model providers and configurations. --- -# Quick Start - Get started with Data Designer using the default model providers and configurations. Data Designer ships with built-in model providers and configurations that make it easy to start generating synthetic data immediately. ## Prerequisites diff --git a/fern/pages/recipes/code-generation/text-to-python.mdx b/fern/pages/recipes/code-generation/text-to-python.mdx index d7c7241d..79e597a7 100644 --- a/fern/pages/recipes/code-generation/text-to-python.mdx +++ b/fern/pages/recipes/code-generation/text-to-python.mdx @@ -3,8 +3,6 @@ title: Text to Python description: Generate Python code from natural language descriptions. --- -# Text to Python - [Download the complete recipe script](https://github.com/NVIDIA-NeMo/DataDesigner/blob/main/docs/assets/recipes/code_generation/text_to_python.py) diff --git a/fern/pages/recipes/code-generation/text-to-sql.mdx b/fern/pages/recipes/code-generation/text-to-sql.mdx index 9d0d33e0..26ddd1ab 100644 --- a/fern/pages/recipes/code-generation/text-to-sql.mdx +++ b/fern/pages/recipes/code-generation/text-to-sql.mdx @@ -3,8 +3,6 @@ title: Text to SQL description: Generate SQL queries from natural language descriptions. --- -# Text to SQL - [Download the complete recipe script](https://github.com/NVIDIA-NeMo/DataDesigner/blob/main/docs/assets/recipes/code_generation/text_to_sql.py) diff --git a/fern/pages/recipes/index.mdx b/fern/pages/recipes/index.mdx index c77cee15..34d15a59 100644 --- a/fern/pages/recipes/index.mdx +++ b/fern/pages/recipes/index.mdx @@ -3,8 +3,6 @@ title: Use Case Recipes description: Ready-to-use code examples for common Data Designer use cases. --- -# Use Case Recipes - Recipes are a collection of code examples that demonstrate how to leverage Data Designer in specific use cases. Each recipe is a self-contained example that can be run independently. 
diff --git a/fern/pages/recipes/qa-and-chat/multi-turn-chat.mdx b/fern/pages/recipes/qa-and-chat/multi-turn-chat.mdx index 85015f6e..1b273592 100644 --- a/fern/pages/recipes/qa-and-chat/multi-turn-chat.mdx +++ b/fern/pages/recipes/qa-and-chat/multi-turn-chat.mdx @@ -3,8 +3,6 @@ title: Multi-Turn Chat description: Generate multi-turn conversational dialogues. --- -# Multi-Turn Chat - [Download the complete recipe script](https://github.com/NVIDIA-NeMo/DataDesigner/blob/main/docs/assets/recipes/qa_and_chat/multi_turn_chat.py) diff --git a/fern/pages/recipes/qa-and-chat/product-info-qa.mdx b/fern/pages/recipes/qa-and-chat/product-info-qa.mdx index 59d2df7e..f3cdf7c4 100644 --- a/fern/pages/recipes/qa-and-chat/product-info-qa.mdx +++ b/fern/pages/recipes/qa-and-chat/product-info-qa.mdx @@ -3,8 +3,6 @@ title: Product Info QA description: Generate question-answer pairs for product information. --- -# Product Info QA - [Download the complete recipe script](https://github.com/NVIDIA-NeMo/DataDesigner/blob/main/docs/assets/recipes/qa_and_chat/product_info_qa.py) diff --git a/fern/pages/tutorials/images-as-context.mdx b/fern/pages/tutorials/images-as-context.mdx index 29ce8eb3..5897ace2 100644 --- a/fern/pages/tutorials/images-as-context.mdx +++ b/fern/pages/tutorials/images-as-context.mdx @@ -1,13 +1,11 @@ --- -title: Images as Context +title: "🎨 Data Designer Tutorial: Images as Context for Vision-Based Generation" --- Run this tutorial interactively in [Google Colab](https://colab.research.google.com/github/NVIDIA-NeMo/DataDesigner/blob/main/docs/colab_notebooks/4-providing-images-as-context.ipynb). -# 🎨 Data Designer Tutorial: Providing Images as Context for Vision-Based Data Generation - #### 📚 What you'll learn This notebook demonstrates how to provide images as context to generate text descriptions using vision-language models. diff --git a/fern/pages/tutorials/overview.mdx b/fern/pages/tutorials/overview.mdx index 907d9c80..38cc6859 100644 --- a/fern/pages/tutorials/overview.mdx +++ b/fern/pages/tutorials/overview.mdx @@ -1,10 +1,8 @@ --- -title: Tutorials Overview +title: 📓 Tutorials description: Step-by-step tutorials for learning Data Designer. --- -# 📓 Tutorials - Welcome to the Data Designer tutorials! These interactive notebooks guide you through the core concepts and features of Data Designer. ## Getting Started diff --git a/fern/pages/tutorials/seeding-with-dataset.mdx b/fern/pages/tutorials/seeding-with-dataset.mdx index 3d5d7c59..b6e9c351 100644 --- a/fern/pages/tutorials/seeding-with-dataset.mdx +++ b/fern/pages/tutorials/seeding-with-dataset.mdx @@ -1,13 +1,11 @@ --- -title: Seeding with a Dataset +title: "🎨 Data Designer Tutorial: Seeding with an External Dataset" --- Run this tutorial interactively in [Google Colab](https://colab.research.google.com/github/NVIDIA-NeMo/DataDesigner/blob/main/docs/colab_notebooks/3-seeding-with-a-dataset.ipynb). -# 🎨 Data Designer Tutorial: Seeding Synthetic Data Generation with an External Dataset - #### 📚 What you'll learn In this notebook, we will demonstrate how to seed synthetic data generation in Data Designer with an external dataset. 
diff --git a/fern/pages/tutorials/structured-outputs.mdx b/fern/pages/tutorials/structured-outputs.mdx index cf184fef..458bde2e 100644 --- a/fern/pages/tutorials/structured-outputs.mdx +++ b/fern/pages/tutorials/structured-outputs.mdx @@ -1,13 +1,11 @@ --- -title: Structured Outputs and Jinja Expressions +title: "🎨 Data Designer Tutorial: Structured Outputs and Jinja Expressions" --- Run this tutorial interactively in [Google Colab](https://colab.research.google.com/github/NVIDIA-NeMo/DataDesigner/blob/main/docs/colab_notebooks/2-structured-outputs-and-jinja-expressions.ipynb). -# 🎨 Data Designer Tutorial: Structured Outputs and Jinja Expressions - #### 📚 What you'll learn In this notebook, we will continue our exploration of Data Designer, demonstrating more advanced data generation using structured outputs and Jinja expressions. diff --git a/fern/pages/tutorials/the-basics.mdx b/fern/pages/tutorials/the-basics.mdx index f03886cb..34e1a388 100644 --- a/fern/pages/tutorials/the-basics.mdx +++ b/fern/pages/tutorials/the-basics.mdx @@ -1,10 +1,8 @@ --- -title: "Tutorial: The Basics" +title: "🎨 Data Designer Tutorial: The Basics" description: Learn the fundamentals of Data Designer by generating a simple product review dataset. --- -# 🎨 Data Designer Tutorial: The Basics - Run this tutorial interactively in [Google Colab](https://colab.research.google.com/github/NVIDIA-NeMo/DataDesigner/blob/main/docs/colab_notebooks/1-the-basics.ipynb). From b6b8d678617913e6c2d388ee349653c366286dc9 Mon Sep 17 00:00:00 2001 From: Lawrence Lane Date: Wed, 14 Jan 2026 12:57:33 -0500 Subject: [PATCH 3/8] multi-version test Signed-off-by: Lawrence Lane --- fern/docs.yml | 108 ++---------------- .../pages/api-reference/analysis.mdx | 0 .../pages/api-reference/column-configs.mdx | 0 .../pages/api-reference/config-builder.mdx | 0 .../api-reference/data-designer-config.mdx | 0 .../pages/api-reference/models.mdx | 0 .../pages/api-reference/processors.mdx | 0 .../pages/api-reference/sampler-params.mdx | 0 .../pages/api-reference/validator-params.mdx | 0 fern/{ => v0.3.3}/pages/concepts/columns.mdx | 0 .../concepts/models/configure-with-cli.mdx | 0 .../concepts/models/custom-model-settings.mdx | 0 .../models/default-model-settings.mdx | 0 .../concepts/models/inference-parameters.mdx | 0 .../pages/concepts/models/model-configs.mdx | 0 .../pages/concepts/models/model-providers.mdx | 0 .../pages/concepts/person-sampling.mdx | 0 .../pages/concepts/processors.mdx | 0 .../pages/concepts/validators.mdx | 0 fern/{ => v0.3.3}/pages/contributing.mdx | 0 fern/{ => v0.3.3}/pages/index.mdx | 0 fern/{ => v0.3.3}/pages/installation.mdx | 0 fern/{ => v0.3.3}/pages/plugins/available.mdx | 0 fern/{ => v0.3.3}/pages/plugins/example.mdx | 0 fern/{ => v0.3.3}/pages/plugins/overview.mdx | 0 fern/{ => v0.3.3}/pages/quick-start.mdx | 0 .../code-generation/text-to-python.mdx | 0 .../recipes/code-generation/text-to-sql.mdx | 0 fern/{ => v0.3.3}/pages/recipes/index.mdx | 0 .../recipes/qa-and-chat/multi-turn-chat.mdx | 0 .../recipes/qa-and-chat/product-info-qa.mdx | 0 .../pages/tutorials/images-as-context.mdx | 0 .../{ => v0.3.3}/pages/tutorials/overview.mdx | 0 .../pages/tutorials/seeding-with-dataset.mdx | 0 .../pages/tutorials/structured-outputs.mdx | 0 .../pages/tutorials/the-basics.mdx | 0 fern/v0.4.0/pages/index.mdx | 18 +++ fern/versions/v0.3.3.yml | 101 ++++++++++++++++ fern/versions/v0.4.0.yml | 12 ++ 39 files changed, 138 insertions(+), 101 deletions(-) rename fern/{ => v0.3.3}/pages/api-reference/analysis.mdx (100%) rename fern/{ => 
v0.3.3}/pages/api-reference/column-configs.mdx (100%) rename fern/{ => v0.3.3}/pages/api-reference/config-builder.mdx (100%) rename fern/{ => v0.3.3}/pages/api-reference/data-designer-config.mdx (100%) rename fern/{ => v0.3.3}/pages/api-reference/models.mdx (100%) rename fern/{ => v0.3.3}/pages/api-reference/processors.mdx (100%) rename fern/{ => v0.3.3}/pages/api-reference/sampler-params.mdx (100%) rename fern/{ => v0.3.3}/pages/api-reference/validator-params.mdx (100%) rename fern/{ => v0.3.3}/pages/concepts/columns.mdx (100%) rename fern/{ => v0.3.3}/pages/concepts/models/configure-with-cli.mdx (100%) rename fern/{ => v0.3.3}/pages/concepts/models/custom-model-settings.mdx (100%) rename fern/{ => v0.3.3}/pages/concepts/models/default-model-settings.mdx (100%) rename fern/{ => v0.3.3}/pages/concepts/models/inference-parameters.mdx (100%) rename fern/{ => v0.3.3}/pages/concepts/models/model-configs.mdx (100%) rename fern/{ => v0.3.3}/pages/concepts/models/model-providers.mdx (100%) rename fern/{ => v0.3.3}/pages/concepts/person-sampling.mdx (100%) rename fern/{ => v0.3.3}/pages/concepts/processors.mdx (100%) rename fern/{ => v0.3.3}/pages/concepts/validators.mdx (100%) rename fern/{ => v0.3.3}/pages/contributing.mdx (100%) rename fern/{ => v0.3.3}/pages/index.mdx (100%) rename fern/{ => v0.3.3}/pages/installation.mdx (100%) rename fern/{ => v0.3.3}/pages/plugins/available.mdx (100%) rename fern/{ => v0.3.3}/pages/plugins/example.mdx (100%) rename fern/{ => v0.3.3}/pages/plugins/overview.mdx (100%) rename fern/{ => v0.3.3}/pages/quick-start.mdx (100%) rename fern/{ => v0.3.3}/pages/recipes/code-generation/text-to-python.mdx (100%) rename fern/{ => v0.3.3}/pages/recipes/code-generation/text-to-sql.mdx (100%) rename fern/{ => v0.3.3}/pages/recipes/index.mdx (100%) rename fern/{ => v0.3.3}/pages/recipes/qa-and-chat/multi-turn-chat.mdx (100%) rename fern/{ => v0.3.3}/pages/recipes/qa-and-chat/product-info-qa.mdx (100%) rename fern/{ => v0.3.3}/pages/tutorials/images-as-context.mdx (100%) rename fern/{ => v0.3.3}/pages/tutorials/overview.mdx (100%) rename fern/{ => v0.3.3}/pages/tutorials/seeding-with-dataset.mdx (100%) rename fern/{ => v0.3.3}/pages/tutorials/structured-outputs.mdx (100%) rename fern/{ => v0.3.3}/pages/tutorials/the-basics.mdx (100%) create mode 100644 fern/v0.4.0/pages/index.mdx create mode 100644 fern/versions/v0.3.3.yml create mode 100644 fern/versions/v0.4.0.yml diff --git a/fern/docs.yml b/fern/docs.yml index b599caf4..91457bfe 100644 --- a/fern/docs.yml +++ b/fern/docs.yml @@ -3,107 +3,13 @@ instances: title: NeMo Data Designer -tabs: - docs: - display-name: Documentation - slug: docs - api: - display-name: API Reference - slug: api - -navigation: - - tab: docs - layout: - - section: Getting Started - contents: - - page: Welcome - path: pages/index.mdx - - page: Installation - path: pages/installation.mdx - - page: Quick Start - path: pages/quick-start.mdx - - page: Contributing - path: pages/contributing.mdx - - section: Concepts - contents: - - section: Models - contents: - - page: Default Model Settings - path: pages/concepts/models/default-model-settings.mdx - - page: Custom Model Settings - path: pages/concepts/models/custom-model-settings.mdx - - page: Configure with CLI - path: pages/concepts/models/configure-with-cli.mdx - - page: Model Providers - path: pages/concepts/models/model-providers.mdx - - page: Model Configs - path: pages/concepts/models/model-configs.mdx - - page: Inference Parameters - path: pages/concepts/models/inference-parameters.mdx - - page: 
Columns - path: pages/concepts/columns.mdx - - page: Validators - path: pages/concepts/validators.mdx - - page: Processors - path: pages/concepts/processors.mdx - - page: Person Sampling - path: pages/concepts/person-sampling.mdx - - section: Tutorials - contents: - - page: Overview - path: pages/tutorials/overview.mdx - - page: The Basics - path: pages/tutorials/the-basics.mdx - - page: Structured Outputs - path: pages/tutorials/structured-outputs.mdx - - page: Seeding with a Dataset - path: pages/tutorials/seeding-with-dataset.mdx - - page: Images as Context - path: pages/tutorials/images-as-context.mdx - - section: Recipes - contents: - - page: Recipe Cards - path: pages/recipes/index.mdx - - section: Code Generation - contents: - - page: Text to Python - path: pages/recipes/code-generation/text-to-python.mdx - - page: Text to SQL - path: pages/recipes/code-generation/text-to-sql.mdx - - section: QA and Chat - contents: - - page: Product Info QA - path: pages/recipes/qa-and-chat/product-info-qa.mdx - - page: Multi-Turn Chat - path: pages/recipes/qa-and-chat/multi-turn-chat.mdx - - section: Plugins - contents: - - page: Overview - path: pages/plugins/overview.mdx - - page: Example Plugin - path: pages/plugins/example.mdx - - page: Available Plugins - path: pages/plugins/available.mdx - - tab: api - layout: - - section: API Reference - contents: - - page: Models - path: pages/api-reference/models.mdx - - page: Column Configs - path: pages/api-reference/column-configs.mdx - - page: Config Builder - path: pages/api-reference/config-builder.mdx - - page: Data Designer Config - path: pages/api-reference/data-designer-config.mdx - - page: Sampler Params - path: pages/api-reference/sampler-params.mdx - - page: Validator Params - path: pages/api-reference/validator-params.mdx - - page: Processors - path: pages/api-reference/processors.mdx - - page: Analysis - path: pages/api-reference/analysis.mdx +versions: + - display-name: v0.4.0 (Preview) + path: versions/v0.4.0.yml + slug: v0.4.0 + - display-name: v0.3.3 + path: versions/v0.3.3.yml + slug: v0.3.3 colors: accent-primary: diff --git a/fern/pages/api-reference/analysis.mdx b/fern/v0.3.3/pages/api-reference/analysis.mdx similarity index 100% rename from fern/pages/api-reference/analysis.mdx rename to fern/v0.3.3/pages/api-reference/analysis.mdx diff --git a/fern/pages/api-reference/column-configs.mdx b/fern/v0.3.3/pages/api-reference/column-configs.mdx similarity index 100% rename from fern/pages/api-reference/column-configs.mdx rename to fern/v0.3.3/pages/api-reference/column-configs.mdx diff --git a/fern/pages/api-reference/config-builder.mdx b/fern/v0.3.3/pages/api-reference/config-builder.mdx similarity index 100% rename from fern/pages/api-reference/config-builder.mdx rename to fern/v0.3.3/pages/api-reference/config-builder.mdx diff --git a/fern/pages/api-reference/data-designer-config.mdx b/fern/v0.3.3/pages/api-reference/data-designer-config.mdx similarity index 100% rename from fern/pages/api-reference/data-designer-config.mdx rename to fern/v0.3.3/pages/api-reference/data-designer-config.mdx diff --git a/fern/pages/api-reference/models.mdx b/fern/v0.3.3/pages/api-reference/models.mdx similarity index 100% rename from fern/pages/api-reference/models.mdx rename to fern/v0.3.3/pages/api-reference/models.mdx diff --git a/fern/pages/api-reference/processors.mdx b/fern/v0.3.3/pages/api-reference/processors.mdx similarity index 100% rename from fern/pages/api-reference/processors.mdx rename to fern/v0.3.3/pages/api-reference/processors.mdx 
diff --git a/fern/pages/api-reference/sampler-params.mdx b/fern/v0.3.3/pages/api-reference/sampler-params.mdx similarity index 100% rename from fern/pages/api-reference/sampler-params.mdx rename to fern/v0.3.3/pages/api-reference/sampler-params.mdx diff --git a/fern/pages/api-reference/validator-params.mdx b/fern/v0.3.3/pages/api-reference/validator-params.mdx similarity index 100% rename from fern/pages/api-reference/validator-params.mdx rename to fern/v0.3.3/pages/api-reference/validator-params.mdx diff --git a/fern/pages/concepts/columns.mdx b/fern/v0.3.3/pages/concepts/columns.mdx similarity index 100% rename from fern/pages/concepts/columns.mdx rename to fern/v0.3.3/pages/concepts/columns.mdx diff --git a/fern/pages/concepts/models/configure-with-cli.mdx b/fern/v0.3.3/pages/concepts/models/configure-with-cli.mdx similarity index 100% rename from fern/pages/concepts/models/configure-with-cli.mdx rename to fern/v0.3.3/pages/concepts/models/configure-with-cli.mdx diff --git a/fern/pages/concepts/models/custom-model-settings.mdx b/fern/v0.3.3/pages/concepts/models/custom-model-settings.mdx similarity index 100% rename from fern/pages/concepts/models/custom-model-settings.mdx rename to fern/v0.3.3/pages/concepts/models/custom-model-settings.mdx diff --git a/fern/pages/concepts/models/default-model-settings.mdx b/fern/v0.3.3/pages/concepts/models/default-model-settings.mdx similarity index 100% rename from fern/pages/concepts/models/default-model-settings.mdx rename to fern/v0.3.3/pages/concepts/models/default-model-settings.mdx diff --git a/fern/pages/concepts/models/inference-parameters.mdx b/fern/v0.3.3/pages/concepts/models/inference-parameters.mdx similarity index 100% rename from fern/pages/concepts/models/inference-parameters.mdx rename to fern/v0.3.3/pages/concepts/models/inference-parameters.mdx diff --git a/fern/pages/concepts/models/model-configs.mdx b/fern/v0.3.3/pages/concepts/models/model-configs.mdx similarity index 100% rename from fern/pages/concepts/models/model-configs.mdx rename to fern/v0.3.3/pages/concepts/models/model-configs.mdx diff --git a/fern/pages/concepts/models/model-providers.mdx b/fern/v0.3.3/pages/concepts/models/model-providers.mdx similarity index 100% rename from fern/pages/concepts/models/model-providers.mdx rename to fern/v0.3.3/pages/concepts/models/model-providers.mdx diff --git a/fern/pages/concepts/person-sampling.mdx b/fern/v0.3.3/pages/concepts/person-sampling.mdx similarity index 100% rename from fern/pages/concepts/person-sampling.mdx rename to fern/v0.3.3/pages/concepts/person-sampling.mdx diff --git a/fern/pages/concepts/processors.mdx b/fern/v0.3.3/pages/concepts/processors.mdx similarity index 100% rename from fern/pages/concepts/processors.mdx rename to fern/v0.3.3/pages/concepts/processors.mdx diff --git a/fern/pages/concepts/validators.mdx b/fern/v0.3.3/pages/concepts/validators.mdx similarity index 100% rename from fern/pages/concepts/validators.mdx rename to fern/v0.3.3/pages/concepts/validators.mdx diff --git a/fern/pages/contributing.mdx b/fern/v0.3.3/pages/contributing.mdx similarity index 100% rename from fern/pages/contributing.mdx rename to fern/v0.3.3/pages/contributing.mdx diff --git a/fern/pages/index.mdx b/fern/v0.3.3/pages/index.mdx similarity index 100% rename from fern/pages/index.mdx rename to fern/v0.3.3/pages/index.mdx diff --git a/fern/pages/installation.mdx b/fern/v0.3.3/pages/installation.mdx similarity index 100% rename from fern/pages/installation.mdx rename to fern/v0.3.3/pages/installation.mdx diff --git 
a/fern/pages/plugins/available.mdx b/fern/v0.3.3/pages/plugins/available.mdx similarity index 100% rename from fern/pages/plugins/available.mdx rename to fern/v0.3.3/pages/plugins/available.mdx diff --git a/fern/pages/plugins/example.mdx b/fern/v0.3.3/pages/plugins/example.mdx similarity index 100% rename from fern/pages/plugins/example.mdx rename to fern/v0.3.3/pages/plugins/example.mdx diff --git a/fern/pages/plugins/overview.mdx b/fern/v0.3.3/pages/plugins/overview.mdx similarity index 100% rename from fern/pages/plugins/overview.mdx rename to fern/v0.3.3/pages/plugins/overview.mdx diff --git a/fern/pages/quick-start.mdx b/fern/v0.3.3/pages/quick-start.mdx similarity index 100% rename from fern/pages/quick-start.mdx rename to fern/v0.3.3/pages/quick-start.mdx diff --git a/fern/pages/recipes/code-generation/text-to-python.mdx b/fern/v0.3.3/pages/recipes/code-generation/text-to-python.mdx similarity index 100% rename from fern/pages/recipes/code-generation/text-to-python.mdx rename to fern/v0.3.3/pages/recipes/code-generation/text-to-python.mdx diff --git a/fern/pages/recipes/code-generation/text-to-sql.mdx b/fern/v0.3.3/pages/recipes/code-generation/text-to-sql.mdx similarity index 100% rename from fern/pages/recipes/code-generation/text-to-sql.mdx rename to fern/v0.3.3/pages/recipes/code-generation/text-to-sql.mdx diff --git a/fern/pages/recipes/index.mdx b/fern/v0.3.3/pages/recipes/index.mdx similarity index 100% rename from fern/pages/recipes/index.mdx rename to fern/v0.3.3/pages/recipes/index.mdx diff --git a/fern/pages/recipes/qa-and-chat/multi-turn-chat.mdx b/fern/v0.3.3/pages/recipes/qa-and-chat/multi-turn-chat.mdx similarity index 100% rename from fern/pages/recipes/qa-and-chat/multi-turn-chat.mdx rename to fern/v0.3.3/pages/recipes/qa-and-chat/multi-turn-chat.mdx diff --git a/fern/pages/recipes/qa-and-chat/product-info-qa.mdx b/fern/v0.3.3/pages/recipes/qa-and-chat/product-info-qa.mdx similarity index 100% rename from fern/pages/recipes/qa-and-chat/product-info-qa.mdx rename to fern/v0.3.3/pages/recipes/qa-and-chat/product-info-qa.mdx diff --git a/fern/pages/tutorials/images-as-context.mdx b/fern/v0.3.3/pages/tutorials/images-as-context.mdx similarity index 100% rename from fern/pages/tutorials/images-as-context.mdx rename to fern/v0.3.3/pages/tutorials/images-as-context.mdx diff --git a/fern/pages/tutorials/overview.mdx b/fern/v0.3.3/pages/tutorials/overview.mdx similarity index 100% rename from fern/pages/tutorials/overview.mdx rename to fern/v0.3.3/pages/tutorials/overview.mdx diff --git a/fern/pages/tutorials/seeding-with-dataset.mdx b/fern/v0.3.3/pages/tutorials/seeding-with-dataset.mdx similarity index 100% rename from fern/pages/tutorials/seeding-with-dataset.mdx rename to fern/v0.3.3/pages/tutorials/seeding-with-dataset.mdx diff --git a/fern/pages/tutorials/structured-outputs.mdx b/fern/v0.3.3/pages/tutorials/structured-outputs.mdx similarity index 100% rename from fern/pages/tutorials/structured-outputs.mdx rename to fern/v0.3.3/pages/tutorials/structured-outputs.mdx diff --git a/fern/pages/tutorials/the-basics.mdx b/fern/v0.3.3/pages/tutorials/the-basics.mdx similarity index 100% rename from fern/pages/tutorials/the-basics.mdx rename to fern/v0.3.3/pages/tutorials/the-basics.mdx diff --git a/fern/v0.4.0/pages/index.mdx b/fern/v0.4.0/pages/index.mdx new file mode 100644 index 00000000..3e2e9fae --- /dev/null +++ b/fern/v0.4.0/pages/index.mdx @@ -0,0 +1,18 @@ +--- +title: "🎨 NeMo Data Designer v0.4.0" +description: Preview of the upcoming v0.4.0 release. 
+--- + + +This is a preview of the upcoming v0.4.0 release. Features and documentation are subject to change. + + +## What's New in v0.4.0 + +This version includes exciting new features: + +- 🚀 **Feature A** - Coming soon +- ✨ **Feature B** - Coming soon +- 🔧 **Feature C** - Coming soon + +For the current stable release, see [v0.3.3 documentation](/docs/v0.3.3). diff --git a/fern/versions/v0.3.3.yml b/fern/versions/v0.3.3.yml new file mode 100644 index 00000000..034d93aa --- /dev/null +++ b/fern/versions/v0.3.3.yml @@ -0,0 +1,101 @@ +tabs: + docs: + display-name: Documentation + slug: docs + api: + display-name: API Reference + slug: api + +navigation: + - tab: docs + layout: + - section: Getting Started + contents: + - page: Welcome + path: ../v0.3.3/pages/index.mdx + - page: Installation + path: ../v0.3.3/pages/installation.mdx + - page: Quick Start + path: ../v0.3.3/pages/quick-start.mdx + - page: Contributing + path: ../v0.3.3/pages/contributing.mdx + - section: Concepts + contents: + - section: Models + contents: + - page: Default Model Settings + path: ../v0.3.3/pages/concepts/models/default-model-settings.mdx + - page: Custom Model Settings + path: ../v0.3.3/pages/concepts/models/custom-model-settings.mdx + - page: Configure with CLI + path: ../v0.3.3/pages/concepts/models/configure-with-cli.mdx + - page: Model Providers + path: ../v0.3.3/pages/concepts/models/model-providers.mdx + - page: Model Configs + path: ../v0.3.3/pages/concepts/models/model-configs.mdx + - page: Inference Parameters + path: ../v0.3.3/pages/concepts/models/inference-parameters.mdx + - page: Columns + path: ../v0.3.3/pages/concepts/columns.mdx + - page: Validators + path: ../v0.3.3/pages/concepts/validators.mdx + - page: Processors + path: ../v0.3.3/pages/concepts/processors.mdx + - page: Person Sampling + path: ../v0.3.3/pages/concepts/person-sampling.mdx + - section: Tutorials + contents: + - page: Overview + path: ../v0.3.3/pages/tutorials/overview.mdx + - page: The Basics + path: ../v0.3.3/pages/tutorials/the-basics.mdx + - page: Structured Outputs + path: ../v0.3.3/pages/tutorials/structured-outputs.mdx + - page: Seeding with a Dataset + path: ../v0.3.3/pages/tutorials/seeding-with-dataset.mdx + - page: Images as Context + path: ../v0.3.3/pages/tutorials/images-as-context.mdx + - section: Recipes + contents: + - page: Recipe Cards + path: ../v0.3.3/pages/recipes/index.mdx + - section: Code Generation + contents: + - page: Text to Python + path: ../v0.3.3/pages/recipes/code-generation/text-to-python.mdx + - page: Text to SQL + path: ../v0.3.3/pages/recipes/code-generation/text-to-sql.mdx + - section: QA and Chat + contents: + - page: Product Info QA + path: ../v0.3.3/pages/recipes/qa-and-chat/product-info-qa.mdx + - page: Multi-Turn Chat + path: ../v0.3.3/pages/recipes/qa-and-chat/multi-turn-chat.mdx + - section: Plugins + contents: + - page: Overview + path: ../v0.3.3/pages/plugins/overview.mdx + - page: Example Plugin + path: ../v0.3.3/pages/plugins/example.mdx + - page: Available Plugins + path: ../v0.3.3/pages/plugins/available.mdx + - tab: api + layout: + - section: API Reference + contents: + - page: Models + path: ../v0.3.3/pages/api-reference/models.mdx + - page: Column Configs + path: ../v0.3.3/pages/api-reference/column-configs.mdx + - page: Config Builder + path: ../v0.3.3/pages/api-reference/config-builder.mdx + - page: Data Designer Config + path: ../v0.3.3/pages/api-reference/data-designer-config.mdx + - page: Sampler Params + path: ../v0.3.3/pages/api-reference/sampler-params.mdx + - page: 
Validator Params + path: ../v0.3.3/pages/api-reference/validator-params.mdx + - page: Processors + path: ../v0.3.3/pages/api-reference/processors.mdx + - page: Analysis + path: ../v0.3.3/pages/api-reference/analysis.mdx diff --git a/fern/versions/v0.4.0.yml b/fern/versions/v0.4.0.yml new file mode 100644 index 00000000..e6a3fcf1 --- /dev/null +++ b/fern/versions/v0.4.0.yml @@ -0,0 +1,12 @@ +tabs: + docs: + display-name: Documentation + slug: docs + +navigation: + - tab: docs + layout: + - section: Getting Started + contents: + - page: Welcome + path: ../v0.4.0/pages/index.mdx From b9ac45cbc1ad9b2ae017a77b5ce5225fc816239c Mon Sep 17 00:00:00 2001 From: Lawrence Lane Date: Wed, 14 Jan 2026 13:00:16 -0500 Subject: [PATCH 4/8] readme Signed-off-by: Lawrence Lane --- fern/README.md | 160 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 160 insertions(+) create mode 100644 fern/README.md diff --git a/fern/README.md b/fern/README.md new file mode 100644 index 00000000..5ad2221c --- /dev/null +++ b/fern/README.md @@ -0,0 +1,160 @@ +# Fern Documentation Cheat Sheet + +This folder contains the Fern Docs configuration for NeMo Data Designer. + +## 📦 Installation + +```bash +# Install Fern CLI globally +npm install -g fern-api + +# Or use npx (no install needed) +npx fern-api --version +``` + +## 🔍 Local Preview + +```bash +# From the fern/ directory +cd fern/ +fern docs dev + +# Or from project root +fern docs dev --project ./fern +``` + +The docs will be available at `http://localhost:3000`. + +## 📁 Folder Structure + +``` +fern/ +├── docs.yml # Global config (title, colors, versions) +├── fern.config.json # Fern CLI config (org name) +├── versions/ +│ ├── v0.3.3.yml # Navigation for v0.3.3 +│ └── v0.4.0.yml # Navigation for v0.4.0 +├── v0.3.3/ +│ └── pages/ # MDX content for v0.3.3 +├── v0.4.0/ +│ └── pages/ # MDX content for v0.4.0 +└── assets/ # Shared images, favicons +``` + +## 🔄 Bumping the Version + +When releasing a new version (e.g., v0.5.0): + +### 1. Copy the previous version's content +```bash +cp -r fern/v0.4.0 fern/v0.5.0 +``` + +### 2. Create the navigation file +```bash +cp fern/versions/v0.4.0.yml fern/versions/v0.5.0.yml +``` + +### 3. Update paths in `versions/v0.5.0.yml` +Change all `../v0.4.0/pages/` → `../v0.5.0/pages/` + +### 4. Add the new version to `docs.yml` +```yaml +versions: + - display-name: v0.5.0 + path: versions/v0.5.0.yml + slug: v0.5.0 + - display-name: v0.4.0 + path: versions/v0.4.0.yml + slug: v0.4.0 + # ... older versions +``` + +### 5. Make your content changes +Edit files in `fern/v0.5.0/pages/` + +## ✏️ Editing Content + +### Adding a new page + +1. Create the MDX file in the appropriate version folder: + ```bash + touch fern/v0.3.3/pages/concepts/new-feature.mdx + ``` + +2. Add frontmatter: + ```mdx + --- + title: New Feature + description: Description for SEO. + --- + + Content starts here... + ``` + +3. 
Add to navigation in `versions/v0.3.3.yml`: + ```yaml + - page: New Feature + path: ../v0.3.3/pages/concepts/new-feature.mdx + ``` + +### MDX Components + +```mdx +# Callouts +Informational note +Helpful tip +Warning message +Info callout + +# Tabs + + + ```python + print("hello") + ``` + + + ```javascript + console.log("hello") + ``` + + + +# Cards + + + Description + + +``` + +## 🚀 Deploying + +```bash +# Generate static docs (for CI/CD) +fern generate --docs + +# Deploy to Fern hosting +fern docs deploy +``` + +## 🔗 Useful Links + +- [Fern Docs](https://buildwithfern.com/learn/docs) +- [MDX Components Reference](https://buildwithfern.com/learn/docs/components) +- [Versioning Guide](https://buildwithfern.com/learn/docs/configuration/versions) +- [Navigation Configuration](https://buildwithfern.com/learn/docs/configuration/navigation) + +## ⚠️ Common Issues + +### "EISDIR: illegal operation on a directory" +- Check that all `path:` values point to `.mdx` files, not directories + +### Page not showing +- Verify the page is listed in the version's navigation file +- Check the path is correct (relative to the versions/ folder) + +### Version selector not appearing +- Ensure `versions:` is defined in `docs.yml` +- Each version needs a valid `.yml` file in `versions/` From fb293e2c2e1e7c552320c222286e4f85f6ab7a9d Mon Sep 17 00:00:00 2001 From: Lawrence Lane Date: Wed, 28 Jan 2026 15:02:27 -0500 Subject: [PATCH 5/8] org change Signed-off-by: Lawrence Lane --- fern/docs.yml | 3 --- fern/fern.config.json | 4 ++-- fern/v0.4.0/pages/index.mdx | 18 ------------------ 3 files changed, 2 insertions(+), 23 deletions(-) delete mode 100644 fern/v0.4.0/pages/index.mdx diff --git a/fern/docs.yml b/fern/docs.yml index 91457bfe..fedb5508 100644 --- a/fern/docs.yml +++ b/fern/docs.yml @@ -4,9 +4,6 @@ instances: title: NeMo Data Designer versions: - - display-name: v0.4.0 (Preview) - path: versions/v0.4.0.yml - slug: v0.4.0 - display-name: v0.3.3 path: versions/v0.3.3.yml slug: v0.3.3 diff --git a/fern/fern.config.json b/fern/fern.config.json index 3b23c05f..9f0a3e5d 100644 --- a/fern/fern.config.json +++ b/fern/fern.config.json @@ -1,4 +1,4 @@ { - "organization": "nvidia-nemo", + "organization": "nvidia", "version": "3.40.1" -} +} \ No newline at end of file diff --git a/fern/v0.4.0/pages/index.mdx b/fern/v0.4.0/pages/index.mdx deleted file mode 100644 index 3e2e9fae..00000000 --- a/fern/v0.4.0/pages/index.mdx +++ /dev/null @@ -1,18 +0,0 @@ ---- -title: "🎨 NeMo Data Designer v0.4.0" -description: Preview of the upcoming v0.4.0 release. ---- - - -This is a preview of the upcoming v0.4.0 release. Features and documentation are subject to change. - - -## What's New in v0.4.0 - -This version includes exciting new features: - -- 🚀 **Feature A** - Coming soon -- ✨ **Feature B** - Coming soon -- 🔧 **Feature C** - Coming soon - -For the current stable release, see [v0.3.3 documentation](/docs/v0.3.3). 
From 8976e97c50519585f03d82fedf6c63897209a113 Mon Sep 17 00:00:00 2001 From: Lawrence Lane Date: Fri, 13 Feb 2026 11:22:47 -0500 Subject: [PATCH 6/8] update Signed-off-by: Lawrence Lane --- Makefile | 11 + docs/colab_notebooks/1-the-basics.ipynb | 62 +- ...ctured-outputs-and-jinja-expressions.ipynb | 58 +- .../3-seeding-with-a-dataset.ipynb | 54 +- .../4-providing-images-as-context.ipynb | 66 +- .../colab_notebooks/5-generating-images.ipynb | 42 +- .../6-editing-images-with-image-context.ipynb | 54 +- fern/DEVNOTES-COMPONENTS-PLAN.md | 276 ++++++ fern/DOCS-VS-FERN.md | 90 ++ fern/README.md | 46 + .../images/deployment-enterprise-gateway.png | Bin 0 -> 59037 bytes .../deployment-library-decentralized.png | Bin 0 -> 84692 bytes .../assets/images/deployment-microservice.png | Bin 0 -> 55115 bytes fern/assets/images/design-principles-hero.png | Bin 0 -> 169247 bytes fern/assets/images/rqa-blog.png | Bin 0 -> 78757 bytes .../recipes/mcp_and_tooluse/basic_mcp.py | 240 +++++ fern/assets/recipes/mcp_and_tooluse/pdf_qa.py | 572 +++++++++++ fern/components/CustomCard.tsx | 10 + fern/components/ExpandableCode.tsx | 47 + fern/components/MetricsTable.tsx | 101 ++ fern/components/NotebookViewer.tsx | 220 +++++ fern/components/PipelineDiagram.tsx | 40 + fern/components/TrajectoryViewer.tsx | 120 +++ fern/components/diagrams/sdg-pipeline.ts | 26 + fern/components/notebooks/1-the-basics.json | 145 +++ fern/components/notebooks/1-the-basics.ts | 144 +++ ...uctured-outputs-and-jinja-expressions.json | 136 +++ ...tructured-outputs-and-jinja-expressions.ts | 135 +++ .../notebooks/3-seeding-with-a-dataset.json | 127 +++ .../notebooks/3-seeding-with-a-dataset.ts | 126 +++ .../4-providing-images-as-context.json | 156 +++ .../4-providing-images-as-context.ts | 155 +++ .../notebooks/5-generating-images.json | 100 ++ .../notebooks/5-generating-images.ts | 99 ++ .../6-editing-images-with-image-context.json | 128 +++ .../6-editing-images-with-image-context.ts | 127 +++ fern/components/trajectories/4hop-example.ts | 190 ++++ fern/docs.yml | 14 + fern/fern.config.json | 4 +- fern/scripts/ipynb-to-fern-json.py | 93 ++ fern/styles/expandable-code.css | 43 + fern/styles/metrics-table.css | 32 + fern/styles/notebook-viewer.css | 117 +++ fern/styles/pipeline-diagram.css | 31 + fern/styles/trajectory-viewer.css | 163 ++++ fern/v0.5.0/pages/api-reference/analysis.mdx | 171 ++++ .../pages/api-reference/column-configs.mdx | 252 +++++ .../pages/api-reference/config-builder.mdx | 496 ++++++++++ .../api-reference/data-designer-config.mdx | 110 +++ fern/v0.5.0/pages/api-reference/mcp.mdx | 173 ++++ fern/v0.5.0/pages/api-reference/models.mdx | 116 +++ .../v0.5.0/pages/api-reference/processors.mdx | 147 +++ .../v0.5.0/pages/api-reference/run-config.mdx | 101 ++ .../pages/api-reference/sampler-params.mdx | 273 ++++++ .../pages/api-reference/validator-params.mdx | 167 ++++ .../concepts/architecture-and-performance.mdx | 221 +++++ fern/v0.5.0/pages/concepts/columns.mdx | 228 +++++ fern/v0.5.0/pages/concepts/custom-columns.mdx | 132 +++ .../pages/concepts/deployment-options.mdx | 171 ++++ .../pages/concepts/mcp/configure-mcp-cli.mdx | 142 +++ .../pages/concepts/mcp/enabling-tools.mdx | 108 +++ .../pages/concepts/mcp/mcp-providers.mdx | 147 +++ .../pages/concepts/mcp/safety-and-limits.mdx | 148 +++ .../pages/concepts/mcp/tool-configs.mdx | 116 +++ .../concepts/models/configure-with-cli.mdx | 148 +++ .../concepts/models/custom-model-settings.mdx | 220 +++++ .../models/default-model-settings.mdx | 130 +++ 
.../concepts/models/inference-parameters.mdx | 190 ++++ .../pages/concepts/models/model-configs.mdx | 157 +++ .../pages/concepts/models/model-providers.mdx | 56 ++ .../v0.5.0/pages/concepts/person-sampling.mdx | 245 +++++ fern/v0.5.0/pages/concepts/processors.mdx | 165 ++++ fern/v0.5.0/pages/concepts/seed-datasets.mdx | 265 +++++ .../pages/concepts/tool-use-and-mcp.mdx | 72 ++ fern/v0.5.0/pages/concepts/traces.mdx | 218 +++++ fern/v0.5.0/pages/concepts/validators.mdx | 341 +++++++ fern/v0.5.0/pages/contributing.mdx | 239 +++++ .../devnotes/deep-research-trajectories.mdx | 910 ++++++++++++++++++ .../pages/devnotes/design-principles.mdx | 149 +++ fern/v0.5.0/pages/devnotes/index.mdx | 30 + fern/v0.5.0/pages/devnotes/rqa.mdx | 240 +++++ fern/v0.5.0/pages/index.mdx | 112 +++ fern/v0.5.0/pages/installation.mdx | 36 + fern/v0.5.0/pages/plugins/available.mdx | 6 + fern/v0.5.0/pages/plugins/example.mdx | 283 ++++++ fern/v0.5.0/pages/plugins/overview.mdx | 68 ++ fern/v0.5.0/pages/quick-start.mdx | 84 ++ .../code-generation/text-to-python.mdx | 292 ++++++ .../recipes/code-generation/text-to-sql.mdx | 320 ++++++ fern/v0.5.0/pages/recipes/index.mdx | 103 ++ .../recipes/mcp-and-tooluse/basic-mcp.mdx | 280 ++++++ .../pages/recipes/mcp-and-tooluse/pdf-qa.mdx | 473 +++++++++ .../recipes/qa-and-chat/multi-turn-chat.mdx | 205 ++++ .../recipes/qa-and-chat/product-info-qa.mdx | 223 +++++ .../tutorials/editing-images-notebook.mdx | 13 + .../tutorials/generating-images-notebook.mdx | 13 + .../tutorials/images-as-context-notebook.mdx | 13 + .../pages/tutorials/images-as-context.mdx | 272 ++++++ fern/v0.5.0/pages/tutorials/overview.mdx | 142 +++ .../seeding-with-dataset-notebook.mdx | 13 + .../pages/tutorials/seeding-with-dataset.mdx | 261 +++++ .../tutorials/structured-outputs-notebook.mdx | 13 + .../pages/tutorials/structured-outputs.mdx | 304 ++++++ .../pages/tutorials/the-basics-notebook.mdx | 12 + fern/v0.5.0/pages/tutorials/the-basics.mdx | 286 ++++++ fern/versions/v0.5.0.yml | 157 +++ 106 files changed, 15408 insertions(+), 170 deletions(-) create mode 100644 fern/DEVNOTES-COMPONENTS-PLAN.md create mode 100644 fern/DOCS-VS-FERN.md create mode 100644 fern/assets/images/deployment-enterprise-gateway.png create mode 100644 fern/assets/images/deployment-library-decentralized.png create mode 100644 fern/assets/images/deployment-microservice.png create mode 100644 fern/assets/images/design-principles-hero.png create mode 100644 fern/assets/images/rqa-blog.png create mode 100644 fern/assets/recipes/mcp_and_tooluse/basic_mcp.py create mode 100644 fern/assets/recipes/mcp_and_tooluse/pdf_qa.py create mode 100644 fern/components/CustomCard.tsx create mode 100644 fern/components/ExpandableCode.tsx create mode 100644 fern/components/MetricsTable.tsx create mode 100644 fern/components/NotebookViewer.tsx create mode 100644 fern/components/PipelineDiagram.tsx create mode 100644 fern/components/TrajectoryViewer.tsx create mode 100644 fern/components/diagrams/sdg-pipeline.ts create mode 100644 fern/components/notebooks/1-the-basics.json create mode 100644 fern/components/notebooks/1-the-basics.ts create mode 100644 fern/components/notebooks/2-structured-outputs-and-jinja-expressions.json create mode 100644 fern/components/notebooks/2-structured-outputs-and-jinja-expressions.ts create mode 100644 fern/components/notebooks/3-seeding-with-a-dataset.json create mode 100644 fern/components/notebooks/3-seeding-with-a-dataset.ts create mode 100644 fern/components/notebooks/4-providing-images-as-context.json create mode 100644 
fern/components/notebooks/4-providing-images-as-context.ts create mode 100644 fern/components/notebooks/5-generating-images.json create mode 100644 fern/components/notebooks/5-generating-images.ts create mode 100644 fern/components/notebooks/6-editing-images-with-image-context.json create mode 100644 fern/components/notebooks/6-editing-images-with-image-context.ts create mode 100644 fern/components/trajectories/4hop-example.ts create mode 100644 fern/scripts/ipynb-to-fern-json.py create mode 100644 fern/styles/expandable-code.css create mode 100644 fern/styles/metrics-table.css create mode 100644 fern/styles/notebook-viewer.css create mode 100644 fern/styles/pipeline-diagram.css create mode 100644 fern/styles/trajectory-viewer.css create mode 100644 fern/v0.5.0/pages/api-reference/analysis.mdx create mode 100644 fern/v0.5.0/pages/api-reference/column-configs.mdx create mode 100644 fern/v0.5.0/pages/api-reference/config-builder.mdx create mode 100644 fern/v0.5.0/pages/api-reference/data-designer-config.mdx create mode 100644 fern/v0.5.0/pages/api-reference/mcp.mdx create mode 100644 fern/v0.5.0/pages/api-reference/models.mdx create mode 100644 fern/v0.5.0/pages/api-reference/processors.mdx create mode 100644 fern/v0.5.0/pages/api-reference/run-config.mdx create mode 100644 fern/v0.5.0/pages/api-reference/sampler-params.mdx create mode 100644 fern/v0.5.0/pages/api-reference/validator-params.mdx create mode 100644 fern/v0.5.0/pages/concepts/architecture-and-performance.mdx create mode 100644 fern/v0.5.0/pages/concepts/columns.mdx create mode 100644 fern/v0.5.0/pages/concepts/custom-columns.mdx create mode 100644 fern/v0.5.0/pages/concepts/deployment-options.mdx create mode 100644 fern/v0.5.0/pages/concepts/mcp/configure-mcp-cli.mdx create mode 100644 fern/v0.5.0/pages/concepts/mcp/enabling-tools.mdx create mode 100644 fern/v0.5.0/pages/concepts/mcp/mcp-providers.mdx create mode 100644 fern/v0.5.0/pages/concepts/mcp/safety-and-limits.mdx create mode 100644 fern/v0.5.0/pages/concepts/mcp/tool-configs.mdx create mode 100644 fern/v0.5.0/pages/concepts/models/configure-with-cli.mdx create mode 100644 fern/v0.5.0/pages/concepts/models/custom-model-settings.mdx create mode 100644 fern/v0.5.0/pages/concepts/models/default-model-settings.mdx create mode 100644 fern/v0.5.0/pages/concepts/models/inference-parameters.mdx create mode 100644 fern/v0.5.0/pages/concepts/models/model-configs.mdx create mode 100644 fern/v0.5.0/pages/concepts/models/model-providers.mdx create mode 100644 fern/v0.5.0/pages/concepts/person-sampling.mdx create mode 100644 fern/v0.5.0/pages/concepts/processors.mdx create mode 100644 fern/v0.5.0/pages/concepts/seed-datasets.mdx create mode 100644 fern/v0.5.0/pages/concepts/tool-use-and-mcp.mdx create mode 100644 fern/v0.5.0/pages/concepts/traces.mdx create mode 100644 fern/v0.5.0/pages/concepts/validators.mdx create mode 100644 fern/v0.5.0/pages/contributing.mdx create mode 100644 fern/v0.5.0/pages/devnotes/deep-research-trajectories.mdx create mode 100644 fern/v0.5.0/pages/devnotes/design-principles.mdx create mode 100644 fern/v0.5.0/pages/devnotes/index.mdx create mode 100644 fern/v0.5.0/pages/devnotes/rqa.mdx create mode 100644 fern/v0.5.0/pages/index.mdx create mode 100644 fern/v0.5.0/pages/installation.mdx create mode 100644 fern/v0.5.0/pages/plugins/available.mdx create mode 100644 fern/v0.5.0/pages/plugins/example.mdx create mode 100644 fern/v0.5.0/pages/plugins/overview.mdx create mode 100644 fern/v0.5.0/pages/quick-start.mdx create mode 100644 
fern/v0.5.0/pages/recipes/code-generation/text-to-python.mdx create mode 100644 fern/v0.5.0/pages/recipes/code-generation/text-to-sql.mdx create mode 100644 fern/v0.5.0/pages/recipes/index.mdx create mode 100644 fern/v0.5.0/pages/recipes/mcp-and-tooluse/basic-mcp.mdx create mode 100644 fern/v0.5.0/pages/recipes/mcp-and-tooluse/pdf-qa.mdx create mode 100644 fern/v0.5.0/pages/recipes/qa-and-chat/multi-turn-chat.mdx create mode 100644 fern/v0.5.0/pages/recipes/qa-and-chat/product-info-qa.mdx create mode 100644 fern/v0.5.0/pages/tutorials/editing-images-notebook.mdx create mode 100644 fern/v0.5.0/pages/tutorials/generating-images-notebook.mdx create mode 100644 fern/v0.5.0/pages/tutorials/images-as-context-notebook.mdx create mode 100644 fern/v0.5.0/pages/tutorials/images-as-context.mdx create mode 100644 fern/v0.5.0/pages/tutorials/overview.mdx create mode 100644 fern/v0.5.0/pages/tutorials/seeding-with-dataset-notebook.mdx create mode 100644 fern/v0.5.0/pages/tutorials/seeding-with-dataset.mdx create mode 100644 fern/v0.5.0/pages/tutorials/structured-outputs-notebook.mdx create mode 100644 fern/v0.5.0/pages/tutorials/structured-outputs.mdx create mode 100644 fern/v0.5.0/pages/tutorials/the-basics-notebook.mdx create mode 100644 fern/v0.5.0/pages/tutorials/the-basics.mdx create mode 100644 fern/versions/v0.5.0.yml diff --git a/Makefile b/Makefile index 28abe8e6..035ca1ef 100644 --- a/Makefile +++ b/Makefile @@ -473,6 +473,17 @@ generate-colab-notebooks: uv run --group docs python docs/scripts/generate_colab_notebooks.py @echo "✅ Colab notebooks created in docs/colab_notebooks/" +generate-fern-notebooks: + @echo "📓 Converting notebooks to Fern format for NotebookViewer..." + @mkdir -p fern/components/notebooks + @for f in docs/colab_notebooks/*.ipynb; do \ + if [ -f "$$f" ]; then \ + name=$$(basename "$$f" .ipynb); \ + python fern/scripts/ipynb-to-fern-json.py "$$f" -o fern/components/notebooks/$$name.json; \ + fi; \ + done + @echo "✅ Fern notebooks created in fern/components/notebooks/" + # ============================================================================== # PERFORMANCE # ============================================================================== diff --git a/docs/colab_notebooks/1-the-basics.ipynb b/docs/colab_notebooks/1-the-basics.ipynb index 9a2456e6..ef6af443 100644 --- a/docs/colab_notebooks/1-the-basics.ipynb +++ b/docs/colab_notebooks/1-the-basics.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "markdown", - "id": "00c21026", + "id": "2a24b4d3", "metadata": {}, "source": [ "# 🎨 Data Designer Tutorial: The Basics\n", @@ -14,7 +14,7 @@ }, { "cell_type": "markdown", - "id": "ece3d9a9", + "id": "3e76b164", "metadata": {}, "source": [ "### 📦 Import Data Designer\n", @@ -26,7 +26,7 @@ }, { "cell_type": "markdown", - "id": "38d1b88f", + "id": "8178f225", "metadata": {}, "source": [ "### ⚡ Colab Setup\n", @@ -37,7 +37,7 @@ { "cell_type": "code", "execution_count": null, - "id": "53321634", + "id": "5bf5f5b3", "metadata": {}, "outputs": [], "source": [ @@ -48,7 +48,7 @@ { "cell_type": "code", "execution_count": null, - "id": "5e8544d6", + "id": "dd0db080", "metadata": {}, "outputs": [], "source": [ @@ -66,7 +66,7 @@ { "cell_type": "code", "execution_count": null, - "id": "4a9e48bc", + "id": "13a21b35", "metadata": {}, "outputs": [], "source": [ @@ -76,7 +76,7 @@ }, { "cell_type": "markdown", - "id": "21b12719", + "id": "d5f0a5a7", "metadata": {}, "source": [ "### ⚙️ Initialize the Data Designer interface\n", @@ -89,7 +89,7 @@ { "cell_type": "code", "execution_count": null, - 
"id": "7d689c22", + "id": "c87c7075", "metadata": {}, "outputs": [], "source": [ @@ -98,7 +98,7 @@ }, { "cell_type": "markdown", - "id": "3db3eab3", + "id": "b3e7e6a6", "metadata": {}, "source": [ "### 🎛️ Define model configurations\n", @@ -115,7 +115,7 @@ { "cell_type": "code", "execution_count": null, - "id": "f4447bbe", + "id": "218ecb24", "metadata": {}, "outputs": [], "source": [ @@ -145,7 +145,7 @@ }, { "cell_type": "markdown", - "id": "b5af9991", + "id": "180f86ce", "metadata": {}, "source": [ "### 🏗️ Initialize the Data Designer Config Builder\n", @@ -160,7 +160,7 @@ { "cell_type": "code", "execution_count": null, - "id": "40bdb697", + "id": "72fbb51a", "metadata": {}, "outputs": [], "source": [ @@ -169,7 +169,7 @@ }, { "cell_type": "markdown", - "id": "4dad8aa0", + "id": "61c70393", "metadata": {}, "source": [ "## 🎲 Getting started with sampler columns\n", @@ -186,7 +186,7 @@ { "cell_type": "code", "execution_count": null, - "id": "8eecf6e8", + "id": "037dced5", "metadata": {}, "outputs": [], "source": [ @@ -195,7 +195,7 @@ }, { "cell_type": "markdown", - "id": "e4d6a23a", + "id": "7fec32fe", "metadata": {}, "source": [ "Let's start designing our product review dataset by adding product category and subcategory columns.\n" @@ -204,7 +204,7 @@ { "cell_type": "code", "execution_count": null, - "id": "c3ce7276", + "id": "ce41fe08", "metadata": {}, "outputs": [], "source": [ @@ -285,7 +285,7 @@ }, { "cell_type": "markdown", - "id": "a8aafd2c", + "id": "1b94851d", "metadata": {}, "source": [ "Next, let's add samplers to generate data related to the customer and their review.\n" @@ -294,7 +294,7 @@ { "cell_type": "code", "execution_count": null, - "id": "3bdb3991", + "id": "bcaba433", "metadata": {}, "outputs": [], "source": [ @@ -331,7 +331,7 @@ }, { "cell_type": "markdown", - "id": "743bb645", + "id": "fd91aaf2", "metadata": {}, "source": [ "## 🦜 LLM-generated columns\n", @@ -346,7 +346,7 @@ { "cell_type": "code", "execution_count": null, - "id": "da2b9677", + "id": "7a5f3221", "metadata": {}, "outputs": [], "source": [ @@ -382,7 +382,7 @@ }, { "cell_type": "markdown", - "id": "febed040", + "id": "de0e26f8", "metadata": {}, "source": [ "### 🔁 Iteration is key – preview the dataset!\n", @@ -399,7 +399,7 @@ { "cell_type": "code", "execution_count": null, - "id": "af574e1c", + "id": "1d5a0701", "metadata": {}, "outputs": [], "source": [ @@ -409,7 +409,7 @@ { "cell_type": "code", "execution_count": null, - "id": "c5cddea8", + "id": "28267bd7", "metadata": {}, "outputs": [], "source": [ @@ -420,7 +420,7 @@ { "cell_type": "code", "execution_count": null, - "id": "523da02f", + "id": "f9412b20", "metadata": {}, "outputs": [], "source": [ @@ -430,7 +430,7 @@ }, { "cell_type": "markdown", - "id": "b58b6a23", + "id": "7cfeff69", "metadata": {}, "source": [ "### 📊 Analyze the generated data\n", @@ -443,7 +443,7 @@ { "cell_type": "code", "execution_count": null, - "id": "26b9a54a", + "id": "ce9f959b", "metadata": {}, "outputs": [], "source": [ @@ -453,7 +453,7 @@ }, { "cell_type": "markdown", - "id": "ae2f9efe", + "id": "9aaec768", "metadata": {}, "source": [ "### 🆙 Scale up!\n", @@ -466,7 +466,7 @@ { "cell_type": "code", "execution_count": null, - "id": "d8341c24", + "id": "ce89c502", "metadata": {}, "outputs": [], "source": [ @@ -476,7 +476,7 @@ { "cell_type": "code", "execution_count": null, - "id": "746166bb", + "id": "86b4d4b8", "metadata": {}, "outputs": [], "source": [ @@ -489,7 +489,7 @@ { "cell_type": "code", "execution_count": null, - "id": "4c67992b", + "id": "52f859b6", "metadata": {}, 
"outputs": [], "source": [ @@ -501,7 +501,7 @@ }, { "cell_type": "markdown", - "id": "65da8b83", + "id": "089f4cc0", "metadata": {}, "source": [ "## ⏭️ Next Steps\n", diff --git a/docs/colab_notebooks/2-structured-outputs-and-jinja-expressions.ipynb b/docs/colab_notebooks/2-structured-outputs-and-jinja-expressions.ipynb index 75e2d72d..ee852c7a 100644 --- a/docs/colab_notebooks/2-structured-outputs-and-jinja-expressions.ipynb +++ b/docs/colab_notebooks/2-structured-outputs-and-jinja-expressions.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "markdown", - "id": "3d5ec9c5", + "id": "9edb12b1", "metadata": {}, "source": [ "# 🎨 Data Designer Tutorial: Structured Outputs and Jinja Expressions\n", @@ -16,7 +16,7 @@ }, { "cell_type": "markdown", - "id": "3813ccb2", + "id": "44ae499b", "metadata": {}, "source": [ "### 📦 Import Data Designer\n", @@ -28,7 +28,7 @@ }, { "cell_type": "markdown", - "id": "86173a51", + "id": "57750043", "metadata": {}, "source": [ "### ⚡ Colab Setup\n", @@ -39,7 +39,7 @@ { "cell_type": "code", "execution_count": null, - "id": "6ee5a0e0", + "id": "f69d18e2", "metadata": {}, "outputs": [], "source": [ @@ -50,7 +50,7 @@ { "cell_type": "code", "execution_count": null, - "id": "87742e65", + "id": "fe4eace1", "metadata": {}, "outputs": [], "source": [ @@ -68,7 +68,7 @@ { "cell_type": "code", "execution_count": null, - "id": "450a862c", + "id": "299ade8e", "metadata": {}, "outputs": [], "source": [ @@ -78,7 +78,7 @@ }, { "cell_type": "markdown", - "id": "8f06cd05", + "id": "8fddaab5", "metadata": {}, "source": [ "### ⚙️ Initialize the Data Designer interface\n", @@ -91,7 +91,7 @@ { "cell_type": "code", "execution_count": null, - "id": "9a880c00", + "id": "0186d4a9", "metadata": {}, "outputs": [], "source": [ @@ -100,7 +100,7 @@ }, { "cell_type": "markdown", - "id": "d862ae5c", + "id": "c58fa496", "metadata": {}, "source": [ "### 🎛️ Define model configurations\n", @@ -117,7 +117,7 @@ { "cell_type": "code", "execution_count": null, - "id": "84e6f76a", + "id": "70f4ace2", "metadata": {}, "outputs": [], "source": [ @@ -147,7 +147,7 @@ }, { "cell_type": "markdown", - "id": "07b038aa", + "id": "36530024", "metadata": {}, "source": [ "### 🏗️ Initialize the Data Designer Config Builder\n", @@ -162,7 +162,7 @@ { "cell_type": "code", "execution_count": null, - "id": "b7e42df4", + "id": "905f2f4b", "metadata": {}, "outputs": [], "source": [ @@ -171,7 +171,7 @@ }, { "cell_type": "markdown", - "id": "600127e0", + "id": "7cf2e515", "metadata": {}, "source": [ "### 🧑‍🎨 Designing our data\n", @@ -198,7 +198,7 @@ { "cell_type": "code", "execution_count": null, - "id": "ecebc077", + "id": "6dff3604", "metadata": {}, "outputs": [], "source": [ @@ -226,7 +226,7 @@ }, { "cell_type": "markdown", - "id": "6f24c511", + "id": "4234fd35", "metadata": {}, "source": [ "Next, let's design our product review dataset using a few more tricks compared to the previous notebook.\n" @@ -235,7 +235,7 @@ { "cell_type": "code", "execution_count": null, - "id": "6cd4a4a5", + "id": "88ba4646", "metadata": {}, "outputs": [], "source": [ @@ -344,7 +344,7 @@ }, { "cell_type": "markdown", - "id": "3fa250c7", + "id": "c4e716b6", "metadata": {}, "source": [ "Next, we will use more advanced Jinja expressions to create new columns.\n", @@ -361,7 +361,7 @@ { "cell_type": "code", "execution_count": null, - "id": "77895d82", + "id": "e5d763ef", "metadata": {}, "outputs": [], "source": [ @@ -414,7 +414,7 @@ }, { "cell_type": "markdown", - "id": "236f32c0", + "id": "6d009906", "metadata": {}, "source": [ "### 🔁 Iteration is key 
– preview the dataset!\n", @@ -431,7 +431,7 @@ { "cell_type": "code", "execution_count": null, - "id": "719d3d7f", + "id": "93d03e0c", "metadata": {}, "outputs": [], "source": [ @@ -441,7 +441,7 @@ { "cell_type": "code", "execution_count": null, - "id": "d25b2a23", + "id": "5db37270", "metadata": {}, "outputs": [], "source": [ @@ -452,7 +452,7 @@ { "cell_type": "code", "execution_count": null, - "id": "8cfff7c2", + "id": "b00a32a1", "metadata": {}, "outputs": [], "source": [ @@ -462,7 +462,7 @@ }, { "cell_type": "markdown", - "id": "acfc4317", + "id": "fea40d20", "metadata": {}, "source": [ "### 📊 Analyze the generated data\n", @@ -475,7 +475,7 @@ { "cell_type": "code", "execution_count": null, - "id": "02a90c0a", + "id": "abde79b4", "metadata": {}, "outputs": [], "source": [ @@ -485,7 +485,7 @@ }, { "cell_type": "markdown", - "id": "60bac583", + "id": "034fa4e0", "metadata": {}, "source": [ "### 🆙 Scale up!\n", @@ -498,7 +498,7 @@ { "cell_type": "code", "execution_count": null, - "id": "fd92ca3c", + "id": "4d0dee23", "metadata": {}, "outputs": [], "source": [ @@ -508,7 +508,7 @@ { "cell_type": "code", "execution_count": null, - "id": "ca5eded6", + "id": "b9e7aa79", "metadata": {}, "outputs": [], "source": [ @@ -521,7 +521,7 @@ { "cell_type": "code", "execution_count": null, - "id": "29f4b884", + "id": "2ae9984d", "metadata": {}, "outputs": [], "source": [ @@ -533,7 +533,7 @@ }, { "cell_type": "markdown", - "id": "18914be2", + "id": "69cdeafb", "metadata": {}, "source": [ "## ⏭️ Next Steps\n", diff --git a/docs/colab_notebooks/3-seeding-with-a-dataset.ipynb b/docs/colab_notebooks/3-seeding-with-a-dataset.ipynb index 91c13986..d4178400 100644 --- a/docs/colab_notebooks/3-seeding-with-a-dataset.ipynb +++ b/docs/colab_notebooks/3-seeding-with-a-dataset.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "markdown", - "id": "30b0205f", + "id": "5f8a3321", "metadata": {}, "source": [ "# 🎨 Data Designer Tutorial: Seeding Synthetic Data Generation with an External Dataset\n", @@ -16,7 +16,7 @@ }, { "cell_type": "markdown", - "id": "fd7184e7", + "id": "ebfd9603", "metadata": {}, "source": [ "### 📦 Import Data Designer\n", @@ -28,7 +28,7 @@ }, { "cell_type": "markdown", - "id": "f229a5f3", + "id": "88bb567e", "metadata": {}, "source": [ "### ⚡ Colab Setup\n", @@ -39,7 +39,7 @@ { "cell_type": "code", "execution_count": null, - "id": "3cfdeadf", + "id": "38896128", "metadata": {}, "outputs": [], "source": [ @@ -50,7 +50,7 @@ { "cell_type": "code", "execution_count": null, - "id": "8ad3bee9", + "id": "0e1906e0", "metadata": {}, "outputs": [], "source": [ @@ -68,7 +68,7 @@ { "cell_type": "code", "execution_count": null, - "id": "b7a8d675", + "id": "3eeb8d05", "metadata": {}, "outputs": [], "source": [ @@ -78,7 +78,7 @@ }, { "cell_type": "markdown", - "id": "e52b2806", + "id": "b3aeabba", "metadata": {}, "source": [ "### ⚙️ Initialize the Data Designer interface\n", @@ -91,7 +91,7 @@ { "cell_type": "code", "execution_count": null, - "id": "21ad21d1", + "id": "2dbc44e4", "metadata": {}, "outputs": [], "source": [ @@ -100,7 +100,7 @@ }, { "cell_type": "markdown", - "id": "e313e1c7", + "id": "14306419", "metadata": {}, "source": [ "### 🎛️ Define model configurations\n", @@ -117,7 +117,7 @@ { "cell_type": "code", "execution_count": null, - "id": "5927e232", + "id": "e177a323", "metadata": {}, "outputs": [], "source": [ @@ -147,7 +147,7 @@ }, { "cell_type": "markdown", - "id": "3fe284f0", + "id": "09feb19e", "metadata": {}, "source": [ "### 🏗️ Initialize the Data Designer Config Builder\n", @@ -162,7 +162,7 @@ { 
"cell_type": "code", "execution_count": null, - "id": "0475564b", + "id": "dc0a824d", "metadata": {}, "outputs": [], "source": [ @@ -171,7 +171,7 @@ }, { "cell_type": "markdown", - "id": "588837c2", + "id": "4faa9a68", "metadata": {}, "source": [ "## 🏥 Prepare a seed dataset\n", @@ -196,7 +196,7 @@ { "cell_type": "code", "execution_count": null, - "id": "e8dfb164", + "id": "b5deb848", "metadata": {}, "outputs": [], "source": [ @@ -214,7 +214,7 @@ }, { "cell_type": "markdown", - "id": "ca5f46ea", + "id": "2e72b89b", "metadata": {}, "source": [ "## 🎨 Designing our synthetic patient notes dataset\n", @@ -227,7 +227,7 @@ { "cell_type": "code", "execution_count": null, - "id": "830810e8", + "id": "01cb7b88", "metadata": {}, "outputs": [], "source": [ @@ -308,7 +308,7 @@ }, { "cell_type": "markdown", - "id": "cbb1e2ad", + "id": "11989122", "metadata": {}, "source": [ "### 🔁 Iteration is key – preview the dataset!\n", @@ -325,7 +325,7 @@ { "cell_type": "code", "execution_count": null, - "id": "f9c39104", + "id": "5ba842f3", "metadata": {}, "outputs": [], "source": [ @@ -335,7 +335,7 @@ { "cell_type": "code", "execution_count": null, - "id": "5750e220", + "id": "166b02dd", "metadata": {}, "outputs": [], "source": [ @@ -346,7 +346,7 @@ { "cell_type": "code", "execution_count": null, - "id": "b3573753", + "id": "f9a74a7c", "metadata": {}, "outputs": [], "source": [ @@ -356,7 +356,7 @@ }, { "cell_type": "markdown", - "id": "14937896", + "id": "69628101", "metadata": {}, "source": [ "### 📊 Analyze the generated data\n", @@ -369,7 +369,7 @@ { "cell_type": "code", "execution_count": null, - "id": "cd3adb37", + "id": "abba1989", "metadata": {}, "outputs": [], "source": [ @@ -379,7 +379,7 @@ }, { "cell_type": "markdown", - "id": "aa4fee79", + "id": "bb462c63", "metadata": {}, "source": [ "### 🆙 Scale up!\n", @@ -392,7 +392,7 @@ { "cell_type": "code", "execution_count": null, - "id": "29024ffc", + "id": "966bf86e", "metadata": {}, "outputs": [], "source": [ @@ -402,7 +402,7 @@ { "cell_type": "code", "execution_count": null, - "id": "73da6149", + "id": "395c0e5b", "metadata": {}, "outputs": [], "source": [ @@ -415,7 +415,7 @@ { "cell_type": "code", "execution_count": null, - "id": "bc2f927d", + "id": "0e254d59", "metadata": {}, "outputs": [], "source": [ @@ -427,7 +427,7 @@ }, { "cell_type": "markdown", - "id": "29990c5d", + "id": "05a97070", "metadata": {}, "source": [ "## ⏭️ Next Steps\n", diff --git a/docs/colab_notebooks/4-providing-images-as-context.ipynb b/docs/colab_notebooks/4-providing-images-as-context.ipynb index cc10ec63..4771e543 100644 --- a/docs/colab_notebooks/4-providing-images-as-context.ipynb +++ b/docs/colab_notebooks/4-providing-images-as-context.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "markdown", - "id": "911877e5", + "id": "badc7c3c", "metadata": {}, "source": [ "# 🎨 Data Designer Tutorial: Providing Images as Context for Vision-Based Data Generation" @@ -10,7 +10,7 @@ }, { "cell_type": "markdown", - "id": "c6756afd", + "id": "95f37174", "metadata": {}, "source": [ "#### 📚 What you'll learn\n", @@ -25,7 +25,7 @@ }, { "cell_type": "markdown", - "id": "d73b25ce", + "id": "4e5edd38", "metadata": {}, "source": [ "### 📦 Import Data Designer\n", @@ -37,7 +37,7 @@ }, { "cell_type": "markdown", - "id": "f05ece3e", + "id": "a10cc70d", "metadata": {}, "source": [ "### ⚡ Colab Setup\n", @@ -48,7 +48,7 @@ { "cell_type": "code", "execution_count": null, - "id": "d84f4489", + "id": "06094049", "metadata": {}, "outputs": [], "source": [ @@ -59,7 +59,7 @@ { "cell_type": "code", 
"execution_count": null, - "id": "5e4cc2d4", + "id": "3de8fe04", "metadata": {}, "outputs": [], "source": [ @@ -77,7 +77,7 @@ { "cell_type": "code", "execution_count": null, - "id": "4e4e8d45", + "id": "947c8ca8", "metadata": {}, "outputs": [], "source": [ @@ -100,7 +100,7 @@ }, { "cell_type": "markdown", - "id": "0cdd2a8a", + "id": "8417ed40", "metadata": {}, "source": [ "### ⚙️ Initialize the Data Designer interface\n", @@ -113,7 +113,7 @@ { "cell_type": "code", "execution_count": null, - "id": "4bb0ca16", + "id": "3e4783c2", "metadata": {}, "outputs": [], "source": [ @@ -122,7 +122,7 @@ }, { "cell_type": "markdown", - "id": "bd17820d", + "id": "9cca8959", "metadata": {}, "source": [ "### 🎛️ Define model configurations\n", @@ -139,7 +139,7 @@ { "cell_type": "code", "execution_count": null, - "id": "301f2bd2", + "id": "546ced2c", "metadata": {}, "outputs": [], "source": [ @@ -162,7 +162,7 @@ }, { "cell_type": "markdown", - "id": "ad04f82a", + "id": "e02918e5", "metadata": {}, "source": [ "### 🏗️ Initialize the Data Designer Config Builder\n", @@ -177,7 +177,7 @@ { "cell_type": "code", "execution_count": null, - "id": "ac8e2885", + "id": "684816d4", "metadata": {}, "outputs": [], "source": [ @@ -186,7 +186,7 @@ }, { "cell_type": "markdown", - "id": "7b8aafc0", + "id": "b5dd8bb7", "metadata": {}, "source": [ "### 🌱 Seed Dataset Creation\n", @@ -203,7 +203,7 @@ { "cell_type": "code", "execution_count": null, - "id": "432edd4a", + "id": "af5cbcf2", "metadata": {}, "outputs": [], "source": [ @@ -218,7 +218,7 @@ { "cell_type": "code", "execution_count": null, - "id": "c4f94627", + "id": "0a1ac12c", "metadata": {}, "outputs": [], "source": [ @@ -266,7 +266,7 @@ { "cell_type": "code", "execution_count": null, - "id": "9b697311", + "id": "4ddd2e32", "metadata": {}, "outputs": [], "source": [ @@ -284,7 +284,7 @@ { "cell_type": "code", "execution_count": null, - "id": "bcfc97e8", + "id": "2171f019", "metadata": {}, "outputs": [], "source": [ @@ -294,7 +294,7 @@ { "cell_type": "code", "execution_count": null, - "id": "0a3bdc13", + "id": "5d3fcba6", "metadata": {}, "outputs": [], "source": [ @@ -306,7 +306,7 @@ { "cell_type": "code", "execution_count": null, - "id": "f9665355", + "id": "81e1217b", "metadata": {}, "outputs": [], "source": [ @@ -335,7 +335,7 @@ }, { "cell_type": "markdown", - "id": "6d900aaa", + "id": "684a5a40", "metadata": {}, "source": [ "### 🔁 Iteration is key – preview the dataset!\n", @@ -352,7 +352,7 @@ { "cell_type": "code", "execution_count": null, - "id": "51a80346", + "id": "a30e634e", "metadata": {}, "outputs": [], "source": [ @@ -362,7 +362,7 @@ { "cell_type": "code", "execution_count": null, - "id": "ea217964", + "id": "9e6effa8", "metadata": {}, "outputs": [], "source": [ @@ -373,7 +373,7 @@ { "cell_type": "code", "execution_count": null, - "id": "be0e4ef0", + "id": "4c8a78aa", "metadata": {}, "outputs": [], "source": [ @@ -383,7 +383,7 @@ }, { "cell_type": "markdown", - "id": "0c75f531", + "id": "5186f4c5", "metadata": {}, "source": [ "### 📊 Analyze the generated data\n", @@ -396,7 +396,7 @@ { "cell_type": "code", "execution_count": null, - "id": "bcbf86d1", + "id": "efa93f44", "metadata": {}, "outputs": [], "source": [ @@ -406,7 +406,7 @@ }, { "cell_type": "markdown", - "id": "0ab35029", + "id": "fbcfd3d8", "metadata": {}, "source": [ "### 🔎 Visual Inspection\n", @@ -417,7 +417,7 @@ { "cell_type": "code", "execution_count": null, - "id": "03314ae9", + "id": "2538a89b", "metadata": { "lines_to_next_cell": 2 }, @@ -441,7 +441,7 @@ }, { "cell_type": "markdown", - "id": 
"e76a3e3b", + "id": "a7de9c71", "metadata": {}, "source": [ "### 🆙 Scale up!\n", @@ -454,7 +454,7 @@ { "cell_type": "code", "execution_count": null, - "id": "d16566c0", + "id": "850d78d8", "metadata": {}, "outputs": [], "source": [ @@ -464,7 +464,7 @@ { "cell_type": "code", "execution_count": null, - "id": "8e7796ba", + "id": "3e953262", "metadata": {}, "outputs": [], "source": [ @@ -477,7 +477,7 @@ { "cell_type": "code", "execution_count": null, - "id": "14bc1042", + "id": "0a5c00e6", "metadata": {}, "outputs": [], "source": [ @@ -489,7 +489,7 @@ }, { "cell_type": "markdown", - "id": "1e676330", + "id": "a6654664", "metadata": {}, "source": [ "## ⏭️ Next Steps\n", diff --git a/docs/colab_notebooks/5-generating-images.ipynb b/docs/colab_notebooks/5-generating-images.ipynb index ea9e0b8f..345f8655 100644 --- a/docs/colab_notebooks/5-generating-images.ipynb +++ b/docs/colab_notebooks/5-generating-images.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "markdown", - "id": "3b8abde3", + "id": "dcc80c16", "metadata": {}, "source": [ "# 🎨 Data Designer Tutorial: Generating Images\n", @@ -24,7 +24,7 @@ }, { "cell_type": "markdown", - "id": "1da8d75f", + "id": "0fb5f66a", "metadata": {}, "source": [ "### 📦 Import Data Designer\n", @@ -35,7 +35,7 @@ }, { "cell_type": "markdown", - "id": "cc461005", + "id": "062e3558", "metadata": {}, "source": [ "### ⚡ Colab Setup\n", @@ -46,7 +46,7 @@ { "cell_type": "code", "execution_count": null, - "id": "206037bf", + "id": "f739c774", "metadata": {}, "outputs": [], "source": [ @@ -57,7 +57,7 @@ { "cell_type": "code", "execution_count": null, - "id": "db5a4929", + "id": "6fe9738b", "metadata": {}, "outputs": [], "source": [ @@ -75,7 +75,7 @@ { "cell_type": "code", "execution_count": null, - "id": "b3cba8b6", + "id": "21635563", "metadata": {}, "outputs": [], "source": [ @@ -88,7 +88,7 @@ }, { "cell_type": "markdown", - "id": "444aa9dc", + "id": "19eb9d17", "metadata": {}, "source": [ "### ⚙️ Initialize the Data Designer interface\n", @@ -99,7 +99,7 @@ { "cell_type": "code", "execution_count": null, - "id": "1932342c", + "id": "1c0715ea", "metadata": {}, "outputs": [], "source": [ @@ -108,7 +108,7 @@ }, { "cell_type": "markdown", - "id": "aa7b90c5", + "id": "f5846789", "metadata": {}, "source": [ "### 🎛️ Define an image-generation model\n", @@ -120,7 +120,7 @@ { "cell_type": "code", "execution_count": null, - "id": "df7e4385", + "id": "35c4fc72", "metadata": {}, "outputs": [], "source": [ @@ -142,7 +142,7 @@ }, { "cell_type": "markdown", - "id": "a1325e38", + "id": "18655cb0", "metadata": {}, "source": [ "### 🏗️ Build the config: samplers + image column\n", @@ -153,7 +153,7 @@ { "cell_type": "code", "execution_count": null, - "id": "95064ed0", + "id": "202e5463", "metadata": {}, "outputs": [], "source": [ @@ -326,7 +326,7 @@ }, { "cell_type": "markdown", - "id": "c6fe0620", + "id": "7d3bb84e", "metadata": {}, "source": [ "### 🔁 Preview: images as base64\n", @@ -337,7 +337,7 @@ { "cell_type": "code", "execution_count": null, - "id": "7323dce5", + "id": "98a82605", "metadata": {}, "outputs": [], "source": [ @@ -347,7 +347,7 @@ { "cell_type": "code", "execution_count": null, - "id": "510b933c", + "id": "b16858e0", "metadata": {}, "outputs": [], "source": [ @@ -358,7 +358,7 @@ { "cell_type": "code", "execution_count": null, - "id": "0c8c197f", + "id": "52d0cf2c", "metadata": {}, "outputs": [], "source": [ @@ -367,7 +367,7 @@ }, { "cell_type": "markdown", - "id": "4cffd205", + "id": "7a2411ef", "metadata": {}, "source": [ "### 🆙 Create: images saved to disk\n", @@ 
-378,7 +378,7 @@ { "cell_type": "code", "execution_count": null, - "id": "308bf2b8", + "id": "dfa4fcd0", "metadata": {}, "outputs": [], "source": [ @@ -388,7 +388,7 @@ { "cell_type": "code", "execution_count": null, - "id": "02610965", + "id": "691db6a6", "metadata": {}, "outputs": [], "source": [ @@ -399,7 +399,7 @@ { "cell_type": "code", "execution_count": null, - "id": "189af389", + "id": "5e3dbb92", "metadata": {}, "outputs": [], "source": [ @@ -415,7 +415,7 @@ }, { "cell_type": "markdown", - "id": "51558182", + "id": "46678986", "metadata": {}, "source": [ "## ⏭️ Next steps\n", diff --git a/docs/colab_notebooks/6-editing-images-with-image-context.ipynb b/docs/colab_notebooks/6-editing-images-with-image-context.ipynb index ddfe9d37..9c22eb50 100644 --- a/docs/colab_notebooks/6-editing-images-with-image-context.ipynb +++ b/docs/colab_notebooks/6-editing-images-with-image-context.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "markdown", - "id": "c7129daf", + "id": "20d2b9af", "metadata": {}, "source": [ "# 🎨 Data Designer Tutorial: Image-to-Image Editing\n", @@ -25,7 +25,7 @@ }, { "cell_type": "markdown", - "id": "6a438ee3", + "id": "875e0ce0", "metadata": {}, "source": [ "### 📦 Import Data Designer\n", @@ -36,7 +36,7 @@ }, { "cell_type": "markdown", - "id": "1a022157", + "id": "2e02e601", "metadata": {}, "source": [ "### ⚡ Colab Setup\n", @@ -47,7 +47,7 @@ { "cell_type": "code", "execution_count": null, - "id": "752fe3eb", + "id": "3b111078", "metadata": {}, "outputs": [], "source": [ @@ -58,7 +58,7 @@ { "cell_type": "code", "execution_count": null, - "id": "49266cc2", + "id": "0e5f317a", "metadata": {}, "outputs": [], "source": [ @@ -76,7 +76,7 @@ { "cell_type": "code", "execution_count": null, - "id": "d87dfa0b", + "id": "7ebd55d2", "metadata": {}, "outputs": [], "source": [ @@ -95,7 +95,7 @@ }, { "cell_type": "markdown", - "id": "c99ff426", + "id": "0a63a3cd", "metadata": {}, "source": [ "### ⚙️ Initialize the Data Designer interface\n", @@ -106,7 +106,7 @@ { "cell_type": "code", "execution_count": null, - "id": "9be6231b", + "id": "ad455680", "metadata": {}, "outputs": [], "source": [ @@ -115,7 +115,7 @@ }, { "cell_type": "markdown", - "id": "3e242b51", + "id": "3ce04024", "metadata": {}, "source": [ "### 🎛️ Define an image-editing model\n", @@ -131,7 +131,7 @@ { "cell_type": "code", "execution_count": null, - "id": "34dd8eed", + "id": "5f5235b0", "metadata": {}, "outputs": [], "source": [ @@ -153,7 +153,7 @@ }, { "cell_type": "markdown", - "id": "98abe1a9", + "id": "10989e53", "metadata": {}, "source": [ "### 🌱 Load animal portraits from HuggingFace\n", @@ -166,7 +166,7 @@ { "cell_type": "code", "execution_count": null, - "id": "233f483b", + "id": "bd526010", "metadata": {}, "outputs": [], "source": [ @@ -199,7 +199,7 @@ { "cell_type": "code", "execution_count": null, - "id": "6b1a7b59", + "id": "6fad55cf", "metadata": {}, "outputs": [], "source": [ @@ -216,7 +216,7 @@ }, { "cell_type": "markdown", - "id": "2956a5a6", + "id": "e2c2f374", "metadata": {}, "source": [ "### 🏗️ Build the configuration\n", @@ -233,7 +233,7 @@ { "cell_type": "code", "execution_count": null, - "id": "f79ffa72", + "id": "82f39386", "metadata": {}, "outputs": [], "source": [ @@ -321,7 +321,7 @@ }, { "cell_type": "markdown", - "id": "0cba69c0", + "id": "80f226b8", "metadata": {}, "source": [ "### 🔁 Preview: quick iteration\n", @@ -332,7 +332,7 @@ { "cell_type": "code", "execution_count": null, - "id": "ec669ae2", + "id": "ab29a2af", "metadata": {}, "outputs": [], "source": [ @@ -342,7 +342,7 @@ { 
"cell_type": "code", "execution_count": null, - "id": "41ac4a95", + "id": "7b1ab5bb", "metadata": {}, "outputs": [], "source": [ @@ -353,7 +353,7 @@ { "cell_type": "code", "execution_count": null, - "id": "6f041d9d", + "id": "3d299b09", "metadata": {}, "outputs": [], "source": [ @@ -362,7 +362,7 @@ }, { "cell_type": "markdown", - "id": "483fa24a", + "id": "05351d89", "metadata": { "lines_to_next_cell": 2 }, @@ -375,7 +375,7 @@ { "cell_type": "code", "execution_count": null, - "id": "dd4d7dff", + "id": "33886f95", "metadata": {}, "outputs": [], "source": [ @@ -411,7 +411,7 @@ { "cell_type": "code", "execution_count": null, - "id": "af08dc6c", + "id": "9ee971c8", "metadata": {}, "outputs": [], "source": [ @@ -421,7 +421,7 @@ }, { "cell_type": "markdown", - "id": "9ee15c83", + "id": "b6fd2b7a", "metadata": {}, "source": [ "### 🆙 Create at scale\n", @@ -432,7 +432,7 @@ { "cell_type": "code", "execution_count": null, - "id": "9f0d27f8", + "id": "44046581", "metadata": {}, "outputs": [], "source": [ @@ -442,7 +442,7 @@ { "cell_type": "code", "execution_count": null, - "id": "cc17414a", + "id": "23cf23c4", "metadata": {}, "outputs": [], "source": [ @@ -453,7 +453,7 @@ { "cell_type": "code", "execution_count": null, - "id": "849c03b6", + "id": "934fd9f9", "metadata": {}, "outputs": [], "source": [ @@ -463,7 +463,7 @@ }, { "cell_type": "markdown", - "id": "b7385f02", + "id": "c5045115", "metadata": {}, "source": [ "## ⏭️ Next steps\n", diff --git a/fern/DEVNOTES-COMPONENTS-PLAN.md b/fern/DEVNOTES-COMPONENTS-PLAN.md new file mode 100644 index 00000000..9846e70e --- /dev/null +++ b/fern/DEVNOTES-COMPONENTS-PLAN.md @@ -0,0 +1,276 @@ +# Dev Notes Custom Components Plan + +Plan for Fern custom React components that replicate the experience of `docs/devnotes/` pages. Use this when Fern enables custom components (Pro/Enterprise). + +## ✅ Implemented Components + +| Component | File | Status | +|-----------|------|--------| +| TrajectoryViewer | `components/TrajectoryViewer.tsx` | Done | +| ExpandableCode | `components/ExpandableCode.tsx` | Done | +| PipelineDiagram | `components/PipelineDiagram.tsx` | Done | +| MetricsTable | `components/MetricsTable.tsx` | Done | +| 4hop trajectory data | `components/trajectories/4hop-example.ts` | Done | +| SDG pipeline diagram | `components/diagrams/sdg-pipeline.ts` | Done | + +## Current Dev Notes Structure + +| File | Content Type | Key Elements | +|------|--------------|--------------| +| `index.md` | Landing page | Intro, auto-listing of posts | +| `posts/rqa.md` | Blog post | Hero image, code blocks, comparison tables | +| `posts/design-principles.md` | Blog post | Hero image, ASCII pipeline diagram, code blocks | +| `posts/deep-research-trajectories.md` | Blog post | Hero image, **TrajectoryViewer** (custom HTML/CSS), code blocks, expandable source, tables | +| `.authors.yml` | Metadata | Author name, description, avatar | + +--- + +## Component Inventory + +### 1. **TrajectoryViewer** (High Priority) + +**Purpose:** Renders multi-turn research trajectories with tool calls (search, open, find) in a structured, color-coded layout. + +**Current implementation:** ~100 lines of inline HTML/CSS in `deep-research-trajectories.md`. + +**Props:** +```ts +interface TrajectoryViewerProps { + question: string; + referenceAnswer?: string; + goldenPassageHint?: string; // e.g. "⭐ = golden passage" + turns: TrajectoryTurn[]; + defaultOpen?: boolean; // For collapsible wrapper + summary?: string; // e.g. 
"Example trajectory: 4-hop question, 31 turns" +} + +interface TrajectoryTurn { + turnIndex: number; + calls: ToolCall[]; +} + +interface ToolCall { + fn: "search" | "open" | "find" | "answer"; + arg: string; + isGolden?: boolean; // For open() calls with ⭐ + body?: string; // For "answer" - full answer text (supports HTML) +} +``` + +**Visual design:** +- Question: blue-tinted background (`#42a5f5`) +- Reference: green-tinted, left border +- search: blue accent +- open: green accent +- find: orange accent +- answer: green, full-width block +- Turn labels: T1, T2, ... monospace, muted +- Parallel calls: grouped with vertical bar + +**Data format:** Accept structured JSON or a simplified DSL. Could also accept pre-rendered HTML for migration. + +--- + +### 2. **PipelineDiagram** (Medium Priority) + +**Purpose:** Renders ASCII/text pipeline diagrams with consistent styling (e.g., design-principles SDG stages). + +**Current implementation:** Raw ASCII in markdown: +``` + Seed Documents Seed dataset column ingests documents + │ from local files or HuggingFace + ▼ +┌─────────────────────────┐ +│ Artifact Extraction │ LLM extracts key concepts... +``` + +**Props:** +```ts +interface PipelineDiagramProps { + /** ASCII diagram string - preserves whitespace, monospace font */ + diagram: string; + /** Optional title/caption */ + title?: string; + /** Max width for horizontal scroll */ + maxWidth?: string; +} +``` + +**Implementation:** Monospace block, optional syntax highlighting for box-drawing chars, scroll on overflow. + +--- + +### 3. **ExpandableCode** (Medium Priority) + +**Purpose:** Collapsible code block with "Full source" summary. Used in deep-research-trajectories for `openresearcher_demo.py`, `prepare_corpus.py`, `retriever_mcp.py`. + +**Props:** +```ts +interface ExpandableCodeProps { + summary: string; // e.g. "Full source: openresearcher_demo.py" + code: string; + language?: string; // python, etc. + defaultOpen?: boolean; +} +``` + +**Implementation:** Wraps Fern's built-in code block in a `
`-like collapsible. Uses Fern's Accordion if available, or custom collapse. + +--- + +### 4. **MetricsTable** (Low–Medium Priority) + +**Purpose:** Styled comparison tables for benchmark results (e.g., RQA validation loss, MMLU-Pro, GSM8K). + +**Current implementation:** Standard markdown tables with `:----:` alignment. + +**Props:** +```ts +interface MetricsTableProps { + headers: string[]; + rows: (string | number)[][]; + /** Optional: highlight best values per column (e.g. bold) */ + highlightBest?: "min" | "max" | "none"; + /** Column indices where lower is better (e.g. validation loss) */ + lowerIsBetter?: number[]; +} +``` + +**Enhancement:** Could auto-bold best values, add subtle zebra striping. Fern may already style tables well—verify first. + +--- + +### 5. **DevNoteCard** (Low Priority) + +**Purpose:** Article preview card for dev notes index—title, date, authors, excerpt (content before ``). + +**Props:** +```ts +interface DevNoteCardProps { + title: string; + slug: string; + date: string; + authors: string[]; // Keys from .authors.yml + excerpt: string; + image?: string; +} +``` + +**Dependency:** Requires authors data. Could be passed as prop or loaded from a generated JSON. + +--- + +### 6. **AuthorCard** (Low Priority) + +**Purpose:** Renders author info (avatar, name, description) from `.authors.yml`. + +**Props:** +```ts +interface AuthorCardProps { + authorId: string; // dcorneil, etramel, kthadaka, nvidia + name?: string; + description?: string; + avatar?: string; +} +``` + +**Note:** Fern may have built-in author/avatar support. Check before implementing. + +--- + +### 7. **HeroImage** (Low Priority) + +**Purpose:** Full-width or aligned hero image with optional caption. Used at top of each dev note. + +**Current usage:** `![alt](url){ align=right width=500 }` (MkDocs Material syntax). + +**Props:** +```ts +interface HeroImageProps { + src: string; + alt: string; + align?: "left" | "center" | "right"; + width?: number | string; + caption?: string; +} +``` + +**Note:** Fern's image handling may suffice. Verify if custom alignment/caption is needed. + +--- + +### 8. **ResourceLinks** (Low Priority) + +**Purpose:** Styled "Key Resources" section at end of articles (numbered list with links). + +**Current implementation:** Plain markdown list. + +**Props:** +```ts +interface ResourceLinksProps { + items: { label: string; href: string }[]; + title?: string; // Default: "Key Resources" +} +``` + +--- + +## Implementation Order + +| Phase | Component | Effort | Impact | +|-------|-----------|--------|--------| +| 1 | **TrajectoryViewer** | High | Critical for deep-research-trajectories | +| 2 | **ExpandableCode** | Low | Used in 3 places in deep-research | +| 3 | **PipelineDiagram** | Low | design-principles only | +| 4 | **MetricsTable** | Low | Nice-to-have for RQA, design-principles | +| 5 | **DevNoteCard**, **AuthorCard**, **HeroImage**, **ResourceLinks** | Low | Index page, polish | + +--- + +## Migration Strategy + +1. **Create components** in `fern/components/` following NotebookViewer pattern. +2. **Add CSS** to `fern/styles/` (e.g. `trajectory-viewer.css`). +3. **Extract trajectory data** from deep-research-trajectories.md into a JSON/TS file (like notebooks). +4. **Convert MDX** – replace inline HTML with component usage: + ```mdx + import { TrajectoryViewer } from "@/components/TrajectoryViewer"; + import exampleTrajectory from "@/components/trajectories/4hop-example"; + + + ``` +5. **Register** in `docs.yml` under `experimental.mdx-components`. 
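+
+To make step 1 concrete, here is a minimal sketch of what a component built along these lines could look like, using ExpandableCode as the simplest case. It is illustrative only — the `components/ExpandableCode.tsx` already marked as implemented above may differ — and it leans on a native `<details>`/`<summary>` element so the collapse works server-side with no client JS, in line with the Fern compatibility notes below.
+
+```tsx
+// Sketch only; the shipped ExpandableCode.tsx may differ in markup and class names.
+interface ExpandableCodeProps {
+  summary: string;       // e.g. "Full source: openresearcher_demo.py"
+  code: string;
+  language?: string;     // defaults to plain text
+  defaultOpen?: boolean;
+}
+
+export function ExpandableCode({ summary, code, language = "text", defaultOpen = false }: ExpandableCodeProps) {
+  // Native <details>/<summary> keeps the collapsible behavior purely server-rendered:
+  // no React import (automatic JSX runtime), no class components, no client-side state.
+  return (
+    <details className="expandable-code" open={defaultOpen}>
+      <summary>{summary}</summary>
+      <pre>
+        <code className={`language-${language}`}>{code}</code>
+      </pre>
+    </details>
+  );
+}
+```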
+ +--- + +## Data Extraction Tasks + +| Source | Output | Format | +|--------|--------|--------| +| deep-research-trajectories.md (lines 141–174) | `trajectories/4hop-example.ts` | `{ question, referenceAnswer, turns }` | +| design-principles.md (pipeline ASCII) | Inline or `diagrams/sdg-pipeline.ts` | `{ diagram: string }` | + +--- + +## Fern Compatibility Notes + +- **No React import** – use automatic JSX runtime (see NotebookViewer). +- **No class components** – ErrorBoundary etc. not available. +- **Server-side rendered** – components must work without client-side JS. +- **CSS** – add to `docs.yml` `css:` array. +- **Pro/Enterprise** – custom components require Fern Pro or Enterprise plan. + +--- + +## Open Questions + +1. **Dev notes section** – Will dev notes live under a new Fern section (e.g. "Dev Notes" tab) or within existing structure? +2. **Authors** – How to integrate `.authors.yml`? Build-time script to generate author lookup? Or hardcode in MDX frontmatter? +3. **Trajectory data** – Manual extraction vs. script to parse HTML? Manual is fine for one example; script if we add more. +4. **Blog index** – Does Fern support blog-style listing (chronological posts)? Or do we use a static "Dev Notes" page with manual card grid? diff --git a/fern/DOCS-VS-FERN.md b/fern/DOCS-VS-FERN.md new file mode 100644 index 00000000..94e05195 --- /dev/null +++ b/fern/DOCS-VS-FERN.md @@ -0,0 +1,90 @@ +# docs/ vs fern/ Comparison + +This document compares the MkDocs `docs/` structure with the Fern `fern/` structure and what needs to be migrated or generated. + +## Prerequisites for NotebookViewer + +**Run before viewing "The Basics (NotebookViewer)" page:** + +```bash +# 1. Generate Colab notebooks (from docs/notebook_source/*.py) +make generate-colab-notebooks + +# 2. Convert to Fern format (JSON + TS for NotebookViewer) +make generate-fern-notebooks +``` + +The NotebookViewer page imports from `@/components/notebooks/1-the-basics` (the `.ts` file). If you haven't run `make generate-fern-notebooks`, that file won't exist and the page will error. 
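+
+For orientation, the generated `.ts` module is simply the notebook serialized as data that `NotebookViewer` can render. The exact fields are whatever `fern/scripts/ipynb-to-fern-json.py` emits; the shape below is a rough, hypothetical illustration of the idea, not the script's actual output.
+
+```ts
+// Hypothetical shape of a generated module such as components/notebooks/1-the-basics.ts.
+// Field names here are assumptions for illustration; check the generated file for the real ones.
+interface NotebookCell {
+  type: "markdown" | "code";
+  source: string; // cell contents joined into one string
+}
+
+const cells: NotebookCell[] = [
+  { type: "markdown", source: "### 📦 Import Data Designer" },
+  { type: "code", source: "# code cell contents go here" },
+];
+
+const notebook = {
+  title: "🎨 Data Designer Tutorial: The Basics",
+  cells,
+};
+
+export default notebook;
+```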
+ +--- + +## Structure Comparison + +| docs/ (MkDocs) | fern/ (Fern) | Status | +|----------------|--------------|--------| +| `index.md` | `v0.5.0/pages/index.mdx` | Migrated | +| `CONTRIBUTING.md` | `v0.5.0/pages/contributing.mdx` | Migrated | +| `concepts/*.md` | `v0.5.0/pages/concepts/*.mdx` | Migrated | +| `recipes/*.md` | `v0.5.0/pages/recipes/*.mdx` | Migrated | +| `plugins/*.md` | `v0.5.0/pages/plugins/*.mdx` | Migrated | +| `code_reference/*.md` | `v0.5.0/pages/api-reference/*.mdx` | Migrated | +| `devnotes/` | Not in fern | Optional – dev notes not migrated | +| `notebook_source/*.py` | N/A | Source only – Jupytext converts to ipynb | +| `colab_notebooks/*.ipynb` | `assets/notebooks/*.json` + `*.ts` | **Generated** by `make generate-fern-notebooks` | +| `assets/recipes/*.py` | `assets/recipes/*.py` | **Copied** – same structure | +| `css/`, `js/`, `overrides/` | `styles/notebook-viewer.css` | Fern uses different theming | + +--- + +## Assets: What's Where + +### docs/assets/ + +| Path | Purpose | +|------|---------| +| `recipes/code_generation/*.py` | Recipe scripts (downloadable) | +| `recipes/qa_and_chat/*.py` | Recipe scripts | +| `recipes/mcp_and_tooluse/*.py` | Recipe scripts | + +### fern/assets/ + +| Path | Purpose | +|------|---------| +| `recipes/` | **Same as docs** – recipe .py files (already migrated) | +| `notebooks/*.ts` | **Generated** – NotebookViewer data (in components tree for import) | +| `favicon.png` | Referenced in docs.yml | + +--- + +## Do You Need to Migrate/Copy? + +### Already in fern (no action needed) + +- All `assets/recipes/` Python files – same layout as docs +- All MDX pages – migrated from docs +- Favicon, logo – in docs.yml + +### Generated (run make) + +- `fern/components/notebooks/*.ts` – from `make generate-fern-notebooks` + +### Not migrated (optional) + +- `docs/devnotes/` – blog-style dev notes +- `docs/css/`, `docs/js/`, `docs/overrides/` – MkDocs-specific + +### Recipe download links + +Fern recipe pages link to GitHub: `https://github.com/NVIDIA-NeMo/DataDesigner/blob/main/docs/assets/recipes/...` + +Those files live in **docs/** in the repo. Fern has a copy in `fern/assets/recipes/` for local reference, but the public download URLs point to `docs/` on GitHub. No change needed unless you want to point to `fern/assets/` instead. + +--- + +## Quick Reference + +| Task | Command | +|------|---------| +| Generate Colab notebooks | `make generate-colab-notebooks` | +| Generate Fern notebook JSON/TS | `make generate-fern-notebooks` | +| Preview Fern docs | `fern docs dev` (from project root) | diff --git a/fern/README.md b/fern/README.md index 5ad2221c..fcc1b664 100644 --- a/fern/README.md +++ b/fern/README.md @@ -14,6 +14,12 @@ npx fern-api --version ## 🔍 Local Preview +**Before first run (for NotebookViewer pages):** +```bash +make generate-colab-notebooks # Creates docs/colab_notebooks/*.ipynb +make generate-fern-notebooks # Creates fern/assets/notebooks/*.ts +``` + ```bash # From the fern/ directory cd fern/ @@ -25,6 +31,8 @@ fern docs dev --project ./fern The docs will be available at `http://localhost:3000`. +See [DOCS-VS-FERN.md](DOCS-VS-FERN.md) for docs/ vs fern/ comparison and migration notes. 
+ ## 📁 Folder Structure ``` @@ -146,6 +154,44 @@ fern docs deploy - [Versioning Guide](https://buildwithfern.com/learn/docs/configuration/versions) - [Navigation Configuration](https://buildwithfern.com/learn/docs/configuration/navigation) +## 📓 NotebookViewer Component + +A custom React component renders Jupyter notebook content in Fern docs. Use it to display tutorials from `.ipynb` files with a Colab badge. + +### Workflow + +1. **Generate Colab notebooks** (source of truth): + ```bash + make generate-colab-notebooks + ``` + +2. **Convert to Fern format**: + ```bash + make generate-fern-notebooks + ``` + +3. **Use in MDX** (import from `@/components/notebooks/` – inside the components tree so Fern can resolve it): + ```mdx + import { NotebookViewer } from "@/components/NotebookViewer"; + import notebook from "@/components/notebooks/1-the-basics"; + + + ``` + +### Files + +- `fern/components/NotebookViewer.tsx` – React component +- `fern/components/notebooks/*.ts` – Notebook data (generated, in components tree for import resolution) +- `fern/scripts/ipynb-to-fern-json.py` – Converts `.ipynb` → `.ts` + `.json` +- `fern/styles/notebook-viewer.css` – Component styles + +### Requirements + +- Fern Pro or Enterprise plan (custom React components) + ## ⚠️ Common Issues ### "EISDIR: illegal operation on a directory" diff --git a/fern/assets/images/deployment-enterprise-gateway.png b/fern/assets/images/deployment-enterprise-gateway.png new file mode 100644 index 0000000000000000000000000000000000000000..8b337406d3b11485b3360888f9818532607f8ea1 GIT binary patch literal 59037 zcmeFZ`9IYA_W&-DE?EjG3Z)2HDqGfMON@QVGNiII$TEyAgi4}h$-a(d>^ox!Nys|( zF=NZlSjRqmU*o<%_ug;!{_y<+zVmp@%xis~=kuKNJkPT}-)d>7&{DHflaZ0psyS9i;V1)qOF30ma2jRr`Ag+D_aLkGO}AAeIr3?!{*sYw z#mk0%H(RvzZc!I$HD6cOljwTCRoAs=?FTOMqo=VamjVmzkkOtF>8{1~-*~^=L`b~r zxXG_?es5yA^SjiQ(zN52&14?V;a4#$UPW`sL!ZtayqFu9l z6$nJe2OOUzqd3J%MhP690=}}R*#0_JK6Q)i^vQK{GO|!xGK&8_qXGPrK9Rte^v<9E zr$2>|Q2}QcfUjpN`Tspl5ubYc|BlJ+fO}-}x(ceQz`ySEmzI_gR~sic%1Qy!g+H7h z8M%^?UA#&9o>JAhu?Dn1WUKeY?FmR-^0||vpv4QPXO@DVj?ScZ$fP|bfkQ`2Hw#Wr zM+bzpT#xY^5Ge*)6tRB(D}$tfl%EGT?kmYS23Q~Ko#D@h$i z<^R17oXK3badUH)6cU0$p@PtRf=(~3h3?$He_u%WuF&1P0>Bdju3iu~3r_)vE7zZm z{9ikYmaflV+B&=0Izc!|?OHr@a(9!tew_s9ug{C`7s=bVpEF z=&!bcSEWf;CADllEgg&$Z5;ue0Wf66?np?VJpZpde?$Don@|3J^UfXd|JSSkxb^$1 z`mUBQ6`ULam~OIv)9Zh5|9SI&FG>rMhW-y!{E6q2s{o>9silSfBAP7q>A(OIPhGK9 z)YJq10WKqbPKg3PxBmPm9oyF?sbsE`kv$+&Rg~BBJhhCaieuDg3z>|-g)wQD&^*zi zP`aJ-=9$nNTJOdWSDBHohKZES%cuYpR9jwKyZ9NU5@pM~cW#Qi-BQBM%MPUp%iIa7 z8&`x>#lFD09X#f+uJyT<-$XO6y&@w&%lX@nmTG_<1t^;7g$D)01F}=U{b;^Qeam?2 z_p87|oLuA-pvx97uKlY)pnVEZ;_3goe)&A7P5W6c8u@?0p17(V{@pqekJp7Ll0e2**{_FbP>koQ9pI5#Z_#Z7k==pa4 zH@uykMxH+v2!AhJ%J3W7PEG^4yZ!@lWC8EbGKg5d{uul_B}gqYh`7-G*Yy{QuTpur z-ruJ9j}`$Ex&8-2|9=wwe;^TDq$^-uw_g3Z9k9t?<<*hq9B8afYcekYQ^Hu2T`8{erIk@ z7IMEe#d$p!Vmf_1PtSg+Dln*zjiD0pyV0{lBLhDYF6#k@88jZ~nm z7mxkcyr}-gy?opchCdG%Oa#HV=h)*tyS9u&r^D2gqP$?Dxtm8&?a46ce}ys$?1Oy zuFn^D58J68H#~V!#AMo|=mftm6*TfM>^-~wpn$t9kUIXm=3d{2x|PznL^Fpo|7wgW zimW1@`nJupVbeRl2c<>7v*kCnSE;#Q1Gz&wTb9_~7yOR9)M1KuzPqOKv`Jhxe3ee- z+bq8kAi(nvfRBwzZoFr}1z6!;wlWC3*-Ic=`nv@bEng{uI3o~M%v ztFSYu^0E_(Qe-2dNG;q?}%^O!@FqfL#US?{yiNyV}SLJ0#qV>XXF+z12_fA zEA0Bqnc0e-VhYCFJjR^w*^lJp4&)i3^9&IXwK4x=V%6}hkg-h)DB)SR-&UnBaZ~TW z8=qh8Ad3n38)FZm0bHCH(O>wZwH;Wm0({QrtTbinCluIZua03xtIW`>L-tSI?FK4Y z2N1EN!8h^IO2-1FH8dB8-;dk_Plyb%d;6CY@6V;^pf||>L9yW5Bvk1Am=ixBv2Uh; zR%f2k^giu!Pk~3JulSNxC##qk!R~RzLTme>Kg(!@|B=-Y>hKMUK;YRPpK8hN>T>%B zm;I_Zp@~__R|jbTN~G$kfv3hLs`uD>5D|{Gv$cb%8rTbALlN)&2VC0*UhMBQ9Gl*l 
zu@kE?(;#5o;>E{AR?H!1u|J^Z6;jG_a|17_8@gTpAxfyVNo37s=(gYf10d@olhk;c zcLtErF~RX;PmCu)%m8Us@bof`Y2)gGKhZ+de>cW=i+CeJ)W48LB3DBfHCo}A#og(c zO^Y0!brdU}rA?sNTxc5$rdj^zQZ(|zrO22#>W;eRx!;>%`O`>v{OF5#tCdjP9P>q? zJ6z`fSdAk?Pblr>Ia=BVAOMglB8rgIrMB|U+NM(GkAl!CSFn6Y~Ai# z%ycOm7&q_rD3|Spawim7-UC08J!Ci6Ule?B9+;8LKb$tj!(mMjw`25)Hq*zVMdmhf zQDzyKhn^OZF~vQEXZy>otIMCV@s%7$yEn7aoTw9xi+cJ5@$R1;N35DZi@*3tQ*PCn zU_X9*R3;;NM9W4Pp%8k_7s;W`D{LvwTL$HyM&r$FZ((1r z)|VnuI3`1uh#J?{-A;Y^d%kjJpWyhu%qgS$ByO?~PxvZ&ieSdf9DrV zYIwY;%`d#o&^Lpe@g6&5;MAnf6sv*wOjU}*1S53BS`JjARJbNV`?u@}@B!3PzE$e$ z93Rp#$}o|cwt#2%&OhKZAgA_`%&+`FD|Prj^-UFw34HX^ce{yaHD)9s$10q)@-h12 zpfv_tfZ0#VSIU6j#7k#?eeC{4*;fpQOP22SOI%OrOca-TV%51Qnx9LPFz8V7$$k7O z<5Hy=k5FcxSZ*#|fBEHxQ6sKU%#fVsO@|G=-M<%4PW%auT;x9BQnzL#7QesuH4^=xl$C{Q)(f#?lJ*z7qwb_ro1hx-iDwpsEsm@ToOp(U^<$ zLtFbL>BWm)EP68cCqYH{Pw)JA?B7MR)|%N zE*Xz$znHLU4OBReIyz9*NUARtmBNu_rdRz9hEChQ?KH3M!KG{j%j|X`w%6&S)H~F& zEA0AyesHls*MnmSKkBWopTF{3|nAWf1s3G&oMi6BK=shq*9StTi5%TH$J;y<#a7d_mx_j3axv1Zt)a ztm3{<@fw`T^3zav?1*t5T@;(-oH+A}!BsfV_lQbsS(wxUck>Y*kR9KlMJf5 z;$B&>f;Y=fgV{~j?~Q5rPrv}_2k|P~l>J9P?lYb&awBc6?|@}Bz=0;m zWyn(RuTaJPc|)PJ5t`XA53e?m?Rby-O%5pN(BBkA)f;p~uN*N7Zo}mFFbHE+fKKqC zhTR_am(9!ENXf@E7l}&^SEcQaW@*_l8f+dVUG_zDv;}O0JqjTaJ_ID{VvjEow7eL! zUpwjLj+)_pD*1F+X?r@QK^wu#F#E?}oqR9`SZ8PBr#v)DWk>>6nIaDvY3*zH^q`dZ zq0HU5fOYIVX~B)_1*4D`RJAy*s|HTbS^cubT=5RxD`E7@A9u{Eir~jj z{c@P|pN>SGabFxg%|ll#R$xrke0a3e^r#A3bmLq_`sSwSUmYeRuK^?kpgOd8wMvq( znolF*H2m$%Dl{KiHl_?`1=+cp(!&k8>z%|Kjy@ANcFti?(*_1Ory}7T+}%m20P$>TLTNJu(hbPL$cF@N2H24i!;^mEI24>SQH5*1YYXq0b$6 z5fK=3S1Yfgm5R1O%QGP3u0l4a^XeH7$DDi#Z)xH~JU2s+d&})>Ej(&7un;yUTLC}? zqE2qN3p+aK=B}}Ta^ZXjl>PV8hA^tl8;64y6=EKkULpwEp_?E!EiSb?8&EjBD|d85 z`ud6CcJjl(e=3kW5dI`7(%~F{^~gU02Kmd&*~Xl!2nI&YCW+)pB~h{^r!>bb{i~<2 zerh*uMl;X`9=U9O+3=ZL43eG9GusIvLk05>ZxVb){Wn(}E3Ej>P}YftKg;kRbG}bo zri+4aBMKeeuT!?o)sjz6XZu*t*Aw5d#fXp05|Lk*1hR;pzdkq0``#j?kM8W#)RF%> z+V@iYhyJJWr!(PO=xH^9MWAn;%?)c|R_?X8% z{b&M=wS4ss&V$zmwi5Qo;A*3V88-7f{Wb7b%lwoJ5&W3-?+{HwD0OHx5mLB zI}G|6C9|iCn4|VyMb{*(RHz!7V3~P}N*o@g$e~kQk4jkNF3;hgiGPP5%3{k8vo#v} z5I3`It!l}CdDF`b`5nv&`Cnu|U#t(V9x2B6C428z%r2#5r+-p772N$cq-oYU>dF5& zTc7VlL(Jm2!cQ`RN(EAB@CevW1!M{8rZANEB?QSkUxS}fshi-bJK6~3rc^*8d?jgk!@j-y5Aobz?5Typ~Rt!-F8bheXAHJsm z5j|EML5t!WF$rCABt1H!{um}%ChQnP36b2m4J@N7P6B*_%=Em`c2sW?%Y8_u?!!NX42^zrs(nUtD6o!z~hnzIz3u#*St;3_X8@ zMzHH1heFDs-2Mi3+Z#b}V;tqLV3~-uS$!eOV7c*{u(2eu#S2>^?*yNq7YbkQ)Dh}H z$MzvK5sz8+3s*wA-HkJnu2!r-a2ZW}*n%!6>T`a((=1gS{FE|TeqZi4!|0L2N6rfK zd>AvCIY|@C;ET3Gvn`sfBSu^y*)rym_wxHQYaHhuN;HHYZE&IM(HCf#6nZjUrv_Y$ zXdW5`*JFz$ZqC3gSwr^Q?<<`C8u~rFv{4hw=mY-9v!7p6y7RWO1c`Xz1%e{#<~ms% zc5_h+m&yWM$Lj}bs~5y?SO}%+1;S68VzV~4rGH{(ZWd1UO|_`ig<+j;4lO4EC4jiEN*X~LmTAA2vwcbd^GG{Crgt&dR&kKE!?0N-y{gWZC!R=Br6+( z#x6J)T;%t@mn)7fHsnxqI&~+-xEW-Qmn+w!uUiF2G+>XR29Hfr+EQ%=pwPBfO=O*- zwm)r{_e|N&$?~cyr@2}0KR|;}Iq1SM7_9`2L%fK*Y}=1LBF@W46?LB2BM$^m7VMWU zxDt}ziqoFlw-SrhcqzlNSeqc0p3!s&&GiUHyDtkU; z-=#j>ad(oOZ|%CgE@e*~lQeU1h0JEKeDuAnUWI$uYr6JY&quos~Au$k6 znI%?qn3`LO%or2&*zcmE*c3F~DncN)LS?3@|3HL(Tuj*ov82_W9_AICUo6$|wjWH@ z1_mtml1T$AvG~X8)DxMv7l@a1igK`GxIUz-*=9&MRtvR=<0}`WVmrQh->znH@9vI$9$opUkQzJhL%7*H zs&mL6TQyn+m~Lj8AuRtunT|M9wd+?z#5mRBa#&af9r7wHuNIIh?KyTt}eWD z=dad57vC6=(hiRXG;KWgT9-H1Mnp)jBOZ3=9B%cA{Q~|0vt#WoUqB)+!?iVGjp)N* z8m#2Ns;!r9b$JEiwCloAYb{KdIw6J*^+jUEL~0U$>;+9Vk#*nizI$0@0rHeK)(b&3 zqS>Vkof@^mhNF-h?xq>xL>cQ!aX05g%qkVQDBng4j(h6ovZ0?)QJy3|Dm|PVE9={B zVzn0Nz0smM{>4nQf6%C;!dAiv6Ky~UxhIx8)WZY^9l6K59@qUuhTj}zp|u=yD#KSQ zhZ*3K_Um0bv^{g-$S;J2%FrFt2A!OtFN@IJxIMUip?%VzgY{BDU5H`1G4#66%cg|w 
z#kpxvOFpA0<@=?A(vY2)MV)Y0j7ftB7*o{YQHgIyVBUorIRtXR~a~sAm`Zhxu|YJPu|80)T$4yu}9m9c0WOD9?hui z9QRp7a}b4d>y$DN{qC`p*Sp|~Q$r^xBXCv;61vzKkYm7s*(km=iqX&CoALGmz!~TY zs+6gaDnU8%fvAP(x2vIs(Q7p!O%NnRg-vP!{Zb*98=p8@rfVp^47mqQG=9?Y9Rz+L z+70j8ZNYL*7(z1whL)!}xMQD0P)Dr}WqGf~&zi#u zstr5bPB$Okv^PnZK+HgDeDK^E(2-HBC{)0LC5Z6Nh0Y?m?S|XymyXPiCk~1n-}7&$ zuu8HT@!p5%KkDQ`M`F{R(;V`V{ShO^z0W(cE*caHBk9_Pjd;kb^-3& zoLlyHNKiX~6#@Eui|;X(zEZLyV~0ev(rVw_4Q7lL_49YCPKrs zg=e(AP(GgoxDIPWn(}7j;}?@0y%$u?7_v88oD8MC8^m7^982~-=MLmfqH!|!QR^|$ znN;aco8w-Nj6M$`l%U*diw@iW zEUAn&eIRL*lA)I5G)alz8T2Y-8Oavh+BOMa6m?iu693Y5zJk8~VD#Yj+V~C!b5n!V z#NbmK*V2bGizVA#uT0cC^wDe912vl_N=bt@C7)2vgU5$0!)T5V?*0a+oLu3XUGiJ|UZD?u!GHKZkY^+~oYFWPh>ed_C|GKdkU0Uk4RU27{8|Mp#ecs-o0 z(6!$&7L!tl*Htp91k`UADoBZfb3mNKf!jye8t8P8FG$=*r#mKF*0b!}RFQ0P(F)E5 zaiM4-(72g*o1#+4D z>>PAgj?=PG$OjyINQkf=Elv;GhF8K_m3m_|dNOe;%@Gp!=9%Y&)Y2@|AK?8Aq05h1 zaIX^{4w1|c1N-`CyOu0^w>RoP2+mb>%dIt)Utlzx2%7`nDu&Zg^(VWeef8>i7!cdV ze!(G(x__s(a5?fOqkI&sT?W0H_f)7~YNe?aLQ`M393_Bpu&KK0tC2gM+5H;UCC563 zyFY8hwlUR=vqRIyiyT5*5gI}zZHwo#B6&xxkZkO`3e3cvi^#~$uWVGqbA{Gv}v}|CELgfgo6lQ?RGqqG7q!q+BV-{&ro23*6*J)JiP>6MYT)V zp4zzr4Ok$51ggZ;J{_cAu47R+p1)@*qc*S=`Ucl5M2G6o1=Jt1fER#zFgZRO#XHlu zbS|kQDCisa^;Zv~&&~%vVNpZ6kM$uO%RSqkiTD3CovTfIzT~xOXOkEh1X*$~ zbvJEvY6$(*Aai_F^blci^ZYQf;q2<2 zIw4l_oL3J)749OY3sgf1&*S;`^Br|)c5F7xo@_@1#;{~!P$+3u(u6l^lOZz#SXc4`fHBUFY?CZTVA*6a~?*1 zkdb>5T%Y3Px6(GT)GmA3tB-T3cvKSijOo?lp?)_f;ZRpIRRCO+6@V}6cq^?jKwO>ix zPj~{jya^#;cCua^FE>*;Z{yO4aF1pS>)o(&G)FZKp~74@k(`K}v+#Ig^Ck%=<^3x> z3;!b5qV>QHKiipygyEMS8a3#DSMAV>koWE0UCBS+yaL#Ud*){%M(0Xca90kmJ#1Bw zmppNIr}~_jGaKC(PGZ3u>QCv15Hn55DrcHC{ah6$-Fh?jl_!f8o8N2=Egm5`rf4g4 zAI*ppJ+)FZ!fu1#BY16Uo597rV?ss1(PrExD$OU}phIeKz+Y!AjV9@NC8mRtA5=&4 z$lr+Mi$|KGyRDG0+mrBX7UH$+ytRzrPD=$<2vngJurh3msx&-D^-@BE;Ciz)t&|Q! z(sI`)Gbf#u8FND4;?Kat2AnZ&1@0^L)3NkktSfe9iN8Wbl})w;pYy#cgekSfhg_2N z{jQI@dLR-`ehaIliDFI{xT1B-ZDeoJuTZ)7{nOI<`gz&`>Du04>Pgqulxx~OnaSdF zaxi=6Q+Q1TtDa^e%*)w~Pkq?RxedoO+4Cc1HK{Gd3l)?upL^`*3Fr)&T8@XQn4oK} zwOWIeuH~Yx?q3VtW#*X9$t;Gv?1~o`k5jzX>1u7pxaFs_>%8Hs5V+H<$@J}3q_*y> zT(jrQj)t|u4=O_X`_axkL#JEi&Wyri59t3OpW-pqEDd8v$}HsFJui}5l?dzH11S`W zZ1HL968YsWX>8RuX$Hqk!p)$0YMGJxlwIQPffblIf*=`wfc)5Eq%>v886)+R~5Gs46y4Py38WJ=iG(5SmW!i{# zbdq+gs(qDR2WLME<$%_^y9LJ%Y)#M3OBJ4ly~z2x+9tDIVTLc-9zEm4y3=hC^g;7U zsv%3<&=ohG9>zucq31afCb;pB#Py4xo8%VGfPzeAE)mYD^`}q%lpRWO8m(6p7-PF; z#PAW)Qy!0q+UhHnR5i|C6k7Q3>TSGWU@Lts*QJomNtX=S6b5xxY^)+GIq)quibUSB z*68e5J`a3%2Iu6JA^vYpy=EMfJ&(}up-R#Esb-Cer5BTVSVb1nVNDarz{w|*lLl=u zEWR?v*;cXkU0WgE%Gv0l-ZX6!?)xc>oos#}ltKC=3P19kM= zW(xjZega$lZ0H-lkmG8E+N?Njn4afV$7mNx|Dxv3)!aOWT#O}8G;4jf^SIEJJDBWML#Gtg$W<1T`xm_cu&=0>rawHzHZoJ}NGcuhbh>1uvnKBppMF3)?p z{72BYx~Ci(f_IdPazBJGj@+>CO;uHB*Qjx_DQ#1uWar!U?XcIRM29MHp>mS7dy$Tm zj6&WQNaVJ?z|247k(Z16&=e3W=iALD8T+^q?}uc+H0H(6%I--+ODxlgn%y4t8lo!t zES@P4C%o}%qQIk^W?S;Qr+d|Ll71dl4-W5rSW1kbfnVPJ2Hu`Oc-!2gP66uuD!{69 zhHWbO_344NrK8Q#KyLr|`3%@b{-WKB1=F3dRr|BHa=>)n~W;Yr&8r4E3uGxncnomunDpPC0%qRC{y2WeD(2Z*P3tIjs8A z>wQZFNJSgbe|X>7#XEr&p+UgRcaQvqfA+J(+1B&163>XQUHzx6BW4V({wNMKg$3ce8v{ zOxo0xWrdv8@d_=6st8CTQ5(cfY1bCNC~~z7o7{*=GVNiamFlC7Pe;Ax_SuQvq%+W7 zd~0`p;h~OzanJoap}glA$i(P@ba`D7iFaKHoIM9!z0|y@_y$(H(5g?S@dYsdh2Sq^ z5o^+K1K}BCNtiBUkGaaHO8B>3s)VoclGEab^SdVEdiLq4RXg!LXxV1=Rgc}vmBcd% zF!MY=v>}>t#{f+snrF!Gvh%exBzCVub8GjTZ7dZfQJ+n%;#E2*peca`cWI1LLjgs1 zsouxN%5$H@8mu78f#qJU=J}rc%>CPP^zGE~TVBPv2+fK%))fcB#ezC5y+)gRJ6=qV ztOj1oNruYim2QD>8~W=6vmZ=ED9O5gdiG4fkz6`Ob>H-e!3Rczk$8&&oRI*wPey{Cyg=G*_1dDtM6~c zsMQ7bHGZ$=$vp!aJ%e3uFEY7{0k7?N$fI}gcuQv6$<*BZFq~e@RXS_4mok=JSMJM{S#Tp?_1_$B-cR%g*^MXDJzPcmh%YP!=|gP+I+`bL!w)bEbyg 
zkDw2?FkC$JV$5Adk96O_g$0r-)A%)x*4knz%5o+=N{t`Q)lBuV8Z3%4ha0TV+W`G3 ze~F$ITO6A~Rp8yBsdb~wKQ$_Hc|O6$b8qe^qqT|wm8n@{xZbcQ3SMVv4Fr$}efgY{ zCU=-CmkM1b;mCH|vsv-)su1Ft57VG$#ujOVuPnjjudc%63mV$vw*eOy44o?;1ryD^ zw1;8AUODFo>gN*F)i|i>-j+XLpDr*@>6WVb`g0ZKNwmnO3SQyxKBi?``<8@rI}9Ra zAw|ZYL^kZKsdY(lYp+|WW+21rXN8kA+AATl`~E3}D(I>FG_l2nObgRqgARhSaYjo! z&N+e>8|)!76!ef~#n0pyet!$=B~)Tct{^AY7bzMC-~HRX z)kSUIZQH~zy!(t$(JfDPej6OSu5^tOG%1zNOX0JM>FyZb{Nx037L621>~jJrUDu<& zsTc&GXSm{DGui5L8bU)IXb%L>TK5X-098%!Fm6#AZ2A#G=pSy9*;k-y?pRM*O4I>g zX`l+Fu0}*6<6(UZUC`W0!1vsu?Ej_ugLbLqxv}hsm2u017f!Kzu=3D2ITyfL;kyb1 z(=&2=8shD|ZFf!c3hGjMO%KP+;dbo{AHpQL_syYu=uVo$=~y81Y*cp%h~s-g*|)0F z@>K}i_oo}#9;?6nI*Uno7lv4Hbw|ka3|rY3u`u1CXA#~B^IJNoOjwc`woD>`wfA-F zyaq5DqRHtEdDiW!GE?78CC0XLJg^-Ragfqfvs`NAq2v>i+X#7(B7Xk(BrhrR#)BRv z;zINVlS6K5==~4lgC$C&c?h9!gJxj%x9A1i(@iERl?qiN@wc||;wfJiC6q#GZIfXY zi=Hf_d=&Y~8k1=dm^q8;B-!I4FW=0uB|j`9vAWlM+5J2Vgf!b?sT4%x-Oj@&6_3m( znmg2Iyhx-$D%g%k(aNc}WR=S6dZ9Q(${)!`jiygnJ6OQ=G>qSfLNyU5z3!bCvGwZS zMoLyA(_kug!^0e9Rw0JbTV6);Q4hZ&u~!ccw->FaS0oMPmJfa;Eo>ci5JPLb%VThv zU(Uv-%T(yeEs&fI;x|`Rl2n`$<)nAj^?J7o3q#TJCir;)e$yR?9$6KPAGp@LC}j@T z=UAfQZD{Vqwz{O4W&uy?&rp>~=+{$wESg0M%Co)v(O0adaN3=xhrGsgZ9B@JWa;OT zGJ_&hu26vV*H@Y(f!N15g^0S6O#GoSZ!I!%@$NvL_Pw*O$~9}B9?plVdUS4bIV^HC zj22M2A8mzN4;ZRH7h1N?K9l(IvQajE#X?FfA(<)cV#YkQ3QJ&ew30FEu$gRTcGC&*X;_*SPP% zz;Nx*ap}+2(m=Sb$7nb07YruBJVjT^)XT1kj336*_yQo>!Uy|uLQHmdA~6&k=Z4uD zfp=d!rt_~wn)5*i)oUN@d_T=&=8$9>&MOM|WR^VGyeH%E?Af@NeoKJ=LF{nN4;L%s zC)rh-kE&;xT?&4VukJfI{YtKY{oYD{BQuxbq(=j<;=t8M(1qffolh_+I`5e~$Buvc z?R1z(85)n*^(^j^LR7~TdQr;I`y0)dv3!1Y{2pW8VeEw$9}!a@RX124S5Mk3s;1I} zec7}^ULWWPMK3&4XDR0wwVkDut$pxTjFz!iSem(EK%i+=a$DL2F~f&@vG0tTr*@%z96g}Xgjc&nI2uc6K5(Vq(^iqJ}q#fE$-+TZ44CM!vy3IWBw)#ex z7wyL@h+0AXrCg;Yb|uhOM|bGB=3`Hqz4kMpFczW8>DBu0xG&OoYK>Rz3C7w*=Ngn& z;RYXja_YR?aQIbN_$`WP%TlIx;xgl-AxXO4TLi5Uxid)^@|@Pi^Hj)*ZUaVhVeAXKb=vqo^wfQSf4 zq5ly6YAq?ui4ekN=kI=$)9EHp6ocFX_j95(jAQY#*~p&Ug#!g>%T`ba7K^20B=Bu= z&vKmj+%iu>a;lniI8r|M26*BD%dLfHt*cpSWX~dz;e>bL2{5SaQm@~=)xd{P^34Rn zb~l=xgK@vMsUhixbo+A1`5-Yj0Ci}|d77dVGF zOQ7qad>>MmyU+>}+8Nqt!wcWtL*^VhEhqp)aFs2c52?w3o-acEze<*!L z1mp_6D#hKHglIw^bRWCR82h<5Z#M}fIdexpn^-iu#^OJmCRleM=avE5#V_fwBp-R? 
zaKE=}&S`_CHbt&ypo*H0bHv}lvT~ko1xH}b@^D&`2kA4)_96?Sw2jN&s}*pmwLOqU z_TUmdW8V_1@`(3t%h3(obFVb*w{K&8wFjm18Zyj$kHAfzf)!cc5$1VgVRJPXy~l@H zR`TXzF4hjV98YJ1tsKE*IdJlx>}oSB4$Y)x-<9Kx2V4GBNe)tw>Mu}!Pr?>2|Jd5x zCP&3#_QO-pMLw@(t8HDP%h{?AyuduRZ2}ME7SL?fQmAio+=}J!y4GIRaQtF1ct$+f zhYwP(l3h^OO((bY;o@mImryg0jSmO=Wk%`g8%f}v{I8lRN`7h=MU)!4*-7CR*PpI9 z>jXKcU6-oxs&aceNnTIYaj973^xV+G#(0m|m`}fvnE&KMT_QTFw<(`*5HSH^B7X1(xS$JH?+TxGHsko8JV3-%{uGS zv7|Z52QnG8Fj;k|eL^c9tg{Jb$dYo;W$9OBxw0!~z!W3Bg1?Y0DPadg&AHFThtvg| zcma{vaV$*F-%!OmX`+ZYUQ?^1G6u_q&LP)I`@Goc;`_cheOmgM_4W2x-yhcLEMMQ^ zC9^HZorf=1Dja~C5$l2z$8p4}ozY=c#+Ap0DnFc92X~u{j$N-!^a*Pe`QIh5tscwG z9X@w7aByU+HM6l7q&eEE|4d$$`%nc>k=9C}L*r=e6OboeX!wD%fr=9Nr0#LlmqLQDMPkn=kj_Mv1)$st|a zN%H=9S(r+-`WeuEV)b;~rusFrbT}LBk7^)S7UzsnMEguHo+7vrUL95{C2V_So506^ zAzY^mR8u@rmxiGAwYdRJhLp_+QS<$pPq%IQwjvVmR)4dOk16nC5ilFoEnAft8tw1F zQm)>|KP0(mfZW)1Gli~_l%suYO4he&#%wTzx{U~b zRf@^-RU>HWuE1%$*(Wt;Wt6++jyu(*Y-_0TA`bl=lJiCUrsZS);VLKMryt0GIOGrP zuX`rNEE@mJ+h$l8hz_N_3i*&T7chSMLDi_N{Ws?!uIFB0F4_gHB!+>bC%xd$2*VJV zaP+`d#`Cl#{Y;uW%*z8n`I0Oid9Q583o7$oLl>ZX(%^AzotMbgO5SPB zbNIrU?$~8*l+hV!Kb7372p|!7$2XnTdPbCaNUn|YaxeRxwpraCsr^F>d965z(*;^v zHrL5;`6sE!3ipP?&lBD^qU58tVjd=I_zI5fUieO7*A~c)$~Kc+EQreIo!6O|5oZo**q(?g@Lj4njQu2=H-x)S zKdxuhyJoYRF@N45?N^!GQ3#orO)^8DFX>FLzJqb(7`}$-s=>lPVJ#LECFNZeNclN$ho}JIoGT2 zZ63?QdNZCk+DnfgNc~5d=G59U7%RtS8H>-CFRzaK_~bc0tB&J~n1rJh=U+r_d%Yjn!JxAli}j({Oh%=qh+} zN#DRmJCEvPj8F#$xOlG+fpI^Ww~nGSM>Ci+r_1)yuI5iChbEBQ4FuFScXHrS`UX9j zd!p~PmA>n@^40zXJgN(#?{{+?p*z%H58R`-YR8$bd~mt8!@yFkNka(P7^j!Z!+#8+ zKUjHB<+N7sL@a8K$ZLbUsyTi}L_tSS4~32ku1m9N^wOjQxyN5gzPh)B4NAaJef40G zytjLxfA02sUPO&~pc^)FC!8JR<$rdB6sQe+pvJ@vH!{HFqH3LSyws$_wloa|0GHH|3=9qN|mwtn5)ayic^!T)@KY<(3hg%RRzK zFxIhD-$s0_Nj}Q&hU3!FyF$yvxu#Dm*q%ELJ8%4+5NGhh(A|O{)Kod;#x%dW_N*&` z!K96FVPna*(?w}0qawYDCUSBS;@K++`H>jnN)(00Km&?u4SLC&C-?e%*{@1wuRXjL z5-<2hU&5$)Tg2g!MI*F9sEF+|zV_}N)VnM85>bItlwLGrYdrVu;!+-_>vi+5U`F{1 zr;eOfRb0QL)Ez}#ZzL^T=nLfjk~f+mMjQpbAL|-v|hiov~*dWuMWNXh@H_Q zcC&HEJX?Q>Z)C$n#O}ftzF%bjw()#m#tYE3J;?%NOdjM%F*R)I-YU%wF=Nif=8M}q z9PNf1i`&9?29Xm9_zoyX$=YYrZ}CAm2nNFWGOPd_G*X3}73MJrp~5s4o_RBV@N79# zv*7p#8x}k>djreZIpkG9SSe51sj9>mZ7hCVn;qitt~&g1adVp`JxyfsPH`;K_47k+ zY?9!Zn|u-SR_7n!OkPEp(}bhEW{^L&IKGwVy$6b+))QK>QDt;3DlpowQ01#PzR;@; zmhgJ0bu;h$Eqd%C-gy$fsT>ml=gtriCV7Jm+1vSHXiyt6(;|xOrmRlRWN^-3B51 zfSMz%o8+J|;m?f+SLZ^~94|2K4X&|cWqnG$^M6L1qV2X@SuCCJa_5uA(=Tm6)s<3? 
z<7CiC8?Tpvfdwr`Cb$fIs#}M)HC6NUM`Ci=;7W0pucX^`)Lx{utuhL`S0x}J@H z<|aJM1Y~?BeHB}REaR0u0t*4E5ghq+a&EJ7QpVwqJ zd~yN5lr=f|9?#m#O#f%zUbUHgy|!(O840sZFcaVi3W%jSo|N@$XjZw-zM4@fpxM@vZ>T@oS`%0&>z{eoeqHq0!GQ3!xB@Q=5}-ydpMBuqY@_JyM{HyIpXwRnOxHX^2AUk{Z94U zXK+2pd7s70S&{RV;>jf{IB(QXOz^>y9UdZZylWe4)S(O_#*PUiwLJ)%^l%Y>VMd@w zCuz2%pEqAlVE@!m6(f~E{YZWlQZMQ%lb|tTD~It$JE}2HZsXE_YyVJ>&nYmh8_W)49!9M^bhg-|f`YZ26>Gvx$CJbU^`{ zbwyoXiw~rPWEKGv#!ibles`MbN_R$J+Yy*l-w%~Qk%Ip97qFp$E$#Ehgb(Q_oPHuG zI3rh244FMYx-ueZIn1KSGc1CLn$GvEwr)hs9%m|5 zzSK2XZ5H$t92R?93`=(aq=Gp^-+FiRlNFHhjoA(4*s>MYRC(*%^e*7vp!5%KT&Ow6YKUNLMa*xYH;*{~pZg~HO+-FgN zIiNw~p~6Mq^xVj*alH=P5}H_%Un?Aj(x%X)1K&aQ)npHTF|$U(GviJ|eBK;0bI~Mu zfpfwyxV|QQ!T(tP2UEx-sVtBbkpu>621u*(A~S?Petsu)L;q%~R8PUkOG~!8)jNZY z1fO?e!5V6&LOh0QWA^QNP401r3xsV1<9HR2RQ6G8I|FLfEN>VcW zoA6hG86oaVBY@M;1|>d@Gp@Pju~5ExyxRg{CA>y$?@^dc*xeQA$m}zsK>xF5#s1Hl zm6TM6D?m3H#81&8bXU2VFN_2Fo#Px`f(ga|L2@&wIpzgsesQ%LVL>-kWy@W$ck)?J zV-Z%ezaJg{k>wv#;D_!>i_$XgBMu}VRuy^SrzldsHgaNqPxp*%8*DfNY+Jjxezk&R9bQjp% z16bofGQRzw6 zq?@B4C7~eQDvER?-Kex6NDD}JH+*X!ysrDY@B16i7~dG*ulIfaFg9nem}~Z&YpuDf z#M}n$&Vc8-obNPDQg8P;xV=lXnKO05@k&c5S7(yxp9^Ytq%kB8?e$l{9K0m^Vv8jO zE;V_79&>-uGkt)c?~Q>Qx||D$6R;U&eFn;(nlwB^{@q!bW~5>wVHo~=fnhYd4@8k? z>>M!;>ofVbIz=0hQ@i1hHQ9Y^g0rid%U{2~I8(0?=AgcdQV7lX1_z;@XKSLE z9K^|YpsF~joYjIaGXDVh1T+&BcfCFcwp?Q`;R|6ym}M@-sO18&+k+q_0HIHS7nGPW z@0rwQlvG`3aM-zkX(on2&{pS#49w_A=5TO_W@GPHvr#WWyx~*kpXV{l{fQ|D(7f~8 zsPPYGi@_bf4A~G{H#n-nk>y9c52qVVazxj9>gEKDSACHL7G}336cq$>X|hSGzPVOJfuRgU2*9sXy)mdi zmx#pBo`mr;9dVXUivvX#KR&UJd{qMnG(Qy?oOotw=jk;}zdKBCNLS+Ju3FjH8AuW8 zJ)Gjd`9uC$hT8jB5(v?$k1+SGg11LZ&x&OP-(${|>Qp%AxT>1#vzn#?MuRdqtksqE4F^ZkiboCm=RzY`$CJRL-?n zK7T7bou#m|gdP43i-GsODuK+Lkwqd)Yoa-flf_S=~=pnmL^76)ryV>7hQO->6f^ zcIjSibN^=G^QAYfDfYcS202=JR>!%Q%ud7^pMG2KH_z&*wvOH2RifYi?Xa_6xjMWX zSyONjN~cvxd^2B+bkW*c=uuvib4G;(tI4+pS~yA*Y5WSKRB>1#CRe-1ju63YY5O8I zVfMuFHKzykm3!r?)*%cPgD(20oo6XCIa))*e0B$O%e&dS0Xxno+Zt|Sku5bx@!;@0 z$>*LS+o2xe%C$Q=UkOn;o27~=+ruy-s)5w>8v$C+V5oh|6U8xLt+u8x4xxRXu%H;dHE<|A@$<(I`$8??PSq9H! 
zp&5o7pJB8oS0H6v(jU!Ejn;vq2sP;{$v8&TwLMV7&Sgo@Vvfi%%r{Mh*7_H-HQQ+F zJzR90lB)L`xW^VP^9J}_tFzM_o~wwrc{_@wzYx3KKz%unp?&j9j?1va6~+=vriwPL z+^W~R70)vKiD(`%X}r^JZf-V zpg-WDE7|*DitW)rl1u)RgA~2=vdG$S?{36wil*~qa<%p96_Qe~gG+QgyS1_*OF1!e znla0z1qa0Ru6ZAdB!iPQz;EO5}{# zRv%?mnzOr#Bslh4W!soScb`jVbGL^yKAK0*&~!`ud%sp$PMO#QlBX)!ahF}xRw$Vg zVJ!W|7-PDqUSmudOH$Vo(V5yaCj1TdHm7qPyzzTymwK%##hm#HNNZ1zLXzAJm_(Kk zIKmz{tE@!v8Qy};e4+R-CG9yKVnyuyyNFnRg1b?lV>uS*h|K8z79YIi9qvT%UjSK4Tk#9#PL7 zE@6GewyKe6vfr#SENiwn*gF4fOYY6y{`#c(@1rRXxI5Q~fR}=6Y=0)2>=@CsW4|Hd zhw-?O0%(xRo*-)(50LOa?l@quGx`Q_oh-?o{5Zj7yoGyhnVjgjR6I*cYRcdV}WJ%$^-N;w2yWb9GR*K z4yoOq%-VTQ^u_zwz5;c)Bch!q>mcGXM_u~eBB%NnSJE*-o*;b}L40A_Hvxmb`Tt8g^$IZ*>;2 z(_gI)hND<%7HamI{ZX_`9>LbN12)dTg5l@GHTh(z7=z{of#ek#_&jqd>F*+ zaALt<6H-0semCJ*k)Tt{jTb#M?p+;TIKy39O!PumcT&t>$ibvz$DMHB_8kSndO6sG z$>k*OglBAOZdR|bGWT;mU-^WEw#yW8^}u(6_+o}UvEwSI@h7yFeZ>kv#`@m}a@vSD zEB9tt#un$7RDy7fCy0QeNG-f`rYI;Nv&Hq1GCt|7a#AV}q$sMxM4O03w69N?n19jg z&??~>%opg0vYH!_E*mk<6Wu9ZcIad2&z1Jb(kwEu+b!FbuQhJO)0r5RwfS)D?k?F$ z8d*~!zgj*(bGn;CT=OKhOpjOMO{zlUjL?{&;Nr|mlt$ZV^Gc-s)N)WIQ~pkZW3eiK z^|f+(U14zJRq&*^o;_)LR8!f%XF#VclsC4vPCRApmDsH#p<(m!<{DyyArEe#ZoT)GMe6| zBv0bForYh}3bN{9+Rg<}y2Ki_b8E95pRsjpE~@k{=VsaFuht}6ZO(RV(x~RR-&m~b zPJ2-G@2)nRE;*!b8utrauIG}Z)N zcC2jeiRo|hl{~_EP;|QAN?fGfxNZ1JbK$s9Z!H8pzSe}@U+gBBAmNfc&*z#|&f)>I zE{A1*@tAg@YY^)6@h4it8@46s18tmxFs>qruhDUxi*VGu*>zsb{MRwgC-szDmpCTi zr^-aK;Gzaaqk2*~6BfV47*isq+g=%pUMpVZUIa)l+fQjWV(O{ry z!Dw>GBn`;M41dL}zedeB7vE!HANxEX6&40LQ~X-P9}q zMmKhw4aUniQo^|NPuMAm&dPdW>g(7`NUe{HKg-x-x>8C8*$@zQLvi9~F>Fl{2nvvI z-~jHT|4*C>uK$05s{m06kz?0S-m}FCU`m%o#R&$z|HHC)Fg)9yMs?eMF4EE{)XOXo z2k3;KANGe7DH$zBdz2@JSmB+4sY&~@!WwV5Vp&d8pOl+J30)A$J_JHQnwic5jWbFZ z-NB$h`U6GCnT#__z%Pi$Ftu`qX)AbT++w80B*c)+HW(b(i?9zeo&i_L1;v`qYcpbn z6(lrzuCOf=uBs>B7|$>ZGTk6h5;3JwJgd#a+yXnF7P4L+G?76r(7cwKuYj2!io3WX zQq95X@8vJUd?Cw0Eq?+qkUZJ8dQi<4{JeGfOOvTb#>swG)>lLih+}Vv(>F(-A%x5U zv*y^8bSuWQH4oC>o0It!4KPM>k?ul0XTHUm zKXsP#K(A2&0;Z2uYpd1 zZN~dqBvcB~Vv-)Z5Ax4-gDQG=eaDpA-V+I!eP0O}r)+2C9( zi|U}VEF~fc{n*hs<$M*iYxED60Mz=5gCF;LcyASy*!;bUaCy7Q>iUn0F@HEi4PIWU z!*$n0_97zaMVez~60;IUg8xBn?c5ywHa|m&cwe7a+5g`>&RmOwA64jbgdbmM+V=WPmH(3!(CNT~;QG|q z(fME83=)wb!SF4aSMe3pleI&Ul^56KyHv%abPLEO^=qm0%=K}w7Z~-fl zE!OWep5hK%(2Ccox@U{%YNb8SA3aBA1DD0n%k^iu6x9WUS;Rcv zG`(%r{lLmrame7g@!rePy0N+$M-*2j{t_yONtJr8R+{#gq;U+PI^bplQeF`n4fM)r zC7vWDt$@AQT`N#amFlf8pCK;ErMcshL~5F7XW$H|f+VThA4_m00BxiSFu|B~Z-x}TlX%>>(fVr>(SXDOd5 z3YdPw&y@*%p3u+d&H~<5pX*)Bd?Apt!^QdQlm!_wA(inuwjD~B8r}09PZJFMnojl+ z2D)(}O{nqrU=tx1s$2YD@;5?NUm~>8YqBp({UmUI@3`heWy6_y$c`D}CmbF#Bwrzu z0NwT&C`&&ganfn8525&s2)7K1v_U@?nD=qj+6xC3%Kl%N;*RuanOf=1)}fJ0ME$|Y zPhf$Wcds+R-ca4yl70~K8vvD$0ZQ!w(gpUgo61_}i;V0Zl-K;|{-^6F36gAbv?Gwu z?>&ZEa4r_I<&8!DV4{oq&!F>J%r{{{e1^rWI$wUzxaO;4V69v2eoUNvC4vdew{M^L z^P4E`9cS{lqmxo>U8H;o;ed|(5lfflL|T}zH%`_HK`HZyR@f0qQY&6Kir)e(FJ1g- z$@3vW)VB14;kvcYpM)v0p{+>{&U<6Cf|9BV{5246K^fU>3HVy;&?ixfY$7z2nEk$K zXaVQCdas{Ufj5FEj<7(05+L-RIT{qv;xpnxwZkB45)xd^N}<)eP%9m9!!-;P%>^a# z9C&`dhEa2@UY4uu@K-`JdO8zf!43KEjWNNBv30&Qom?wKgOPjzoVvbvZ*;lnAUJRL zEt%!QDMkfEdP?nBBJcPQK;thFyn9Oiwq&!_cZmTz`X!+HRmwZlq*(DW6jWc~ZVc!V zaH35b~46RXmbQW`Nr`D5#w&AqJ~Pev*tVjUtL0Vd5_Z>kO7>PUC~rSx;L9oY(R|s&V&3djy+3cROC%)l`je3X-V)`J>WBHpu^MyDZFy;)p*+e6Km%% z97WmGbkLLf%@c9>H}a2P06kt4#XU7}qlcL59g?f8UAdT$&xj&(4f3!eG||CFBppna zf*$7|(ZSvTe-+#&+`JMYlUl`BE9S9N{2|4=qHjd!OVs5F9Ei)_!oKnu3Og+rF?nsh z%BkA;wzJtK5u?RUEAak~D|CtSOF1%qJWa%9#fp+mX{~?SM_pc{eYN{Od5s*i3jhsd zTRU)g03XgY)9M%Nx{Ur9-V|A_>dinUm{bhwl-vI;MQ&&r+Uo~A$+sOch#7?YoJZ{C z7l%qLGrLT!31|i4Bp7+f!F+2t6vrev@f&u*E_(U 
zf@mp9+oxYI6&B=uXJOK)s6(je1u?z5M^0*{N{V>2nbwOg>banL5H%q?aVWfIxQyto z8Tf854)Ej0>$y$8@k{A~qi#$1Spkls%13UIVtKo6t+z78;K!Gaxm$+x4{d(nHDGim zMpr=}0!WZR+mm#ol^*n?m3w7H{AS6p+pRsRj!2kI=UQWcyWhh zjgM6wYrff1D*|o`1_Ag(nNT+}4r%72Mn3oJ55W00*%v-_a(X5 z=5vRQF!&o$eDR}GL$)5-dl$2&&j5Z3|90pJbxl9s-%cdD$k`3CcRT)GN?wo@TTWV4 zszUGHW7YKW`fYK5oloHOOXfnhrYbr9p2XG2V_#4dks>nst^I-)3FbTkvmoq18mY-r z!-QaP?3NTVYzp+_=3R9VpHQm8xR>`bkZ!ey859U}9KwXYJ%*ttznI`p!6~UIL3eeX zKv+ef0*!#Vk1D9Tk>mZ#d9lA^D~93hNcA*)EACT9WgDv{xC{|rRz;L<)Hdq`1Oh54 zPZ;=(FaTU;DR$&lOxXrR`x&o6{*e$Us!#V!7-U%@?p}*h?!JoHwwMesx}PY%bQRbm zP3AXUy+C%xbIT_QxwGUWYvT^u`MHZXK1@MujZC#dtBI$8ow9Fw^sAuQ#zTlu{sN0g zgE(Y`B&8k3n|sI}q1>(Jq}?2kp)%$Z5MTJg#7DwbJS;{E#a9+XXI!fF^zz9fYI+udiOK zf-FH35`c$`hekomq5!yLMS{@45R?q3ntI-tL7fanonTPi0ZSnS(S4>}7^?@U;_eSp zK5CCzBda+#1H_#2xM;3z~!;zrSiAqX6}O0RUD0Gf+K z_NuqXP#X|17EBvFZ&sA&&kR7qmw)B;g&uUNx~&EkXrxN3OOs9A*%^#pIQ8uT`Il(WwA4tG z4@LlfqDWn%0JtTi1T+1${~HL9CyU&pK*s|1YCbww;Py9?B*CUT6cc4neH{aXPJouw zodrfJ!C>b*^H!Lk$HjjRbceY+PhWyw*pcH#JjGAUL=lQ$BAtaI8#oA9&F+rByiN8o z9{}V(9}5Y^mkRLQAEXJL*0{$4ni3XVr-VTTfWKW7H@gb7OZ(==3YS>zAI>& zh>x$92F9T_EaHMFi~{2$>SLjl0JKq?!S&4K20oNx;*^fy!9}Zq&JXSz(m_pcg6^N~ zO>-dJx`F3uf^~7&I+!(hVBNryPzOL%M|!OiWH8(yc^U`^2Nn9!FtC%#_3L?CF7-Pw ztt6wmGgmI?Y%eANxzo%Ba^0XLHN?!SV9w-L{~Rf1aEMG*;G&*9GZAR`XM+&=0ze`F z|7r-Sr}NN;^@XTrmIzSVF@p!Y$Q9lInnIo;80v43ApHRZ|7*-Ia`X|{Li-jz)uc`X z*&XzyJI;WC2_FEMnMt`tfJwlCSnwR)E8V|iQmb6ROi#Vwk0c;b1k2<6OuP!Ih|fU9 ze0wjxD97vtrr+v_#YPhXh#tmHCJiC| z)bh3y3L9zwGyO@$f^RzjSy&D%s@(_q(Wq7Vh~+L&PdB4VSjP$k$5aqbUNP7?18ue; z2SBkUp{5V^1sgL>LB2?8Ae;|bgt8z{_B$B=F6GOw5F}jC?3c~dpZ^PccVt0lYSs)W zb@&t{E2HRL`hx-bqrgyV$z-cn$RJhg%0OD0Q3t8lqjNCvXKh3_t^z zc=?i+B$$@^_haJ^lb{AI)xecTOwnu;z+}*g8KfxaKk26PlMDVDup?{agcH0FAUO(0 zjV1`p_&1Y5LI4TG3rWpOFiq$Lgut~Bd1gqBoy8u1gMH=QL&@?J*$lyA81k$os^<9$v%vssAgY z{}j>w)1oJ>9OoLZojrGVb+`>II3wZZDx3~Zg(ChaN5PCty@TvggPWhng297vs8G$RE#zi2_YgO* z=~aG#UP*Lg1Z2$D6p=j$Zqi{gW~2cTM|`OtJNhOV!S=UN8yS|Y>Ya#AW+d|iWDnKF zthe}N_+h@2NinRk$& zO%!;M-Xy9IOHf3nnLF$!9DD(Q8)v+*kxJ13}9`uHkf&XW)VP zQ;*b^zps?4zQs4syGYoRtEt_H7Ok>&iU=eV7+^asy8SIYmp!%HQH)4mA=L>ClygN0 zivUcjNO{jpi!O;4bfYkP1IEa54b_2#f?q8zWs=`D5XSP)n72B@z-j`19a^Hh<^^?( zB;Y`xW+p#S%yEGxx&1Hq>8^qYN%buM*S>IEDX;)$YV+#=C-vm_%;ztj z?J67a%YwRBEIl-kF@VOHt|l7bubTlpk7g1%Tt>SAxI3i3I$Z501Ean`(uG3kr2)T| zocqDS3J8HfBGWRl0_9lH)D&P1ELM@PeWfCIF=SW)zbx2{#tIVLF&KeU#kc65M%Va& zhXW1MnetZf;I89lq6?$AAPPpW*C@b=@`bzK1p|JdHZ7HhuwVSpw*KaGfL)V&H&GCr zn*iwe@JLeyh*CE|w}+HB#R&SYq;{)?r)Ve|umISJCN_+qw%n|#-Ng?^*w_G$4Q#** zhqoIt1df1Ms_dWYm0kj3^**u~dJ3@a1&GzxswLP3nBf9M?X+m#sR-~uoGHYsI18}s zR1@5u3cq^I5R7vr*E56j4%+ih$(~O;}`8n1z5|+D4%oFiJ2!%6C$p0&uM`AXVlS>c>J5cdLOH4}JI4VGMv< z`ql{rJQ^yi1T>|qqW-2NQkbsA&kEV))%m=;oA4(f$lh!wm3pjz{GUQWrFKB5<#UsJa8*m$dfBLR9Tz0r(6^9=L0; z4Zv3K8ljR`0@P@FW81^ZjP3*+wBSh{CkbYF2j0tRtkzGo0A024W7riTx?Y4{qzSPQ zARw%ezm&U--UrZiw0V7Gz<9}kBw~HKkC9Ix^{Ud$p^z{l8$mZZTnk4EvpDc;n{TMzzey4=fMoO& zslydO3}yi6x2vLeQ<)$H-B+1;$_yTGAH1TG2AKM%)B=$*KljFrTuv4|sQs{hd@})( zfC1lm2s^P5{)FjkUjZ4*j{`#pMb?-r8rXxWLIgJn33yQ8y}6?vSpA)US27Ta2@_}) z40-`Ft?Aq;8Lxl^Uhl6k24a5zp_>1Ou{a=b0cc659jZN615w(8IQ~=*{Ic-SDNYlR z+z8Amr#?ZW0i&2B1_O+#y}9@X2^jQuDDywG0Dz7eyv0GgRY2@T4b4t&7WoP(4f@l; z%n=l^0bp68QKIsY3Z4Nd9=f!W!3FUx4d|%pcccd!1;h5`99+AJ4I}}vMp)_!ke&y` zal7K6y$T3`0a!M6u1<*19f;`J>O?;tf(PB4i<_(v&f)%St1yVaMj-+3aKElW5epd6 zobIAT43Y={qUCE51yP?4nuzSqTL`P&*TD2I&~!;~ExeBFxqPxJgA3_z@gFGA@bR%B}3#hcn`e@9&WmN4{(60wVxa_dLJylqwAX> z)HWL^px#eG3CR$G|CW$$Y6t_F1e@0h!Nc2+CPbV-5P(%vRpIWmf&>A=l*a3nyiub6rWhON+- zpRq%N2VAu{8d)FQw!67O!V1#5<2@>hJkQ7KRe_DZQ z6LhgN_|eT^F5Z^5BG!8lsdzp0FxnVmjGW0EPa$CcHn;|=kSq{7Q!;RaSqu8BrXg8? 
z<{&LGVg#~q9T+jXCpp?P;*cyjEXVDRL9(FxChjpHkPHA@PHm9pA&>=XFo52}n7B8{ zduAJK5p{%M^*Lmv3K&xrfuxCN+)BmsFX|HasLv7uRu3@95X5yfnp1E7WgcShfQ4SB zkpL?5YBvD@Iq4&=AV!)0ZDannJH(-Y{@oUR8(>A!U?{}9p<*6D!oLhx+h3;ai7ABr z46b&_dqU?xOZIOf#b~>~tl>W_WFO!UwUo5$5P$#6S^>cm1K@1$xs|}xAtwFNgtF~} z)~|a^14uk~3#>m(BVsVp7cTdg?e_S~njb0vl%{N(|&ftrNcH?I7(!rkUoIQ z+hr^5pTQtNBmm05^nX3hQ2|Jm9|puhYURIPA4BG6yh3pHpI!dyovI3!EBuhb1hO;# z)q5)jFt*QPXCh<~{_{0j8RAeir`U`C2@lk=TOZ)*I7h?*crgC~E_f{rY)+=?kH;qe z4K84S|6S}~X!*a7vbVFQj@oK(&c-EU{OJ<-O3@TbJ;6X;wQ^x7L^xis2w1n5adpna zO9C^K0g8O(Py43J)&SSI5G++K?u>TmbU9>nt7RhL?~h_?9~eQ-5E7=#=KD*HCW&Ow zG=R_4Wxk4qLw7rAZ}-V#PNtZ!ZX%4C^F`7mZrF#5*l0?A%2I?$&WF(IUG#s?-B0n* zNK1Nx2tL@s;NySFN}mxGktQq>uYr@IdB=gd3+ zCWaEt^CBjE|At{N86326;1dxKQgn-Gv9CcwrcAHz*_m@n5sN#j0lbc#; zL0A`j@D();;uGT8p*iNr(S;wC6LwdmSAOh|a9{fq!*nxa@dzDWzBP??A^PqiW3h&g zO?r4Ft}V`6Ftf0X?@pNS+&lkXLux_cH1pRuy@p>zwSkra(r@&N0bQNdk8-Q~@nP`z zWQ=@92h2i&<}33V3?|ScRMM@mY!3mtm=LCEq+u5yzVp$KO(vt2;W*Byxi8@2@&2c7 zj0?{rm)d678#K!hadb^=%A5VOj7@>XwiU3bNVpjhfvzUs(m6}y%@|prIHTDNv~NjN z^x&g(86S1%>m~zt=1%HGn5OG$u(7Hhm;8K%K)%I`;sQvUJqkl=WG7sD#H-<>2l(-P zq_6d)=e96m8fS>oqwAkNMm%r6fP%*<=5GI;0`BLT_I>5=q3}e;plDbgS8b``_k+*k zqc>)NMSp~YZrt^HGL*JboE5Gm`?Ol|$(ocZql~V@(Q^-t+a`|yQtG(y^WVkhYbplo zZ;e&H=dC#`9RAL=(1d^#g%RwLXJM4h55?fk?CR$qbtr*D0lYSL-=8wB`DVPRASq-LW6i~fjgHQ|)JRGw`TFvv-D_#_^DMp47b^)<9)C-~6LKva^?qOB2w^EgDrSOugH zYLT4yf!WiqBL{bPlAva?_}wo`&40Yp&yclfbT~sKl0NuXUkin>S7^5BG6vodvQ%Ox zf>Ea?Ty}(cNAVYxdQ4L*RH}6rTTWc5$qG9J2n`ETWN7RAlbkr3OsuXqz%1t9z{-iw zH5Is(reK+|$UUF|HH&76V0d2k_(znN;QTqyxcGHCXz>H&W{ua&`<2FYp*9QT-PiZF zk8oMRe2!kt2X`S+VSrhEM4An0^m1Jq?B^^Sz(^xDI>YORv3%(VT`x7JU|S3`I7a)b zqXr&!{PJT9cKRYN;<(cy_yijE0LiR<@ZfMp z6%d#UZS@1;JmGy$N_CjTs?uFBfgb|Unyl^5-WYah5CBdE8$% z0a_EN6Sl->i|{IWYMZmycKbT`≠`E%X#iywV%_3bl#=ag~`Wc|yE}M#2TMl6y*T zF`7{uhAPeisk0n!K`3<+<-5F4=f}1pcr56&HDFQ-2s}n^Hl#v{ec2^pan#!21(=_N z(yT!x@+3}Jr``kw=4Zu^$(kVL<%wq|%4o{@noeR37!3jA`1p_WAlnhfR+ zboL?A`@)&Kn>rnyFr9Z}hlJ4VdEL+9eo1^3if`3yicL@%!n+r1cVD5AA6o;X%gA>P zsu_%WoyG6djb7Ra>wK7b5fl0tEVaF+j>mm^Mf?02Rn6|2Su}*-p`ZRU3KX@?4hgP8 zE+ogP-9Ah{FqZ`mVf z5L0j>N*qitW>hn zAV&b)Pm0}(5H$ZYaNUTXpGpis_b#w#PQ>;RjnE-9!Vm~&`xv#K0Cjft9^@eFKGWle zYy|`GcnMLhVZ>xj1Q<_001H`wcw2f-5oHKiq-me z)IcNrTY3#R{%SF)0~m~gBuJ!`s)$Q;0i+}@K(P&$TDmM)4=xl%|#fgt7)0O)y+77=jYwh}RucG*5I!XO9= z?3N2|m_X}s0&DoeI=79%Ch<@>(KH(f;D9FN#F9|7E%VWjXlJ z|0kC7AUYlJIo8-a7^r2;{X7%D)?w={2*Q*j#a~4!C-W&JwR<787i{c9wEeWlHlinl z%08p^22+j)|(a(FYk?Dm)=WX_* zUWw!07qh6tjTbko9=q0IGm_bO3=b=dH2*zA?)Q`9@l&zw6bGhwp`F^(!`jBKqxLR8 z{e$=VgtZ&owG_}{+^tsAq6Xs_T^ez)AJ!>sy|7zW@gt|9k?CYDbNtXBC>jZjqN%R5 zB#$BU>%2Afa;+rKi}z@;_QdiRdOziKKV{C8~=N#X~jswpcKRq5- zu~#|p?l%q8%=0;3Y#j4g6yyCXPN3wZL9jnCn5fXK)MNoG>b zV24fg<-@Vv8rqtTH+g>iUh|3*!Q8p{Ri~LL2bq0gwF?jP2rE}>4%KPE%}Vj@OcmC} zANx-oM>WqUc8cwe*d5G&2#8b>zb!gR9{%X}Q^Y6pYtlgdH^*J4#jP4SByE`4L?z`v9J&B#|+CZ6n zujyj6VE}_ONAh;IDRqp_)33#f{az=_wmy}Cqy>|JrL^te(vN;DpA}oxYzi=ytU~#5 z7*V@?Zqp6Em1=QWcnTO8VVPeA^16>MISpx8%+C3RpX#koD28V-sL);sRE!QOt36(p zrP>VPV59D__!vlok}WofkJHVa9>J5?tDhe#*Lmx@Y^kp;)AliLpwmGr@jbuZZp8rg z&Cg#^!4TJP>P+4Ss|m-~*^7W6YVWJax_Cn-~I4K&(6Fme4T z4upz(GF!<$&to(zu!tj4S$j{=Kg0i(8yfBoCBCOV88UAGc=&_%!0A^9@)#8=`^=FR zTGakd(yFJa{i?@!Wnj&AaJio$Uz){THA^OLL{8e8KiGkQdRx2?Klmsw_UWfdg7pLa zgCWbj(Y?>k_Y)jKdG*4f^DH?f% zkfj{cx&i7;QsG^)Ly{*wrv*Bb-m>`(zdB}D%fJ;|XTWQ{PEA2y>~rq8VcNMs481*5UZ6_j-4L zxTCdzf%~^jpSRrz3GS{adw-JZbrkZxg8s85)^bFu zYZhGjgsSJh6}ihLu1V*?5{vU`3%9%6l*!A!^CfRYedr73*3a&aQgDr`TZ2;WIv+Ij zy__iG(x+A@(?j@%_I}^4Io@lPNcKdJ9Q2p*rAMya1|R>X^3Mp}-^+m?K;rC9+%hU2 zq5+fyPR*N1IU=0ZMfgFteLy#2|L}v)!3U>wtxEMYb7Ft~tJiK}{HgEP@lic#-deOL 
z!5UTR2K&S5(Wu1m@rWnkVnX}Choq|uwI#or4+D+qr8}imM>Z<?56}AQAiT=VezYHLFvf>T)dUSxpX?~Ct;m>x^eQTtJE{OO-^t^P@65q;DBAd6S zG4O$tDxu(gfc!#bw)8j-|^s+?#8g5~rKySwA=!bEtX6Wu$cy7}E#s0xLWH`F-}=CO{L1Tbu2I z(vE=_CO2M^F@${R>gUtikiBH)FE76Fij(&{v}w`Ef*f{FVX&oul8RdX!s}Jo4InE? zu~R-3t+npBJn6F+;Ddkv(%ogNOP+qWVhL2d7E~McJeH|{3MV*=bY=vU>)e*PZozw4 z++|-!ncfv{*KuRO4;;UhI0C+drbU?>kAvV87x7(6f?zoh~(zxYua)J?$xCFOV%4v@&w$>l+nGfDw7bbY=2VD6U>LmY+ z@_MPjN_X5LQ`{@S8IoXF@i{#VU~n0^9wZA8Yq@+?==xroSD1M0_p5cS;yVS1)F7Js zEW+``DJNSWu&sAYB@Q)#EUX{qz~xjR_T!BJgIe)j@?5K*&>lZfcRf8EXF8eIKVe1n zuT(7NPH6aSYe?<9-4K+z^ z$iDSF#_C6tB^!>7<6HXogX?%=bCMH$E}s=GVx~ymvaEzrnaxti3z@8z0V{=1i0WN^ zR@mKi^<1p^GZWRy_6I3jOe`ibUUpW7=ubk1XLvqnxbl~ucA z9^432>k3H5)rDh1WT%tXdo=OM>n}~;9Tg&0=*Rcu%6En=d$C{RT_L^86Bm2K^jsr$ zWPLlz=4m7%_VGS08ZS#qRcb;4#P85)i%_gL{%k}2mtwEW_kBRgJ(>(RriYt!Ie?7G ztks)uEIK#E`Z<=I+l=eP<=1&p61m`;HS3{*i-dRqMTwW> z1bFgDkA7^TQ!gTmk%Jdh#!>%^LonOK* zVeAxdb|ceYON0lI-*^xtablu=8rLhnh9)gv98r3m45Ck@!BS@(b-eqUh%2z93C=9; zeQy~bHaSfS;tBQ!H(^ID7w!(4rcjNm0<~`bp>x~Ko4NJEQ*Gvzos)&^Yw#`})p+0+skywWeRIfP67F+SA z+rEx+Pplqiru7Dy-mM@YJX=HGq$ne^>?xBj=fYyWH(uJBseV3T+%1>Sl8W@2yGT6& z>?b*(3gg56;_R^c^q`!gw*@~D;#hcw54aQ;{sVG`{Cg+p^{Ad=PxG9r^2}R&ZAAo>OL30 zw|6POEKJI8wz{Z=kd>cm23k9M<33IN1=l3+gZWL9j!i)FFtGG^JxNWobX2GfYcS+N z$&Og11uDkVE=ae&?`{UJySC_W7E%Rn*qr?7Jc!P={7gNi6PR3l_@GdWM*rY@OmMlT z47t}fwSAD2J5$VdM_@fTa)A~n12*M%u7jY4hRq4Vo>?$VwS0FC+Ov(@LQ9C#bP(T6_myIe{U)Ma(fK8`v8Sx zLiH~4ADSIRO6*7eZcivB^HJQ=UUmE+^1g zR%3ORLj$z6d(FL+>~16sgVkrSpjaT8KenduL(mvI9zqbxH-RhWn2hn|=pe4I{P}Z$r+2c<1TU zJ8gBdUsb%H1{STNd9@aogacx?`^#!>EQd8o3)s$fsE?fw`+0ZA#b?*W1L<-}^Ju56 zmSOJNG*WD0O#mR9psBq5j z3hs3!c((~9S-e>ad1TGOkGu?r!KR79E^p_9(c>Tb#}99v9&K7K3%BOpB?V#j>wC?s zNlDRU)nuiy#dT>pX(I5S1rdkOe2ZLV+Mc?$X^&ArQKFDn(=OyvwCNwZH$lou!<#04 zE|Sq>sQ}$U8z#K)u}|eaDV5jad!g6jh-HV*q@h2lc%`KsHOXD8TNyq4ymm)#9*f5< zdDRo?3wGeD1XMUpk6+t09j z(+={B&cDdLeNr#R|MmBhY1;gcJZ~pg5Md`f$3Kz3Qd;ZXn=nQNuKNTdHXG{v!?;Kl zO*-A|prf#;$p?;n3C<}~VhyU4m7Tf{9XFs5mB{L1+mh(~2CbLGd%WAj4Y`+P1n1Ub zeD-7L*_xTdG_#-#h&jz-@^01ai`kgkt2W<%a&)h=+nkVXO;W^%D(lZxS{%qTc{wi3 z@lX)#c0vwei6RGSzuj#+(w>QLEW*huf)@MLt?M8`GE}}&)ZLWouRD7IX`nzCLlsa? 
z#pii6#iSx~u}6GU{>!q14&N(3R+GFzx3D;8TshQ|aBJOMl&UhplG@!$C3@7}w*Kk% z)>8S89Gf>|Wykz{#0r9^^zX65=VG{yhVqWJ;_>&F%LS>r^4z3`mrt`r)p~tq1XC_L z4;sg4##nEQ?ZWzr;cU-3g%`7(W<}=}RPIW#F^wI~+*IlBSX5r?FvIxv2Zh}-`qb+) z3FAj57H5fAVTqFz=tAdcc^B-3yy!v;x_i9j5x6%zMfy_DHt} zPGg8A=`O`@o1fqCu>WU?pR+wqE<&ehj@5i&fwbE`mLyvREuLw**d#PzAB9Q655zK|~7;2PU0E$evc zkxcn%9?D0=&lWn(9zkiI3W28jo{H7C+l>{gvx5_My6~EdJXl^pyO#{g_kuWaHdG~X zQ`^(ITS#8AH1ifqY^~N{!BA`aa(D4mx|hxov2uvz_t_p;`Ciiae$r*FQK5#^MDOAm zT`uLrZpFu&>30m4#G$0}njq3_5{n8oO~e@Qgk4G3d~pR4n@LYQaODBB(8Nbi!3F{B z&?CEH^AAbEQ0zW_Z8${S&tc@!#K!Tek5A*Csh1{7Kh2!xdaW%un%vuhDC(v%t%-ax z5ZaY|%qvBv{CNISk)qis8E;&0q_S{kV4FB0?wu@>Hhk1);?SZYL57&J(UY#phkuO% z9lCQ&*EDMwbl&2QPqbE%@YuQ8Gnomc9){;~9e~u#hKwI|jPs;Z_b#0;wv#Ifj~gC$ z3)|eXZP;>UQ}yJ?+q9*k|4{rAi#_s| zc30bzjcq8ku<^Jg`=aBGV-Cdo_msC!+`{aA;}SJy9LlT)Uq^TU-sMYbyDF+qQ0|y} z*Xa1L%8^80ac?QGlJ32+!D3hZD#HnKjp-Sacr;0$J+7YeE8~lM&n=$bmb!fss@I5D z>w8U(Fx z0rECs?7W#hOZ`JFtqhH0PwLeyul9_?@*d%vO$<8^?z`enFqK~V^=)@6O|#`rO2wsL z4D5>En0U7;62(sl6ij;znB4ChCZ|Bl|ERtUxl;yNM;dLJ)?pm$`YNofx3_#1)H9kS4@aV8d#l{c7De}>h8yO>EgIjY zH!ebci?D&+{c1dj|`KrayL!w zws!3sbvzjfcZc1ydCS(X<-cuMg6lGUXuURW6gt|tcWUf*T=r@=J}3`)M0H)g9!GvZ zu71&&S2nHu&OzIQGSX_E&lCDLokQ4SI=6o@%9~B5(YSo(NOw}wl534$fg*HSN8DtY zt8_lN-236XJcI5a;Tsce%W{n+w~1jPIHqRn{XBZuFFb?qmv+Y6@JEijmE&HcjXXBo zA_ccXMExA#DaT8uy>urVjRL|E?fU$VI^aw06oKrA5js&nT{|^^S;~dv8x?|0Dtc{S zTFSLSWjHN>Q7fP4vM|1il(S6=dCAmHzwC&4cA)w_Sx0ZJCf$|?BH=#C<=Q$=#N&lJ zy$;9uxDc#ARc3O5AIq@(w8UBO4IDoyb;|I_I-M1q5L3ZX1Fx)ZT^D)6@Uv*{L z4q-lK&;i2UcSuLFoz|xy_t+HnVs=j8aoiRys#Sa|y{W~a2JFx-xSyv%-C}H# zbkKpR|B4C=xJ0A;h<*M4Y45GWqWro>e+5KPdT2>OLMdTr9FP!^QjuR_JAY)^ zF{eITZgVn!H1oa{o^0_LYdmn6_0F$8xcz%O!aZG0S3G=Y z)yd28^!FZB@hbyG%S~2N&xbp5a+VqNTWm1ZifBz-gkE`z2bs+aFCPxNol1vw7cFbgo)=4>7MGTBZ3Jqb|M}1E2L>)>29uic0(ix$0gYXf1S6XM_f7!MiTuddhgd}5^3)WISc|&iExVLT z&9h>Jj>x%F)xbzf`Ge}SeJyHLD8$pZ!708O9Z|XQ!+IOU*USy2j#b6)Epav);H#@XA>oCDqT{$?eYCTW>c3jU7Xb*vf*jyGa%Zd-llgkqqU}gZ!Tw% zbh;Ys-=)7y=&on3BbRv?AEKbwA>8727T=g6gKd@q`A;!jUoJaN;H4e z2D?t+fya}O<0$_}Xdc`9o~<)KQQJ3iUpU&dPqrd{F~Qx)@nFP~Go_TF1r(QI_!cYt zLq67~Rn6C^2oqyO=bgntVu#~^2L7=TSR_g@uQQ#y?zykaZ}6TpL;mx){@aHsRv_?4 zzYRY*FsO{6NjjV3q>8&4?0H%~@AhIea6_U_+IROSzpvxOCf$8>YynWwlCS8WWvAWq z2Z)meRkUsHa@+ci{5199Qk&kqKGN(&4YXg<={V>9@em(RHCK#tVvVlPQ)j_LQl1(l8cdEqs1=-SKQqDgInBvc)F zU(t>nwH|GH@dfyq5Yc$)McsQK9@(Pl_JN7F~7K5 zz9x2`&o5HAwWMz6(1-7B+;FL*o}S?e@Lqb^vILn})%V^xn-V79s#mNH^z8so1hF86 z9+!|+*EN{l7few*5wCWdd@qu@j&RYfGo?s=G4XM%f(LGDDx5tTp}^#KeC8u>^Hopo z$)J6pdymgDwT^~n^t$3I-#eNpN?!uj_8)oEay$APhQ^90*>T?MEJsCO z|G)EAQEj%>?i@oRWeZnM(0v*OY{8de8zDMsFA}cYv32rfDO)caqvkP9Gwj0kx6ZE? 
z;l13k-5cZw;za2rv+ZAeda5P^VwV12+ zAuZVR(lZHlkBGGVrSl5-)Z#KD-|%akbnD3XX7&^7SQO;~^6L-(v+fqzlAC@N>d!=f zE9B+(wZvyd>f0Gp7ot*$LOPE{JDSX?wWzs`6ZV`X=4);pTUC-8QpdHevt!r%c18V) zk3b0jy#Ik^VaR-PEKE`4mKitVPxx63hxnMsOl z0c_04SkgiUQbl|?6oRfgs52q<82FkU$7EH6_bPVJY8sp@pXnh3HG9%8UmKdHVVCay zRGl4U(OV6I3qv#P8|BqL!lQdNYw@(wT;_y8WryIkDlM`6o4M zC-dcPIPFrhFkDOdR;A0#iWLcSzviBbf8dSV@zlArEtN!#Is|pUf1WXGPTw{T_-Gjs z$);=!JyjpRfB~hhI3M2H93Q?{8|jO<$Jj-OCJG;(`@okJhkVyD#c-!*%^f*=hr>@f zDTYwb*Snl#DlrFE9;urxlN5hIhlR=iUU^zAVb$(zn}9%;o`3Fd;r%`xc1?AlN_D4C zH0Le!B(+a5wudQLSq;0Pt=12m#?Cc6Q3t!W_TKu1jU) zhhz4&-`-~1uD3_}tWjFUH=fo;KtGg9Y2w}3bDy4`h1vA@$(aYfb}N~>)it*l zcy7Zd=K0)pPbYf#H08$D{e-JmP9>IOW^Z2?*d17Y8((>2YG`o{MQBZB zUC39xN3oGUb8M}8??Q!7a8$VZ*X7KsOTa>q4t|g+RfzMB<{|&e-rdFJ*+Fuz0*Cfm z>xdX*-{a`FQ_IR!W z|DSC+U&y6**49nHg40vL)Ax8$YL1Q6NFs;FLdK$nkh2mmiXV?x!ZU}yy?=R$jsD{e zX{1d~N~aucPY#2$B!Y*2B|WL{D?$o+kKIz-FqeeMQA>wpU~lCFH%+dE$7{e?(e;7f zh>P**=ulox*4I*Iteo>bvue>+>1P#Za$euM;l{R3kwsXl=*ymNasI@I)axo0whO{Z z(`)psuj{W2b!Rd=Hj$TJZ7r#8$FLcZi)yLpGzjWvD3rQ13ilWWTPAFKNUc^rqW!IOymv+=1BV*r{jB# zEhlktOQsyM4GVlJ@eL)l=UA-lFt@7;P}!Q%g^GwPh;!4?`+2Cvz;Ph z2vc@;gWx2RYXzm7BVS%X1G`juk2)pvtC>R`brTPN13v*rxDMC$rKY`v5r>GXMPcl^ zSfnlt`F-(8ljx6Is9cnZXL*53Xg#?VxpWO93gz)~SD?AgDp76j!E(p0A+WHDA67Wd zh1`-A?)M~^4J=~#bG(>j9PYaSG-UBt4r!z=Y3o;q53{X`8*(f6V!n+Q=4XBnVe@7R zidTAzVpM6cBEJ0M8$~dKwznpqYvtA$H9#1qwv3L}0C9NUpeMJj+%Y!rC-q$B^`gy|MHw0=1rVF z5+=)|fy`gR2?nU*JZfzB#;hyPC%@SEXmg%Bjayts0uQNBqW{u^lpkRX)W5`^z0Oo> zP(^(An!7LyTahEZSKbOzh({_0I(Ehk)UbN-XJ-^y^ zXSd%zF56NdYs&gc7%9VmkkXr9jmct%u{mhehR|qd<>dK>)l0QX39Q+3Ej6+dvd}`K z)bqufjD&^nfs^zbrt-Webm=YC6K-Gr*}4@`Uccm+iVi9>5g1RzpptZP{sHgUhMe~6 z%#}IIPVN1NlML=;JeKA&@u%^QH_c1O5esbScQ3wRoPXDN4|Kiwy~NpKd@u{QivQSk zV+Ne@FO^Dgq;PA~g!5v9hUsbVkm5JAx6es4Fw`z7@ZZ+yZDRScu_m0L$CK{R+gg`| zpG3kTbdM;J_VHbv)XTH=Z|5l&Swc*I%CS|n8N3)>|>@7AfMLo)N`?nV!8J^W4(ISPbmU z?bC7lvFGX7WFkM^PcYt!7f6}=l~8Dkk2fc+FJ{crh}mPxf4LIMF)QV~^ozAC0Lq%X zvVH~EZ}d6-KGAbpP?YoGmV%2IU4J*i+|l)DI+$j$*}}CrFl?TSQh_mr5LyJ zq_ecw`+$n`9V%|;z`Kq@y(_AejTc0Kigv^8ndz@*>QAg_S-bwU=eo+X@5Hp^2U3wS zvn#VtQnDjUwOV&+UuRx>)laa^((bnle>5QGp>AG3>Z;-%TiCSCTFftY{5*(BLmAn& zJY2nI!LLlJLNDKMP0xY@LZA3^lK?icT;zi0GUvRopH^0y@kCMV zePc-BCpUc$^{q|F?_dalb+OEQ@-E(zI$)&yZ0k$hQ0F85#XHP}xh5iaTUyAmViE>o z4VEIO>CtMpzwQewqrSS~J9$M%5S3w<&BY&@Xx;Veyi~Jt8JahgGi%b>Gu*` zG5RbnGcE!Z?qdkjPFK->IyccZ!P@ANP(n}{*r5Cr`Sgm@M zId0z~Jvs~y4wqgSq-0k-YEp@@aDZC5EB%uzlyUOm=A!Kb7?_cvHqqQ4(Sh1 z`YWh8r(JBeQyLqi=ooU7Py%;h&G1jkL?)aI)ieceiRiFWicQ#v=K2w^PSfY32`w>uSjGqVy&WOVaHsg znGU~HObCtDqJPi=Ra#LC@9Ug#52aHZwH$pa(bFE$muU9$&4`rkZE6jlu45u!0&PRr zfl-sIi)AH;%tv)sd#QU%{D_>6&dEh>_YTJFjmNAk8B;=)!q3KLL$;p%Z%;{#YAt=& zO<5RE&BcDccACy%qQ7md;l`fxQ`g6EWz=m_$`IJytSU-=&bQ31PI0Q&RS7c1eK($L zIOqP}DI&|gU*6I}4J!OKW=m>q#^uJa_uA$tCH;GXn_rRU9Ot9gt(A@}pT5jc9T|Oh z!HEOIsFaNHWMneOUg13H2@$VAXHG0io}~>EVh`>vuFvkgZL1wq@L})qom&;_CE59bD=2q~}A$~j|@z%+l)Sy=ak4Gknn`z zW4awD&9a|S({d38;L5X?oi<Sd_07$1M{a5S=< z%1Uq>{js{?n(zHh74>1a&+QcGN$;vfUw(7xm+nBB@+akg&NpkDNps4azL2~D{uTgH z8q+f%GRu6ulS$a`+9wM;6bxAPkv1(=_`U1BBi;;(Z&beZ(PRs4x1NXM`iwr|__-uE zi~GjHq1?*EDf$3o(WbMwUHN>c%x3!0$%usT=3B+eh?i|hl9CCcsX}Cq~;g6)p`+U?0*-_|a>JYdomS;{DnG7=21cei?Xe$2c z+YwpcRd~&z!)g$7m9G80miwpn<em!{qyLYEE`M89r0TJ*1fy%Bp`(60m-D(F9YE$CTdp%jvhb#Eh)n; zTbJ~F0&$0G)OcJ~K+cI}=Df~|9YQdqH>9SgitNgw`w-X%bi4w(xi5hT=b4uQx{a>m19m5r(+hVBz}`b8uiTFXVk>M z1XUMEb1N~H`s_{*S&s#BsF599hww-8(DzgzTK}0LSiX>#j zCBzK#N-nioX-l50p`Yel{2{T<+*W<+rW12R2KhPj^@`l5U=wjWqgl?^orEN} zxQ!!X*l>SHES&A88+VXyVwG=DqaVJvKXcTK`O4D!q}l0sX-)gDSoCer{Ql)=re{dy zQr=P#vm<^y41~F+%^FyDS_AX`^MXXjrLr2;qw_`=4Ky|Ha%GBj#cjD72yN$Y`d_~4 
zi$SfDBgkVzq9;l7BSR1rTU||S{D{4;gDT>@97LZ87r!6G4OMXvPM2&GnUycn+4La# zKritCZB0}I|FhES24;HdOFBGSy!Vg4%{sX0ft@}HV|U?`xR*0Oe^0p^Yx*RGkgp(D z?LgrEODY9BHgPSqttKQUnMJOYhrigdRvZLvRy#k4Q?l1|Fz_AwH%v1^plWp-Blj_#mxyZOdDMGWnauF2lyWlD{Q>&J{6I z(~lx?<-eGtt9&BHu5#dSUC}(ONy>`U1&%ptuTNarWx-O{3mfjIn?EfZjSwh&+Ct%> zbuw@^cR0Ve_;-28AaXtF21d#N6@GMLjEa0A>%&;v8?rF;_2yvl1SjUW38ofWNI+K2 zspF2G^!3aKtC~N`3L~K*70$g&o$ztG{*n31pQOZ$Sbnlc!}OmSnaQ4bbWa?2x~mCI zjU(p#W1R;VDLfvUwY7@7JgP{vpl`b0_#lgFQYH6((m}E+dB<`7XipdMxRhdBKyh=p z>^`fMdet$5{5PR4Dd)5$OW*G{Cl=Ub1*Z4?_MVH^@Jnp`RI5J@`D;92x+zAN7o@wa z*~QP_!D6&wiQFXHET`FBqfXl0>hu{Si~BlOvQV|P$)gQUg#y<4ujZS!P-+u9sBTN3O39I>-3bdg zDg+`iZ=G27u2pID{>i;PE0EGPrW;p*I#tyFId>avF#4p4<)Xl;rHK15rBLFhHq75w z6g#(*ekbSBr!)E78BvAW>)3l**WO9HrFsL;XOzh6FV_aK?{vx{TQ}_!5q##+JNB+o zLvL?->iA73wC}I!TFr%=a#M;xT6qe3dV@=n!&`CtHV7Zw{B} zyN*O4NH6Z>m7D36SSRuCk}cd1kSXf9WX0=G$B$7;YDVj(5Ej0_>g_od%}hPd#+ay7 zDf1*=fmut($D#JaY<>;W;pYw;lg@GFiVV8Pg)?RGB44liE(|^hs!H=03I|Q=WNw6% zk_3<-yTn06oqwXu%RaFmC0Pqf)o%GIhY%5MSY3@ZN$!~c9@rfz*BeuNHm^h}vzE2n zMkH2)c$hRPQ0p#{S|Suu-kv;bLigm&r0hgNZg*4rq144G$Rx1*=r^*rN}$Rkx9Hop zK27%>P4hS#!>No=g^i=l_v^oHOGbxZKenK#eC0` zRGoGflt7`vSZL#OaPD*9HCJjB=26Tf4gW~8E(o*Si#<+whYuqSsS7FB$?la#ax+x6 zvhEbmMU=%$kGf+1Xc9sJ&$c#(DlTqG zj{he7Np$pR)40e2oARM9b<*VbTkt|bj;yH25F>M9r~>u45NyQ~j&p5m1f;j{uStb` znqDx~<}6r|NX}! z45ezncS?6%_UL3xzpCgiofUmB`5pV!lc_<6Okm0!Lns8o@REh;HMCgb7guf$eqG^m>YPr`YU1NO8P`yHtd+ET6t1 zmEA_4jLMAi;g9Cov2M3OGe;QlOD|>0RgzXC&{$Gg4WVA1fjWsdS8uLjcLQP)_-NZrI^+Z1sBLr;%SJ^~l> zTP!@q`(hv14rPczWd}(ZrLvFK8x|N-LDcxLvpryVqW+E(F8rToy&*T1)SOM2dOqq_R)a zvea=A(ica27odjfV$^*(;MN7XXe}&Pufnp}UL*1Pk%l?V6OPxO)IJBrNvdd6u5{7E z_Pd*u&5Lub-TXK?YjWo6?dYa!F;%rhhb2|!!&oB35!5787mR_n=)m&2Y&pKuu?_X?KVz%#&G&k|U>~-RMgOSUI zWovdf?VDNsMF_gTbA9g`A2KHacB7rle9Mj0p=l5_qhWI2vuqGt3rK}{~ za(lC6Gj$yD*R#lm;)6jMOVeew5ab6|gpM3MoH*ZS{*Ii%qoIcvZ{=}RDME@PSx*?R z_h>ijm5kT+{23lSEv!zqt?r5tB@A9Gmiv~P@;8qYFUOJw0>_GH zA=nohV|1?*MioK_m03?bul2mYC9m8|#yApO4rePJa11*oyUN9?mlm{CApOp04@)_- zvj|oDW{QYid&U}95-g@mHiIV{g89O#_cgFQp0CMYb^VOaA1{uB?CL11-jm?+oEL8- z-ej?!plu{#jefchl+s)o9b#j4-hO&qGxzFFEf=?*}iK>xvm)ittjq;e%H*Er)HWJ6wQtOzM;(<}?0+njXwyx~ru`6i3g z6~5Fc*p%^enRY@buzXIUDRlC|jwL$<>iwldzS*j!YitTPz8&G1_i=p@iB-v;NwuBu z9Aa#M7*IJrq8~x|Q07B7mc-0J4*F~OH-QsGlfGPj_cvKgb;ebr@%y}lkDx=noQvHj z37;NE8;U5}Pu-X0QD${8_um9PU%Bs*&C+jC8jp*>o8}p;wi;PDH>&1dX5t=pek!h; zlIRq~m)FGS>4Bq(TLJ*y&jDnG}QHeMA;n?m9UJ~;H5Bb`LM5K?x}=5Z|})lXVoR&KDne6P2h+d3NpGS+3RmhkdyY{ z+zwSIa!KWnM_!^0!^5y|<+)ei@d23y$^kKA`b=%RX+)^hE5F{~C7l%ncU~#vP6^fh2`kT8WhEFlnO}v@~?3nf5~3U zIPbV73nMn#3{YOo%W2@ibWZCp-{SXF5}f~e%#59PSm=09G#FD1Biqz{A9Ws9fX}!) 
zRV1W_v6|X@XFWVYuxrBbs`zcdkXTc=nP$D-(T;0%Egi4SCP6`y5aPV-q%r>9n{U;3 z)cfe%mJNjDKkG>Z<8FdpFs>cqJhr#7F(VN-EQ`bt7TF5%lsdcouabm(MCUB93{b9M z)-7j)V3=dx?8DFMpH<@>Iea^`9|}>vRlo^@)kWEsh9k2%JNJkcrh6-p8?V6Fdy?ub zY1g^)ZlRKvuNE^fUp=sq?5f5hFO{@{!b_ek?1j4LAE;*a*fq6QrBW>^79p;@gi7)Z z-^_%70-tAEis)vV1@Ul>UVSa=s%kOjKinT~9FFPah7&K*bO&}fNjNe^tR)A{-e+=< zqTH?oL>f2th7{g_D60f1yf5WvTnPdB9D}jN;i8T`M^m&Uf7HYjpVz?`CJ69dPWQ{W zq5(H&d+k}p_bb+1v)5ERUfRf_8pwG9uH&)d3#~Q(s>A)mEOyas48x_wXYHZ4U&%`w z-zD-b?*jILq1f)hK9Sk*J(y+U5oK$H6#7owX#?31|M%~?#(nuL6 zo~9#&ZTHid_P;!EM}7XlEFE3<0FQ%E^~k>+rhmgwGH4Szd13Y7hT5U3AQT+gN+c>t zes`?FEv@~_*30%{UdM_D^uJ6BoAONNWf~h~g6|WN%9KUi`oMVeCc=@aa&}@tMI;RQ4MchjHD(Mf^GL$`5N4 zrO>%`Io_`?*cVFccl(00csmU-Er*Ji*6+N|T>Y9ym=YQc3aF6gOT*i7BeeL;@S7nj z1Zq_eUYd!f?F?@EblF@lp6zj$^(rVDSa57%)Rwml;?BDYGVZKimYg4qW$7~2h=K~) zzcgHxYB($WCo%0){kN{D@nM1j{yv^UckuCmIK&SCO{$A!#f11G^-)w zgQ6I!?-AH-9n(5?K1u$fD)AG$-qh^a6;W!!5?PZq+33!?G{%+3vars&k)KOTX_0Cq z$dT6si-ltA&Eg&gA~Z8a;U#S3l@5+fozF-1QGd=4)zxbMOp#u&$dli$0+G8%5!W0k zXjDUSJFOjAd_i51;7_WKV2$ysOdcjqA+*d*a@qJ;R{#6t-EHP7~H} z;=?u*JHGtr_S*Dnz8KlHlChH3HH4yGPf<1h?JTe7-f*1&)|hRI`%x+OU%!Ng;0z2rX^fGV)u<}2|1wnl zCBb<@qEub-QSoK7&fMO;g{HJR^l#D%@*29H&PTU{O^vi+zez=3FOu$1b zQS`$Ab~h%eA-JW11C(`Y``Bh8#Ow_HoQB$`+>7tTBK1k5xXd0p{Bx~Rs zWuzD!eEB+O*#xqnVG+8fF}P-2c77(fM8*A=Z+tQDr(D(wTR3vk%iC`fyP=Y1DVAhD ztLjI{D1;~{HTESQlsq@2KVPX(J%E?{@&W!Cq)AS3WO14Efo`IFNhFS(V`S)*pd|vS zZe$;ab33(C$YTPGgzFMm>K<*4*7IA>-|1E8$TApszEI1N<{pOD^ygB)`s>Z5D* zv7!j6;o})7o=75nwl#OkrU$2j!Z6nv8#P38cJnTRm`lH3!OwG<$~NV&^9Y zm?)p%CJUGBaHRD*mao$0_mY%4s45o#^+m!O;;v{o@B1*6aGcMY!V_-Wc85V-ODZU^ z4XS^nHAd}4dZ4=ebB^T5OdY?8zCYC1C~DQA?KW!F9zs(OKv1dKTZ1ceAAP2J$Zr>r zFqoof%FXK^CfW149QI&myUFGUL{|~L%nKf8txZppQ%Jd0L+`o}MoEXc1D4vjwe>!x ziv3)nWqKL{FoZ~YWLVJXBtxsw1*{t^91=~bR=`n3nD6oP&ijl-L9|X@eeHx4^%;$! z%M*{m;zX*MOv%gAs^y_&h5GR2wCvEzFKmu1XHACO zIKGi?pg2Cm>1Sh9hMkyy0u`-TH7HIb;psCW_8dp^9UC`@dz84vxmHX94Awos2RWNvGDBrLPF!L6?=1R~I1Rf%ZMqf& z`l1^oP!*fc{PQ*v$S~r=mOqWVaiXWVa)L1^g25ND0%PWsRy%rNk;XV906l03+NPlK zXLE)*=B43`Lz^L|No$}2rKEBDwQ&hC%)RMA*n2s-!N`_xT8$W9{a!n|;=Ds|Ah8gS zJ1(2eXZ#*m>50Xr@?Z{>*t+M5FaMDG_)oE)fqV=*rwugdy^-D;;+b~{L3aN29g$}y z6v=8PSz>Od&vAovFH5VeE&0rZa^mNH-f9e-Uj~av<$|KU7K1N9j=2DyIXJI<-|W8P z6s%#VV!w)q(Xau%_F6xQvT!2EDCuVlxaLOIz-p6FS41SD z_eqpR1Y&%sV&_p~bnWFfc1B)_ocju~! 
zh&Bc~N<_={32Z!9hRE8p}GJD_+*KH_7X^(P4jMcF`rWjC+YHW0?be&7)^RN{H^53)4$W_4;X;g zABX>iQvO%h0WfNM08Rv0yDeS|7wAU>L5LkOS1Tp(NTwl5q)~$2O)yYe@I=8gb>!6l zKLPiUWc<%25TK<&O~zX_h`Hwg&1DuJFCg6uNP#(O%+XM0|Bul9N9g|F5IXvo-A&)h z*D*qkm82Eg<-j9F*PLMWcja?=dC7K_^>pps#me)=cx(hIQAna=ZS;!#Y$>E??2m7D z%a5G^W-+~y?@DoFXQiF-n~|7*-U@~=sW;>kvcKRK+!+#F?KF9ql6^&?27%1Ec8?#H45BLslz3fH+12D39o5j&7^EF z3Cx7>;{sd&yEL&mi~~#oUUJkALv)Y#CI-`amkatSVBJGQaNH~=Z?|&bK6{^(c_J?O z_ZAcdhK$hfYD#{X(#l-+&o6`X!td^O?vOy9o)(?IYd3Jk@A?8HI5*r=@-K8YlTgP>5<%mKyiWOEC_HwzM?Px}pjW^du{s+=T#fqOW`j+#MHhtIMS`qa_%)@EpGl z?@*AsC+58F34=hRx#V@^N4{4#GrCJ2q%ZQ*Qj(XvSn*Vd1E5cnK0=d@CS%w|tdpq= zK_b9hK~tP01Beo6TzrIO`5gJXP35Ob$&x*!tKn`2*y$;1i5jxes68*58gQ;y5K4 zZw_#aQ8QZI+(%B*1b;vHju5_%C)M&fQdKO!!_gD^Ap#F@O47W?t&~SiQnGg{m7i*` zhl55nw0ACli_ObFuGfV>fg@*Julb^HokHB-x|?{TMaVYKZf*~b=_k+&Gk9@MI7X`C z{2+fj`%&{DBt{Et$*`?eaX_|y;Vp9ELyR!y}6wwo(i0UacSo)HccP>0f15mSsb;yC0!xG9?>+ zD*`JF9r0ugAf06@0vo(cC7#v*a^aTCDPxuP@L}@3G2Rb_(4_&$cdWgYN=zpubY{W7 z?>2Igb9VnP8T_2(B^6htYo6YxA!7~c4N_-d2HEiWYwG(;NY+;D+pd7C`VlO}SUDLI z>!DJAd2&DeeDQ!I?SFq;Mlw_&lB;&5yR0@!Gp1$luY9YT6pt#Mhr^Zbv8G${6 z)B`k_8dbz}B)IKAxuaaHP8cqWN|i0Lh(>%qaq*7R2DTM1PRM3S7~bg54(e%9Z-D`7 z%UK9nTfpOAWnfqAzSNFg+8ZgcQS}1WjQ~4Nh%!)Kt=sPVZj0u$>)Omzbr^{V-pPk7ai~if z9>?gt2t)hb9h0yOmq0Y>XtCY)KV8c>3Fj=F7|3Jxv_<`d!-3F*6iqdGpNm@DhI-?$ zk}ajxJ)2LwYUH`~^AHL|?8E;`-V&pKQF-7Mjg!g|*Ook*p!-8K{8fVSPVKI~kM<_} z_vwZF{%Q>6C+Za8Y^<|~4g$On(?BY`%0G4*-r2cwoQvKX>1@dM%K*Ex5( z-u~&V+<5n<2sCOK%jmqtf&Qku%j-W4lX={BdHd}B^}I1I#p{_W7RPC_8m}2&X^_lc z*1zDnu6|q%E-ZudS&1d=(H!e{b%xzZSND!%pnWrn0XVipq6BjO|D`u?N>Ez70I-nCizuB&DN1R^`*?vO4lR5kTM z!krr^L;XqXy-qMJn`B8MUb7hRncu#x`=qe7V`BGwz|Ij|1;CXazsP^}->_`xi(qtr zr5^gzLaUSO4|>|LQUZt-JqzP3V*V2ci2vH~r7!1?SIy ubvOKvAlaJ!M`ivC-~WHb;xP3GH#jLkS>;2Y5omCGNl8IXK2O%<)&Bw^+)G&i literal 0 HcmV?d00001 diff --git a/fern/assets/images/deployment-library-decentralized.png b/fern/assets/images/deployment-library-decentralized.png new file mode 100644 index 0000000000000000000000000000000000000000..cc3a29623d85a621b514393a714ed489f02484ba GIT binary patch literal 84692 zcmeFZ1zTLrvM7uO3vR*P1`8J4-Q6t&_b@0|a*q65K5~!Qoqzckgq~ z{qFt+_c`;-u-5AC>aMP;uCA)C#TOMNX;frFWGE;oR9P7bH7F=J7bqwgFybqqq#!sc z1_}yV!dhHhMOIv#Tm|fCVQps)1w|j@6Rju@qlyzkHpV_oo?GBwW@Un9G4=B`6PPVU zRPt;86sJntm_pFxH!HH=_m~@{hH0NfF|SaTUSY*6?Aa9fO#1geoUJ;ehvGj57OEvc zsZ_Llde`x0ad{~UF6#`N{0EfD3|M3OEjxQpk#E&5iBLt;jOYC6jh}6?iVM>e_d#-j z9lp1!$<~5qs&dePJ+IuYhI2_Te(EJjr`^PMEyD)UhH9A|UJzMu-W_SzZ13D~%(YWs zr;tXuefifY+0dbA+8%a!GfC`nIU&CZx}{47G?m$!#Ucp;ZC{!`RSkONDwSSJDGi?S zh^zMTJ4;`~DqnOITNr*r)*C2P*pP|V)miF@#k@pK^+5R4L8@3wNO-q0s;4kP;k(MQ1HMVH1HFG zCi<^?DQJ2q*uUj4P*9=PP;mchqXb+bpJ?C*f%)eO8yfZV|Ga|dTDM;F_kATFQ)>7$IUGZYja4de$ct44hW^gnN{q2r>X zsK96HXwPC|=J>&!#ohiRq#q~&cRt|O-rU86+}+;J!I{rpkn(Q}KHwfw%t}fAw~33b zAf=9?3c0u=*qrlx{U25Te^5yf@?U@g ztdOby4^sR?=f9-@p@onISpSP@LdfwA_m}`5iLE7+HGnH1WsnavFYt%{pDW~kVmFYv zF%$|)1WHyyRKp$mI0L~OM-#t~l?$okX0_}p>WXsH5+Oef1{4O5p(=`lDIoy|Z$~aE zs-b~{sRDNZH1Z0K7Q^~}nwd0e=2j>BNmdLCOe)O&dn|47Md2O4<*6X|oZnJVB%I9W z|MVqa#E_8`rCT&$%%n$(hWZcRH!wIZ?o|J=Dq4h{N<@?{#qez=%YT3bU{oeI1Vvz| z|I>FU0t!CE>{GXU>wm1K77-1-`A_%&y;5@kIK?i)Zym(`=N9lJtFZsM*XU71l}T9( z{D>m?|BMzeS@i#%?EiB1|B&bZ72|(a)Bk&GHXl*ca_o7eeHt>s=zPRk#yBe340Pp? 
z|5I6#dtf#f!I2R73EYJbR73n29xSQ58*%s4_DifY)%DyZqv%gRRB zTZiDV4(~tgDxbQVa3tkxak5a9fKRooZy48+ZTf`==i^BQQCw9BoEkf{{CW%i50Wsh zuYbI0aO~ACNXF)sU~bU-@o;-2GrUBXYks7X2|c9sjCDAFy-0`_38#U|L7-GF;7bnq zD#QRSRg3F_@x@efD?bROZ(atNJ3ErdtZY;dnnb!}TbEo4eq>cDoD8`n#^>fZ;$RX) zUK}U2AI!t}vM>yZB1N4QW46jjG5kYHlO_^M;ZGggD0KcfisUVb{1Jn)Q%wxU0!aBX zFhi0HIni;Hj>?@@STa*^lh)i89d8>zk}paZeQ_e;bZ3{g4fb6b`EY?^ zN@}fy0{B?u{y)g0KnHqeTd4%vx5b3n)Q`{hh=k29WbVyTy(P}-4(1Hg-z%}Hw2vE|vlgXv~B zUftNVfLOr6e4#m18I=wCIwlSPQ!_%I$=MOCwd^1h6@i(;Zq}xB1)$W6=Z&Xk_V6Gh zl0&C`U%>2+Ud3_mxuL zU>hy$B;mc4p&D+8Zp(iUkwM#sjdIi=!GT0kns8_=Gm9v>UUdH1JZG%O>2oLm*Bc)HponP`mvKSg%d;JTtJFOH+k^#iW^g1@d(^jr!>|xSV8GCI zd6KUhGvqmMk}x1p1EcxFVNuG3PVq9FNo~pl%nEaZ8RC+o8$tx|{Bq+J3kxtTUj^p; zHv~rL!dAbtIa5BelT=3)kB@M(nl#X`|TEGpEhkUuu0Z~cXA;6#S3mFj66(Gkj7UIKk5%@gD0zqS) z#{U+>Wj||+#^nS?; z=_2#xn+JA{!6{l{aHHE&*0q-6#GS)`<@Eggvi--e>H3L7WBh?E)}HP;HZQ4BPGqnE&I2keOZ$f^KzXb* zppze+Fel%#c)ea(KWPYBpPMnuo~9`Uz>1r#x-ps$Ii`)aKXkO~A8jQWksdCI=+&1o zThA24$nQOx_wV1;dTn90RAlunLV8Pr@bON4@*Hb{#6>xcHeq{Wml2kffw@86BFTv571TA(x{N6Uw>=Js3!BfozA{?2xn@NT2dkD8O3Nr^6Vm9FCId0p0e zvM`*Ks)GP!p!pN8N+;Pv64Rp-vt$Mj=I%H(h8rOD?PfBffQTBogc`(6aaXw5>Bx9; zGTY=jW5jGJw&h^*7qKFY6kVbZEh}2mL>%a>uA3TT$!GwOC&YfOoSkYFgMrh{c5Xw5 zNz&7WXfNx(55fR>8g3xkGwXXkHJhnv!TNL*Qq)#U2G9YS9H4`>p_#;1{egyZ>y@J^ z3iD5bp2YQ~0w{u&8?VL2!_N&%N7jT5JXSLWoi0ST!v57;OOI7>!_k%4JCi5sQZnF` zadXHD5{IabbjTte?LE2GqVVd!V$U^&kNc3YIB|!LkZbQ?)9LU%QvW{k!d}$R81-Q^ zKa+2eX6O=y+}&{oEkZ> z7-Wxf@j)rlt0}XF$T$=BO2>zNM^f!2bL|0UgWI+O4u2d4S;X5Er^852;XS>_$CLY% zh0^AYFK|X3qA+ux1=`*b$Jn5b$drantlB=WQsfz>uyFS}&@hV#5MjxP{?%VKf2SUJ zl9+tCSeH;>w3rT>m5|yADhYEwyg4rW{wPR&bGW3SJ@URGkuL@mt+O8mCJw{T(QTWg+;iOMG}NweSW#)I!pl91t3 zW$+Yd0#2jm-Y2%-9LwE#&AZo;mgtvIH-w8pgD&6>Uyh`XyV)Q*Q|%RmZhL%1N*Q{o zWtK`+H)Jz|DK20tdI<|<1JKK;UKJ)kx=h`GZ3?SUl_6U6#)NW0N7^?}p9||N=Sx_U zBiV_ZZwERh3VL(jw(y1@3CarA=tv4Y^T4}rFg!e)2x;(@3=7cH=Hpvnk$#{uvLR58?0i-sc-W)RZG8wZI73e$%2B1Ve;=K$n&*PNdlpA` zh!aAVQ`-`nKnyZn565fWDbW@AQr(C;Xp@w&UtS?m^?1E;dvbw*S&}S#QkH( zLNFXwIG{>S2YbkhT>G^1Kb_Ml&@diHsyt*@+t%8xX)L;WelgHiPM=J>{q&P}`c;hJ zLBj>^bR7uah6YvMRVzlIt`gNRE9ll)@HXJfZVjc!tkPlxZu#b9LiSAU)3z}9w&xWU zp6c@X$r51OQFB9rBjdQ@aO0Qaq+C*iJY{jA<0F$NYlC-jNd*SiA85gJ>W#0l@I2KM z#}=8kR?~MH7GQF$250xSJ3R|Vv%cAC!{ob-sBTuR@^YocC%MtxROh*Lx^FDH(Dnlz$-T z7YNv%IAJ^$(p6eIbl>p6#m=NkX^3Dg7?BAh9WL24ht70HBPU~y-C(f ztX`pUImP^JekI?oAs4h6M*Pc9YSkUw4u|0W=jOX-LQ1Vg^bjM0?S106FXOt8Bd>5n zeA&}fuMdAUoTkPzmEYdXN3#2_GZh-@p7GH9-RJ?s<}YzVPGlND(XBgfOsRNT1*ET5{Q~2tg>9Sf`AY0W~ z{ElJ?&Y5CQ z!BNh%d5_ehYtI_fKy_Q;oBY`D;5wPA#J&RGMdDa0_1RLE?o>TjNz_<2mgnAe?A?); zx?7n)3VLXUkqHz`x~K;_bS&^A-&5M)taW4-KLEyAGe1P!`gt>u*-+($nVA{e->Aku z3j3#=UvWKc4+f9z`mhwntS7~>$}?DN>=H0!G-YCj8Uzzd+tBE)#v^iA_61GhXDcLsj_9hOW8FcfvhSFI8yS&yU3%2+N- z+Up6Vgo7-mXyHO8Zn`ir3NB6>n`%N@)93p+^1J<=Q)O6LH2I)}C1UHEo&tg@7peQC<<55akK$%iQ7E-KFH{-Gz5-#-qVi>a>$T;fN6@&Z-mI2;_WO`MLR=n+23P< z5KnB}>R0;MW*IBRwHo1s;l4Y|FlAJVV^eYJ1y2c=GiV4S#zKU>vr?2a-1pfO>kU}a z!9+oGS-nUp@4zr+%t?Nyg7K#0Mu|2OM=k?2-E@2(`IX)BD702tF+ouAE?j_b57=~K zL4P-$;aNL9qx)~anNn)+Q;@Zc(Nmb^eSeu{y$??9Ghc`>DxBr{k&IGQDT9aW7cB%I zxRt!*cb4+HIp;;3x=9Ku$0TBWZjI}x!pE!qsajyVBj$Wc^vl%=YG~RZsV#pIKw`Il z+yRIgnJn)1h&q&gpehMAk1kB5)Kt#1IbltbSLB`(G}ertNeV3ZJ;+5H+i$|{rRDl& zC^w;$w9HV1?;SmL!G*bo`o!Qc)4sc5nbuRoPP%v3MbNPzF1ddzG0WCM?*hkdT%sy! 
z{0QzKwIJ6pg0;TPTI~`WZb0Upcbsh32O6fY$VJrA9|5tG7b6y?}h8z-c2W=VzaDKpUoEr=Z^TG8FrCTf}d;?2dhR)3hNOymmn zV?R(Ywt^4b*-!mO2%=gk^G{{SQ%h9VLtXi1r5!jvIO^(1NJf@Qqpty|!fnOBTG!WA zPb+r5ep(}Bv=i$ivkvX+dtI%uHH)nGDW#HT&B%Pd1U|>EKVPrT_lh|XQXm!%VI0kr zK(qfK%{ObY-G7m|Yf68nbvam*TDoy?fXg6f{<_-m#|taBL=O@%am#`j>?sgR>qxl0 zA=PG1)Z}}|MBN}~F@II;Qh3~aEeH+L@u@iri^xDYMc6YHK@6Cj7LiYNvsIHQO&Z+Q z7((t4=QMDc$th#^7-CajCHCc6tv5)S8L1;Yzci@J{j<YQ#V% zFz_liAjk;>axwUWT3UxVern-+l@tUsmf93Vs*xL~ncR+{L#@4CgIT|%S`-@sl$C1I z=QP|8tRBJM^`kAOZ3Pcmt*z2pq_;+B>BgpVu>;iGJSSi8ULVxSR&0Kg-D00e)nFR2}P=ur+)STehRwFt^SXy3asU|bGC6d|FKeE+-hfX&%b%O^IGpDfD^ zZQP`Z&U95W+2Nwql!Ji18T~=C!h}HdO1aS>rDA0ir$_lgE-+o;TU+~|&-H#KlFiw9 zn1Y73s*_Fu{qR`LUm;2nDJ>w%?R#{8(4o5E938@3`MR@d!8_ilp!niXrawnIXP~3X zXfmYMNpLg zDX`t5{FIM2{rz6ymxEf9m?r1ndGP=i4CP`^7HyrflC?@}od)A$Hu4y6UcPh=H>GW- z;fnM~sAI&k?w*PbF(&7=+^0#z)WTTF4o4mcZ<*y`7!J8t~-9%1sF z>tiMm&T#;^8gv;*r#P+E=f96~E{Q+ zJ4~oJf3ECK8pq{B;K*<4Xpz5in-~yf`F;JXx-O%N&ul?#yyiDT`3V1Ptm2mDVjfMN zq%0w;pn+f6wHjL#jCQae%E$8(ig(dVSv~5ccA-pPpsxEwL9;kM%mS68EH+5L>j>t z3Qx*3WyV4rcKa3Y?>x5JYL8|=_?(&r#7BUgdckX<1q9-lYC z#%Ul9(>uU~45Emhh9{Be`+nPOSYdMLr9Kx`?msaGahnd;6?k3EzqK!zrDE!R#C;$3 z*ximNb=J+^vwLaI!0e+p+LLSjmR88ewd#Cn(e>U~J!*Mrmq)P*lUEYFY#9+Jzf)p6 zUG!0jy9Y)=Za!69c(`Wdm!=N#jp4^-{7d?x)z00IahveH7WJso7Pg0TbhKQf4D-EPQm4Q2LGm+8Ebo%K@Z>F3@a-MnV+IqS$hR*X{!uXRO zidxGDn>T)g9L6KMf~2_~5T$j(0&!(83IQwXZ9J4;lOeB1y_WCOXR>E$a+#S749&pW z?E%g9lH6BA&a1_?=&wYJy7@&#PQTGKwRO@X-<~cmrbW@G3tKoJg?z$WKap$K5#pO z?s4bfJPmGV@^9oUL_`kXMFp$fJa`pUEMQ&4OtFIWs`pX@r{&{Qvf23vvtxtqdt)d> zzTQ2qrX);Nb$TyJkCKC{FxMArzMs4)&F!IuLK%(0WL^jvqTw2=Of>lxHPQ{~cd-4- z3*k_;_Y`9E>;2}7IllaWn$OLBL~8NiFXcu988wuhciu;vv$atc*dCT9rK|Onv+>dZ zJEagHnAFb=7h};zhYh7nhDMI2l$K{ZbW6-nJsdhdU_d<&?KsaXz?Tsn#?|?rJLo>m zf1=|sRW^nf61E&Xrh6;Qq9V|Q%+;ib%ardzF^3{GtcBzvf%JRzBVfopF1la6PXP{+ z)=wZNHH|GY9~-QEQHhrQ+@t#sbzFrnPFGU!Wx>BDMxu4S>_ziKq}G1W?sHCSlpGK; z1cnHs5uqC=3`kdTj6?~OKjVyKbjNczYFB0Bs9;-YQw-Dy?>esDE04sKr4$$1kNxr< zcfaBBJi{4@6ScUhqVk}e?jWZWygqnE1!{@vgG9-0DWYTHNp4r;ht>5nOP_KKnUMpK zMx*=|uQGvKOo19CK1r~~Px?&9F-d<6=aVJTI~Ub9dVHi@)ahEl+_nd4n%FV+G0N?Q z;fDS3H%6asY|MC+kG^pI)g4IZ)em!F&|4MZhW@F=SA52FuKsR2!Eg$7=o+wxhCV~$ zupKf7(?tn<%%EW)3XwnzD$VXLzujCctF=BbT5SE$MZDrB=rL~`{P7Hj@wM|hgI8nodQ8@H>?vQ6Fw!kxfC#9s^$ ze~xPJMeprPQq2OTgX2OfiCAXoO4(D##$jt*$#T53>^LpSR9al;k|ae`uk>q#s$@Bx zpKs3AhB735z@^&>mW23;(|V{Q7l$?Xx3+iSq_hQ>tZ~B z#1|{*Kb6LnRMQ2ZO*a6}`iFl1Z0FrWAuc7~;e%ZBiy?}gi#673IhSZ}UHM;_qFQwM zx*5&2>ca7BiMhO`R&Zw9p`Wfqam(uF(FVg|IkwmD8XCLMhLhbKZ(F`#iFK`4AF(_C zhGPEGiT4#^2oJ?W=->7+EXY5M{k;J`gAV1lyJ@VRF+5x+-_&XuB~k%83ef8ESV-)? zCQ*D$rDJ_U56yM@l{5cz#jc_;jk<8Uyl_UF2b+62jxxjvZvr~p_ZNL{txr`IacCf` z0z-_-AotX{V$yC)+;^`7PZD`$ZN$)|;9Jv5fY5^hhc7WC5Ab5{~>1r70unVB}lc^Z|X6 z&}oIIngNyTTf8i4*6yA$ZOz2#<@vU9C6h~P zYKWn<5Ks%3d#V4)G&D&(-66vmipH|!${!THttFIb+tu?JwJGNOq%l@zxxBEyWGR zm>=;K>Qg!bI8=emr7r>7l@){0L)xBz!^!DYTbQrum=?XRqRZ=-ra;82TxrzqJ3rpL z;y-B+9tK0!7c#Q9jqTkKAchPSH$Z0%au*x6exa1v z26NXDSJp@!@H|S0(*O|%X(%4+jva^8X#H+uny|h*las8ql2F35*5Yk4`-6%fMLZz% ziA}~=&sEM>g7$VulhIN!GCWH4WB)B$Sa!K}fk{XKogbAtkZ-t7s z`AEIvLtN<{)9k6{PRX85t;! 
z=T<=1>y|DKWUg5lupTt5=_+5J1Pa!d$6FJ)UnRI(Md@ZB-;T8tcsHO??^(XYulCl< zgTq@!CwtW3CFEFrimtK(*xwZDRTQ>@tjsIQ4sl&*_9N1-h1ahz$Yy-cYbsPzEVsLd zpN?}nb?zgGyXwb`##2hFUt0;z8$J7sAexg{8`(Raeca7?FiHw3Xj|x5yXP{HsMDsd zOSvJ_%`C; z>l63ed|ZMCII#i6wKWuaM}D~Yxi_((S@FkCfHLpFmlc1)phLe1W6O!~sVepAt~oIO zM%MD4`N6a_Wt=)C-1Nt z(NR5A&vhy{?yWm^q{Hi2=6zMWBaI)G{jxAVy*+itnpS0q9L>TN`eQJJNOs);1G2N4 zBlAtcGA8$Gch2FIfZ=<|a8xvH)<+F52Kwt%KV3*nTB~zadEa^8B4~s$^G7|5*xhi&m?38P6H^*y!q~XsPBSO zY4*l;88J#GF|6T4E!Pg9>WGbnG0ZvTS96)osBdZooMF3svpYtQdApbRRc+zz^4BAl@}7+}0I;Isa|n z%X0S2Cz)<<+(02U@pq1|uq#(ES6$rRIP>*%3!eASFiCo!C_ErCR#@f3tQ154)1$GB zSHTA7DXZbl5l)rQ%|XG%>8dPYK|TM^XQ$tg6G}LS{U4FWVyB9tbU8?EWMp%Nz#+Ri zNU1rY+uA1?QvUy&F+JiI`+^is6Wb-eZRa?!lKPV9Siddw5n4_NB zQ>5dkR3d3bJrlU?cUI-6<%3_{-dFQtC(l(i&T7Yu*9dZgBL_;rI4;CPD^GXvpnY=i zD{DF3rwhV;ZLU_c>F`WKL$JWGWSHTkELFkfPmV2J{@!m0F~XGzWd>`4nAXk=XU$B% zUMh|S>kRtda0zIklg#ht`SSIFHRGhrGr1@=?m5Sw`J$ex;4)(w^mdSaMV#zck@&rF2z|7;CPR-61nq>0Suw3RgJr9LTZ)pIfssx$R6Gp?5B#5-ehx|_$o+2^=3 z?BRFAPd&ZV|4k!FD31^E2T8rjgyK7kF}KuvoSBi&)z{Mq%5q;nHJBdme){DYmpEMi zZFlImm0B(Resf=cf^0e@WX6gC+1&-RF}cDVM1G(U=2{yiH}_SXVYFq(>5*2ZC<`5Z z%K8N>mW}kfg~5vx26hT`gFBWx#Tml@wZK4q+*rQsCM)!ERQ-MQ4THi-5Q)TtOR=%a z(i}5=Fc%F9ra-0rbD}f*`+FtPSygY&c+IBiq4IMTSri7FFc2?rO7wozKy;*fo+ZJM zFmc4%m173ii$!)|;vG+g`~ga_OXKE~l+=~88JqKttcPj8(tzgFxccN5yfMQ?hz;Pr zDq!eXZn$zd+VFG{iOd(~;AEeP$Y*}Z^%hgO@@e<3|8zs&Iy_}|is}7D*5SnswY49(r;PL?)AQpXCh0O&9zqHI<%!G*E>uqKd zu7mc=`FPaDxcY``MD4`nfDK6PLNRG*HK@2~B1yph(f>H%Tp;jBM`}^ueq|#oW4M81 zXP9!!cPUql^;bI4hu!`>=ViC6_x5%S+PaSyG99rb$1!O8K8MM%=8o$TK$tWtak&4_2p0S+4RP}Q2YCSn>&kJGAz79JM)j)%EM#b>p7QqJ>19PI?jbmNvBd{@)*wB znc`&F5_>$2;{3|Pi3txoVV*bX!XA`RWuv#-tXy%=1J)T0!1AN) zwuYo942+Zk3E-aZW!&y$P-7}MX>KR4GTRjc#Bz7bJt_AE3b%L*yVeKY^FOONZyRJo z0?&_~8xcsn_Ei=G8ANzk@kGi+7FJQHAp^n{LJY#CMdNIW1;}j#*F=`?2j5wXjDM+3 zi*Ww?Ef$?$&RbnMh^ z=;rXe2T$q!dY<`q0fL-FH}p#(p(D!_!f^3Tj;$p!U!}y!2^dhQ(VbWIgp?2Y!PS)D z%?l-}eUYG#5DD11;Et^XVYD=+rAoQ#Mp|%_q%jgjJr+)Jo2a$CXDl%;&66#VOXF+j ztY|R)SlpFJ`|`{E&|ntfIIW4?iXVkP>|WGN75n<7B%ijU7`&r%PmI2U^2zs5E4?;q z>tI%MlHl30&MMMwqwhjyPE?6OkFkMrKI@mXmuIv4skA8FYK#>Bd%FOodVa8YJ>Y;! 
ze5DoTjxW)ZSbc6yW6+FJ+n$kTjb6k~lHjG43#a0H6?2?(-?I1TcX5_$MeSDD{ZRaz z@^ts5lw>ZEXih3AF1>&1xT`p7lHuy^SWWc(P8qzc*!75p+Xt*t14Uh4ohQrM6z7Gr zQ&MJ==L`b=;h}yLcv1u5kFrT zriv+UUgkeu!;@%J8YrPb;y4}&0LtjcCKZ&|PD$ahY(2&9=TRty1}$yCSvO$v$9mK% zLEqgT$o+wKTor3be4o;Fm#&}-R=}A0z|~%XuOYfS_l~BZ1(}`R@+3Jmm^JB}5n`+p z3fj=LAwE2C>b3#4?>v~1!B*_`tn%R!l-%CFcaXNRX}Wd=Uy(24 z`U1f!XdIoe1Y18Z8=EL6XeAR2pRqJuaPo#fcN=h5*{$zT`sD`_n&|?Rf?*V0+hsOj z7_^_e_sqW)sxq$$E20K&py1whLW{mS5CgIwI5bm<;R*5?Dd6oEJ4O}4yB6n?|9Jz7eEcGRDK**cbjG*pVfCeO#)2(W zQnI8f(e>*H!w+j?K+X5;&5Z*YIXye7fLN27gx@G>62>P&XSv3d^KuZ>?4;f74Xz1S zy7Hj~5liH7`sEgSF61Znbve9^0b#IAPVZYDjxMsx4avd7Yo-osFi;k!Cuj*@ZYo2| zWg-}$V1>Oc!?%mW4jq?|)+%FH6n_6;eirk}{v-~BIx^TzLKlG^`La`N%oB0HgUg^@ zQOsb{Y1?Z;u=JY<3GFo!8C|BYFJDtuXO!3o8_r8Mrx zHvQF8wnd{x3;KN>Y92+Uzw*-nk)QICL;|uZ2DL@@(#Y1@Ilc(q zE{#Zc{+@3Of544e*@_mFUD{VWj6b36gys4ty_IFcw^LWi7_D)F18g?2dluYtkEuxA zrBy4|d$;^@0>0k;f>ZQ?&^lJ@$tLeWPs_`*hgP@tGN8e#u&SOR2i<&Qe!b0nATm@H z*GCYe)TU%Y)s^{@GoPygjA?lS;>S9qLIVuY3&hAFRe~YdfdepC1 zowNH9yJG9G2{vvU2q+9cl9+DK&UHxHfk{vvvahbx8-(KxQo@tj*#5wC={`Fs8fTEs z)7+e{?BnZSYE5>#3lb?OZnoGY=TNY82shQtn1Jx@7=pn;FBzg=&VkHv@X|@MifmaR z?l|T%O|K&JDH^QxfEEzpD={c8=&%7&!_q5rxC4|t6-mlew<#(>Kc&h@@qX@CybgKz zGQw{*P$Eg#*C4*Bw^J&&fsilK_|X_mBZ1G|H>1QtD@VkXY8-dMQ2ZsNFwxmlSj^0z z)iL~Yj%i?$+TCM}0%lC`5sBeQTubxpq>Z_jzRwu~*R+r&URt8pI3Cx8!3v$eGE;Rc zucA{{!aRMwULR>vyRMu~!-xTxZ9Ulpy#A=S=yXB$MR8J;T6e2i@p8rSvm_7SY>)$6@le%s%|$I&OcY4zlgUyvz`)5%HDtS$gr$uJTXz7boRgE9#$4 zzrEb&8Sd`B_JN-@W?dYPnt#>@5E?Wcstkl#78CF0P2lb+8@=1>bQ@ty;qYi_A**;! zr~N0mU6hS2SRH(CokaPTwkn%%o#4oGDO0O1VzP+konKrhg$b&Of#gc&Rz>7UH zm-6?YByo@_b?>c_T$GpaCp2S1ZMJ6+khNv$ydC2yUQD+L?kvt#soaAO*+|GZs*U5q zTefE`8Rb7QQ$(pomtL<b5Dfo3~23ZnQ_!_n~7 zimuo89#>JWi+^=3SjWAY@kcWy>oapNXK=ZG9Hr{L9^=Q37_D6v^NB-K#V)Uw5KeVb zOjNN=?5y%QA@sy}X!NAUR+$l#GPqm=g~$LU1s5z|eZcwwn}a|rM7}vO%o}8@mDZeN zX4jLV4GiNO=Mucv*rXL)uF|d^dX%o=eZ0 zoBwr?fynoy_iRIY?^Hem!ivye?mnni{`m$(9~y-Rq)CQ6J9W)`8VDXJ>ngJ&RiOlzQDCB#Rpg6QGeG!BEbEA3k_J|svlKF^JWGSx4 zWW>|N32TH(Q0N~8A>Q;TOJWOR!kj(L6uq_!7hn*!}Va=Y&T)HlH};R`AW=ErcCjovU9K&UzqTPtR7##LEBsfb76NS{ax*?G}k$#A5i72 z#XkeNx54a3mwifNBENTWBlfq_Co%*~a51xdFGzIO4&{!cv-kX(G%AkFYpR+}1emNz-HZ*+qt9&Nhb*a%7_JE7?QII%e;Ll5U zn06F@e`t1GmlX?vK#e}mDe1u0d*9PtQ}*446sc{em0caurw2Aag3R8+8}^=XTVp2W z7c>9S)apSA>1(nX+4pL{E8%Un=WeL6rXxJt)#~hJTx>vP`zOx^2vHP~!UZpfHUwO;&Piy&H zWnqPQNn8_+2Vq~#K&(+}~UuIj70_f4BR5LjU* zj~vGmseY<3E^9r)`^_gUiZJ!I06d?l(8-(k`x`2Si9fsN1{JMX-{C^tIZk8%vY-3s z{mYFn2uxX*oZRVh`Hnl^YlOycDrnEn21kfHp zjvf8j$V~(oE-W=FsZ)Ii1c9fW85LK_O5!K)8lY1ar)1}D;RXrh%Vqr3Xnk^3c^7fnT^!eU6Q!rVAql7 zg%3fn5-@$*UYD1Z(`VhFKOY4N(QfFMek?(&GMnyRHQ;d}Tc;>IucLbra5&+*kf;0P z>`v-}dAdS-y%14j#G+7h$BBOT^Vj_N1dwo3UQ@B7*EOPYqg8~sRHZJc{=k5>)xpB2 z*eDKPBf8DyC+{Yf*m4+|QM{b19N%wrM@j?fu88qqbaJ$g1@HW9Y61OC$h#yoKoD&T zwii}dA=ubX(?EE#hLmmOQKBnX|gF1=Vv{=v?vsVj7*6b_3Kjp>lb5& zI{jp1zB*3!SC~YZ^|GnHGp%x@UUrDEw3;$~Nb!vGGj&A$nQaFVuV-rvpL~%^>ED(a zGc)93(k{SHiFb4@g4_*H1l*5B^HM~AAW}+~hajf9B2?*oB~`??4N}sLoD~%}Q+MTh zbhV2Lh-&an(z;9Sd<-fV{J7|CYNT5hu%U;mjcr|KukBP^5zdE}nz(p%bmW>JWrV`1 zf%71zOWAPMGUhzPl+x1bO)93+S@&xOmN@M{(+^zV4`9r$hG+$e6u-pc!5b63!Jqgg z)Su7)R3+!`0t=U6SoD%WU~B9~NY@%nXY)GdQ1aOlh0hl4iQ1zUCkaM(Bqda5Kn7kS zJUe`@95s-uwQtQ@1`&1QCz<7RGe!aI3$cWQ9S^(y%R7k|{(a7{k$|#njt@riMe(#Q z+~M!gF#i3&05mJF7Gzplx)ikAz;A*|s*+si>k9;x@_^M^H1K3BYt1#(NEn19;rE^X zA(5Rarmawxt5&V6+&8&ja2Tz3u(H@G(DH#HXRxGra2e|BL}IQ4iQp%2qvDDG_cc;bErYset- zafJ&}qHCSl!YL3VgsMD((C0j{T^T|X-O4!f- zJUrx&9BJQ-Jr6FHU@s;)Hm26qrJt=O$Zv1A8DrHejZrycm}NGc5Xa+fB8OAAbOZXd zPsiUG2`7G2_MUv$-%HZ0rgsPttDoN5Sq*P|Y;M~!AJ0^^aES7*(yPqH&W$G4H-OKu zT(x6V{$pc-j?}x`K*->#QRcWIZ;I&L5h~2EQ!4FSaxjiCs5)8mO1$;waaGcwmSR^x 
[GIT binary patch data omitted — base85-encoded contents of the new binary image asset (fern/assets/favicon.png)]
zsEnHlCKTN%x?9G@LYi$?-e0t&WN`5OTb~rq{^S+)+^F=BcQ#TQc^6F$&cvH{0;!e< zj8j^|a=N3P5A9n@7*1GE5OAg_;c*)}fZ7AcpbQk}`J2`9lFgmx#!YKW(|B^RUETDU z3#HQ+!rJ$kE1nQB={f!KlaFdUCu%;%GtpOn#5vvNUb>=+HHyVr`hNqC;utA3OBA1{}?SSgB`p|0B^x>P|s{PJ** zw7lD^{s;O7VV@y?p?L*|A&685hHse5{xIUT>^L{hSPU0BJh;4mLUdg6D5MHZ073TV7 zo7}}7ydDPegbfyPqR9egI@@_0jX4j`t3`IyYfp?Rz<0jlFRwv|veTD#z0U-+dtW^1 zbH36F!uj#|sUFAKTGQ~$LhKS*!S%qbF^Fjl&)YM4Fv?V3S6}SpT55qAvy=YJ_xAa6 z*4Xxp0L54{^2D))04!?Prtq04Se_vttqCo^KWkqumT}H{m58@lI=9NsD71ITE(v!c0xWYT8^O}>-qB5jk z2_WM6fv%LtYW!Gb3^(?UC=bbWVx;*5gTN)yec2}iQEn$&z3;dkCrND$pZpMR#=5Fv zQgguYmRJ{0#V6b0hJ)Q5^5Z4stPdHrA6~7`Z#JskfH-KPXT3G|vvAw3Ch%%1nH`1# z>d<_}iCYkMEKK&srtjT>${8Zb=_r(skWLGiMe)|McZgwB-lUHm&nV zM33tPp?*xLL}3$}_-&AgNRYIG{6lsny0|bWXQs!t)69A=Rc^`Cjh>aF(H^kX`Sq^b zzg#>hw9-Z;my(sw?fqi*>}-e^=i9w#-(&dwNq0nV@PuJY<&VrgE?U&;Svc5hJ8AQ) z_N<+NV|%sca>4=D>mQdc>S|6;TsfIKba$5L=O+r4qVAc5A|zUjQsT(hwrww4sb89E zAY*uBe!OP_LZ&kdU1OYZISbwx{iL74;Obw#2ypQuIUNWIbS&YmrBKGe8`?J41z31U zQc*`v_`fC-N_2j)-_Z0%`E14O*+j+g0WRg3De|)&+Jj=fYlM3DpaqH5K96YIO_dn% zb#pomD}@#w>{KzZ3UCsvkQA(O{NzY8Hk@K1&uu^lP=5R@Xca2m4Y-< z*e-T+h`alA`Ozlpo?94V@UEzIgQ(QM%bE8&Vm`Ok>bZ~cbIT2_T4n6p*>^sjryM$e zhHZDxx{TJ`ZFXIOWA*y!&y#6IQZ5i1j&vjtf^y;QuHvA%dpfBz?)&&H>A4gwN92cV zI$LJngvAKIrA0qNoVar8-JD)1ogqWiP%1~i91TGtp1)}IjzRU%Cc!4>acz6!IRTOJ z+dQPkYr|o77EXglx{rAu9T4fUzq_N-lE-G7+*CMH7d`pQ1p?=-7tAL*Po%sbs1utN z-#OJf>cWZ`@12>?53;xlomT-{Wd-OQupxM!)F% zH~KC9tNnuM*fB=1?Jfa)0=<_@c-OD2qVoyAZ7IDX|{dTJY zRi9}&sGFa#O+FdsI@m3}pSx3}vs}S=Gc9bzckAKz!v?E(Z;{d_g@bWv9}`$zBnqhB zoN>hKj~I(jY`D0Jg0*!npx>As)tv>C_Ezge>$Ojp(>JCvm78ri+@W6Wn`M>42hFyV zUO{($hX1@KS5h-rS^l8vNU6N%)dJdI1y!b&Lwh1S>54B_9g38<7E3v#{!!K%J`|-r zl@RZN=SL>+Ob=xAK14`&o;KOX6FV@|t~#tJCf6A+JtNL%>gCr9-@KeiNsrNX7!Jw% zHO1vIvK(6U@a&?Tm>i7ujpJ)6SRi*-vr%LK`ydb#UTrqx&hv7*-OZcydbCA1-M=~Haf%umPDy|?$3R?T~6+Qe5ylb-@NmDQ-^7lU;-mD3g zGH{$NwCj0e<1FE>)>iT96$;en3BAYePoX3XawX>kEt0zOA`M(^`-tQRKPyB#oWGA0 znCSQOTZN@}?jKXIQ+T$B)O`4||IYdBxJE0vCh_qG zZdDrJFX(=TDx6G4Fs~1qk#I1;mYqU{ho}r+0!|xwDmKfJ4~TG z+@(D-RY9P+G+FWPygzS{*c(9=Q@GxhKb$mBkZ}+V zG2&$jyx%z5Q;5`Md1~6E2Y@S3Q@lM{yl&erqF-SeX|}V5xMKQZNA4fQ*v32GII^`; zywk1Bbj!L{SkfiRta6&1U-SIdM?dG9@&za(V~+JZL}=NP^4F}y!aE~|BNE=G2Ip8= zh#@u$%Aplxdw{ z^HOX7Hm(q6Mmocal&8OdKoE|J_&Bj>?!8ny)wW)T^5{(pmJTS)}7hW znfFz{*RMRPgCsl2;|89-|M;CrN3T)wQNV@LYLR9fOQ3J^oz7r`oWa;p-VsPKnlCTs z7I&v-`}uUT&Cu;XAS5jzbGE4GYYjM)hIR z9!)=Se)l^~f6n^#1-A;|-Gj&kkc?R@UvBicli2%Zjf=q@x3kM%o3)x4X4*vScd6gJ z?K!nI>srkEb8|wy_yn(^eS184e?LdPCuD@rcLGu}+X_a6C1XOl?u)eFqn~X#8FRY4 zMO`<#o%&Hb{5%>~WtjJ@hxNfX+IGY=>mSrz{mOW`!;p$#BWKubaR*$D>SoF6U(e%M zKqMw^Cz)S4EFQ%mbZfdX9bSbnt(RCiI<8Q>m>|Gx6c*Vz@x5py*{ITI=8o|Cj0zry{TiV1Nm~MNaQD5=Q#@ouXEuM;Rf=qMfOxcuFwTb z~db%ji%ZB>e59YpJ9pbk+R-d7>!6(Y|!uL{s16huv#K_CL zPWkP=rGcCZ1xXk4O>A`D^g%vzK1Zy~7|WCvd)7k7c5A6^qcX;lkWNWfbCm{Ubm@cI z*#uLDD5i8Z-E~>1^?((*>%|&onH`2(o<)n_eCRU|3+g|k6iM^BOi?Q{t=e`o@l4N( zn&le-Z7-RoLT*G@|4JSak(_Khn9z`9>CWS#OO$dBQQ+0~q>An(zR{(TrwSIr8y-D1 zR@gdKZT)(!K>v&8G~02%_>ru2UK;O_UsYNkb0YloOXQ=YMogB@?R6AslPIf)a%hd~ z?g8QlrMAYE8C5@m?rzJa(cHee5*OVve#O!bslTcb_-TCTx zb(Yix2MD+G6j{#yI4-&c@@ROj1l(>qZBAu2(d&)8d6WqhBz%rKkovV6rsr=}6;nE6 zxJ05bv3;60AKp4gU)l~ST0DN#={S^Hs~x@ccXq0JEGr=!?R&(c;PhEWzUe^q=EoHD z8A!Zj=Q8oZ#3$yG|33atdVON1iwc&3_D!OOKl+qeZd0D(`9UDEOKr5*uj`c@iw?CB zVu@(c0KY?a;i}WY(~ZwQzc7e#_)hND*7)4tTfdR1kU2Q*s%7IBE{qQ4wf8S?kuyK^ zXH~2ebdOsHu776{XMW|AUJ0sKvUpTtmM4B|qe{JKpzMX8(-3<%L)?8$vqQ&dcpW~w zz%f8gmbjh2tkmA5?-ZnbN2I{gXEu;Yf*TP2n6<2t(kvykvZDI+yg#|-s!lp&t7Q{8 z?FSO2%APk`O?($x;5CQdN4jbm(~Kx;drn z>!OFWr~q628&21hy@`=fxBWIggvW2;5JEv*ftH4XB)x 
zO1Vbnao=MPA?v?Xb2+V)j(h02H5>)_VOR_GkAEC5W!?S|+uf%43%tR=@zpZeE5HDYbfs@ryo6-bbyTSolH8K#f(&P!fojw=g|&XT7nzfz2!&SYcpn zH)nZ$4=I%)L2B!vlwrf8d1)GnW^xWQxiceL5{B7X3N#uvA6iJmZodBIK<#iVIgU@@ zNNv<4aSfOz!;JTh_=>TeSusl>5;@3{9n$2dF9_@iiEDAlGxshjckJEhxgmeK&Ma>@ zl-)@9NRPeHJ_6;F{0cIKqf78A!8PUl?339YKR{&)1RX#2S`MY0Tdg%55GQhRCzmt+ zmdo03UKynlrb6KgOEBdT63WwJmM1JPcm8NH{+L0sfW_w z8M(+5Tg@2bX=*Bhv{qW%$moU4zIY`T(Fw4V$wfIwQihD_g~N}}S}F}(d24<){O1YQ z>?IJP#MsU2x|&DI-wK503Z%ZYFitukcrdpGqH z@86*_V0d;*;errmb>7c4%81MmD+@TZGBQ7^_ax3?}rJN-RQ!}5He+a9Lk5$9TP1zT$L0?Mv;&ROg%Tl5U7?W|-!yqgqlyAUZRzS3nK z*I|&|!lWpN4U|jI(J;$$Z#Jg5)kFF$Ei5S{RN3NV*}?XDm*q^w`zns{xRKAr;_ud- zLG`HSWot+q>XvD;$aEB3uz7lpgVJ+Stjy-BQ<49Tbl=Vvgobc-XucV-@>fN=jm9m9 z_>^v{tvg#gQvHwVGvM4Zw>qnd(&SkETk!Stujy*C5M&k;TGU%d$RpmbJ}Y~yx?$5O z&%IWZb@6ONSfW7paJ|^t5K=30c{lSSf)+AtC^dp3-h~*Ml(f(97-Ko`M&D&znFM=`pD{TD;!=`)9=t&|{*e+{P`)wktfF!m0VsE*JY>OMITlGiP!b3Da% zS2BqD>s9+=>Det7up!x@r-!4UICw0ixNGPx4-TPsP__BN!;)I7Wq31DzX!jS?k7lb zy&Pex9;mM{Zf3_we~BHGW_jd4UHY3f_fp;UyR9HL$Bl*DC>~)L1ArBKx8N0vdT`e6 z54>DvW%5@JIg~G6RqW-Yg1VA~)QA-&drSbn-M3 ze?>`{i5&7rtJ8XQ0mK2h-rt)w32}fF!tkcErcm(Dy?%y<{VRU&zxLr#gLCZjpGGUA z>qjr|59OX>%_`}j>dBvbF7oY7dGlM!u1F}v{+az9HeboBycOKwrsLo_-!v;i`$n?8 zdo*HTiBo2KWg8t`TW?2xTHi}17pN-FwkXSJm*w+2Tl9+8d^T zPP}K5fF9@J&t5jhi`ku=SDHSz6lQg?&|0)D%8a?xZ@d`+H-#B{52VmdOG#fBgbwAl zX?=g0jAK`k?O4l0caKdVWUc7UQVDIVAN7V7ASE~LX7BJH$!^p7@ZXbTvyo*~3<{J> z%wIA25y}X!v7ncIWSlNkpi*d>pFDNJZW%i5jdvC2q9Swpi z|C@TMVawH`t%hsfm=;9Vk+0M7uA;uR`5&jr&tJtxjkb8K!sS zN3RMfZ^mqR%o=}6hBF?c{8)g8DN5W3+rJScSJo4NNB-)a;?gAcvY71_}ad-KmN22iCc;bVSnS zDqkFljRMuQK#qTbTZO1e3`>TK?}Rm!jrAZb3BG)=_i#0-)7O)bwfzI#-q?v?r&^m2 z!@+LcEgsoLV|#P!?c6bn@9qRou0_DUO!XLchr%A&Vp7pao$|1P95W}c&bQ;WVADVH zd=X2#EwpW$H=+GJ>3}8hVV080!G$HZa{@Hf;!U%O8<%RN37agW@RHGN!O>ubjs9_Y z4AK3zat@=33_kP*b=x0PYrKr-#%kmd0!+foc~dVv4^y0LeAfQR`-!d?BtWX3+`8DsqM<RnNF8(O$|_T`m=aV+*rIS0hJw3L z$b+0GKoG;~cp(K^F$b&K8mX?#hZ$Sh(u!(fndd+S_etI|*tRQtFVRG&);(WGR^I3v zXe`12KbDyX>>MR%4#}NT_(kM{h@(cQ5|okNA8XXzW$j1Ws|G|wLN~O)?Tm!^B7HAN zuOIe2zs9AvJQYF5xuh-3nU{J}Ze?omcFr3ds9|r>w-=J#UM!TO&9z@F^rR=nUJhQe zJZSWPM)uHvg(CAZLFb7hC0@-cysX#Csi(A?9Gd4i_E0H^=~$iQ%}lIARTH@agAr4q zhQiROi@*INokG`=K2||N?m{wwV!8+-m_U_UT=+|C5NeD96-jQopF#EItG&&a9J5&6kCdah|)dzN;VT#B=Fp=fjkDHK)~NS7BH4IkItX z##{rRWDY_$oUM<|xt^dlLo%o(YIH)g@wNSkVR*5r%6LohNl}$8D&v;ZuQ{s^CF0}K zH^l@A4rF%!vmkFd@tM7blyiYxl0pKIJZP$1b#+ zDo7vVwFw*5`uiKIvI6~j&1fmV%2}$vZVn#uu4v0~*eZ+_re!78uH|F2u_oc(Um?1} zR=;fe2tHT3gMVp4T00L>;B*|;vcz++63I+h>tqi}?&vX`d-Z&Bsbl>n>vz&}F|0%DcAhyl}H@hd27+vOj0jJa8 z3p0E+X2M9%(24TOCWiH7JDt!3V#d!p7?; z_3_H$hFsy=RUz9T5L@f?h3{MR4CWjSc0o8dwj@$mlJI759!E*4`#;L9MINm5L+Eys z>bIV`;V5vl{pcOMbxDi`YQgElHHor6!>)nm*YY?NaCDf!egsB4VTFR=B#ys%c2M_G zQ?*Uf;BFc&|5cez2N&=WmM=3528Dv-6 zZ84+ohfEE#%G0M5^fd8Omk0ek#nC@_tq}ExISgM}p|zc=V@Vd3r4QgM?KPvvU)o=H z4YhQlI8WXyiy)G|nluw9qNV8~n2Qmnrm|A{7bcgCXHx9nWYAH$+X~gbOSwfHZYWtB z5PHYGyy4G#(bm&GXVrY-aD3Ri?Sz&W@0Fx9(`7vhCNCOszTN5>c&5Rr&F}{Kv}XqH z-)E#mYR(ZLrulJT*Zd|hGX{&-r@6OTc4^NUnx&MaPR#9g8C;2%oq6#9mX8ksNt7!W zX{j_>(;TlB)Q)4hs}2(omE`U`RtmM6b_l4NCN%8Z-MYbT-6Za51)P~SZ6+N&OpDJ* zp61(ZYkv^1pEH*2LxE>?tY&1eLQgnf2^{7H3^TC+58{=3oVC7Co^24wk~&HG@%hq_T=)_2-N?!j^-5PHjaTG%wdjTgDTa6h&tet0x(I%9k zDLBuPX;-Nnh9Pu3RLlVhj#nbdsTs0&h?e)%$|!QtnW=e24|xK(oosB?&j0B!BxM&} zl3gw>yz}{zC~8OqO%9RAsv!_>x)_G5yOXr&tb$ESthCknigx6OCSTp9`z7J-5M*qI zI;rvKA2?`DjYG=6)yq{DQt@Gyq906cWT-+K6~k??LbNZ?IMhAkJXtc!+k7xtL7gFTh%Hf0$#(xlb$UfmFZzq3i{JbCnWvPx-P~onRAeRQ znKdP!D#WWAT_39(!CvDbe&fjWFCd za=EIsBlTmzrD=6%o(s=_klJ_<5F^IWIg8`F^@N6bmbEfW|3UO}*=R78PFzHU@49q< zVzXznooj%P2{Gt$XqQEhiiT(sxq?=R^GfETahr8J7YuU%!8pfA`(#=u9 
zRhaL`nx7svL$qghvYdTj6<%8sFdss;GsmpLIiv47xwfzv5I1}q-e(6^d)SEc_c`Jk z<+P)L<)M4jg9di3Ldpi>!H%LHCO~xqnadBUomnl@<2V{L=M;Dyusep*Fb9$kqw;JM zGd{7%HYEQ!smEoWLMM@63NrLEhE}Ju1TWcN4XS1(DAryDUC4}41u5VWq#Wfy+{COb4Z9TN52%CM zQLwwK>V+FVxxyFX5M{AiooeN97?pZ`CCACl`_a|p)N{@VK9s4Go7A@lC%mG^#`3`> zYOhiQkS~w1{(Xw;xVM-L>Fx^_Tdqzn6di#vw_frVx0{#z3ZvlM==(5#4aXziTHP=a zaGH~$>FDQ>L>=S1+Et|Z8JUeB4L4M%+f@X|2)*XH>nJh-)OYh`k^D!i1xHEdZNh4T zZr0|T`&fjLg>b%gj_xw=I;f8UEibAa2+*6{Vj+a_THRw$9V)Y2*4F;G{+}pYf#B1Z z4d}j}?i2q1MA=2|7&D3=W&)$>4GG7eSlXsstQzZWCNZgja@&I*dqmrl`(0g}b(k4> zx$UQ(dqm#D?S3)}zq}{NX)zjN(C}Ev$6rFxAbP!_mIYUkW3!ZtjhX((d8JH ziyo=-4$P)m4uZ1)|E1QSOe>!AnqWxq^Db z^qz|L(Kd;r*Jab`O`&7i zf352Ovmy?i?)vwy6|AsR+;iS)sfX=*x@v9(5x-J2vBzbL zj<_@b`ZgMW7xOt3+WtA4#^z_2aLw;^XYH)S8%XQEd$>K>971=CwY=XWPe1jut z|Ce0Q1IPu(Yk^^OUL<2+XxCE8pCn%#rrrROufMYH@5p6pde4upW0})RImY9WLaj$V zXoHXGBguW*!H$`H1Yvu9u@~l(i0p-iDP5&qqsfN02b-e&1EeQ>$2ttP~R&>VMTZ<006Owtpn`q?#LbRC(p{v0GLvNa$}i2QHd3rk@zd z_x^U?E5&&}j~d%#Ci%|zvEw?Nv7s_A^5WLkR+FW?#FZI@;i2YP4e2uyttgdP=$MJ^Zv-pH4`0!Im@`u#?l3n5UJA zM;dWS`^zdFEfz+OL%JuKK1lheXE@9v#GIwWTe4<%i;6M$Mpf-4EW#w$OyM%vbYv_h z?gvB(_R9t@ebRy40cpf&!>slVcUFeS9{o7v97(so?3`$XeSlcw6p4znYxnZclf&(7 z(XZ~zIzRjRotdpTmHB1l0$&`N`LH2n$I!SyOS2Y!C@EJ--SsN_cqk@oiB^6wFQ?_z z)^PL+kN!Cle_7-9^AX~Z`sk#VoGs3+WMuoxdMl|`L=uf|RwvhHPBFir9Ai=zjZT(* zsdPu%9^P5OB7PdWhI`xnOKU>csRC#oIHBzw<5!?r(__0WcE18Bu_;8Hx;`k&Q_t>g z7?Wq2FLS6Q8op6I8T8ubL|$=Jl64AeuR^=r}*#{44pYg4$xa=oJJ9Nexp zGw82bAN3y~MN_z=kV*YkJbO(3ss1g{*iJL*v(wOfKK&x#fxJ9o=@lob#}D09&3Z0i z;kM?|aBUq^K^(hkzJO$cL@>wd%}^+sx8)JXT1L9HY~Fz?KxJb|=Lqr`=#89{2xD9w z5eh@XBcIv3sXhMkvcFGE>@2-t9Ee^V7qweG9sqUmlUTI{1kerAXs2CUcHSsxk4vQ+ z6KSfaVLKUI2D1RN$(y~Zvv96NxTa-Lk2UR9C0D;gSyY$b(EVF-v8FSK1jqE8PH9_J z!{wT&OvFE24_qVkO?VQVA7py@fs)_E+EjbizAtALC_^rsq9d}cIibz$%@s9m_|YgD zjLh6-KC=jC#W7InEDMC-5UCICgwOvZ$Vx?Y%dC~F=vmNe&#g}_Xu^|8NdABt64Uov zn2TW2h!8X_;(p>zTG=!@K_A`D{7kl$41Zm5#v8nx5yh9Ji*9=^=) zJD*r(=YQU{lxX67vY?+$;M7sju9-zAUgy3zZjO}_n+i&LPR{Qw&xzY=Ws_K2=(P6O z?i`NC>O9?oA>m?&G`z^9QeR_(1ZUWG6=bzGhwkXY*Kxl|rMiKLUgR-13NNh(L*t#Z zhYS=}v#|bFjX9>2MLN%}`CPZ~`~BXr%={uqQ5fm6{U}|pY~%>C zm-Ja861IE!biv9iiC&0jWj7I?B74CJbD>^H}z zKJJ&`T*uztNRO;=Fq1&_W*~9Z1whP*n3Tr1q__MiYGI<&zhQ`K&bDpzR0SGzaHcW4 z^6$(6jn0b#iC~O~nBg8s@ypfk{z@nLz6KC6qv7W+v`9Q{rm|xm`L04HJq6zzh2Bju zDoC;S*>Oi%ZJ$VIJPrlo-V<&uNlARy(V5QS(8|6RaWYAy~}MMfD#7ApH9LyYW|idgs{oWQCO} zCu`%khmv1P2BaOKuVLw#S!z7gk0RI!5~)@G!!@ z;h+t&IuqUBKk2;kFN&JcLgImty|uAO#_W&7AIG7b{E=4Pl42KLzw&%szsgK zj*!eBK6Bxgi%~K0$Z9#F!9@TPbw{;B+hu&9R~`9xe5+>mlf0Xx=L?gDM=1}5&!fP8zrAE7*G$= zhO$RsKvTK91C2;8(x}gd@zDvu6-Q+zLSpLhRR)?UD6b)b?}j* zC9Po1+$>X73geOPb8rh1DzOAf+%f%W=v2NfaQRyb)|4o4X7|HGwPga}_w^ zt(>EcXq7AupcOei0_lg4*?c&#Fi`Le{);x0{81Of3NVsSQY|A6bvhU58&(>%d{gJZ1e(4;PN}>haf%!dhWX z^^+H|sZiN%iQQ(_>v1x!X~6kYPyE{LtDfvM*@p|+_qh+ymhE3J^$e%pQ;5AV=XYn& zwTPMS^XQ->bj(d5liPj%Fu7#e@Kf*esY4Y56 zd&d*kE9fkY%7%+$lwOJRj(+mHryh4s)>s2qWMy`!TEVRJjvH!(3Sl? 
z9R*nwkd+O+dl=xY6ic6#TnI0ak0qY*BS*=?L+;AKl7a~emGM}Y;h$!&Kn}d0_AYCJ z!!hiF{t8BMTjzshqH;y{h?pjb?_2k16@E|o(V*;nF}BeMGAE*L@C&v>4w74rl!O@_ z_GZke7AWX~mw>J9M;Q$5uif^#vJL2~@tbBdZ;rm>;cGzT?3sbZt@Sa)@g0pB$d$CA z;3|h9D0rd#szZquxRYjO8_M^J@$qTYt(Nu+#>|>6_p88LdlOctQHFn4r{pQ@8alxL zo=xJzBDGkwn{HK4Y+ZFtnsN^hcf1{`oTrLoMMJlmm$s%0ji8t z%i6pmsKV~U#{i4S0xO8F`p5uV8x4_l#UG%Rt@`CmBRz?uL84_G&@&>rDseoPeYa2I2AM7=kVd9G#zu324Gj+_Y4HG?k%zn_j?ncMTIXb zX3ZYL+t0Ba6w`|`f`hXj--XL#Jh*M`by~Su`VU2rzDU%tdAC`D2iNuzpyI9_o^rV9 zkWgZ?z!_2cmQDN&IL3QngS&8EP`91*WG4E_;r@|$E$vCXhc%E^ zWVINfPS(H*=_THc`^W(|pHQ6#Nj$5u- zuW|Xs-^MvvKMQI$uDW114LVQy^OnF)VBo1KMf{k93Q% z0;R6?pJmg3Y*aW^JqZxKNvrU%4Y7o7v{Q_82L7JD_47fH5^Bh~y6J5uFps_V!W$Ad zQj!qZW-ld0{>jX0sYb*SlvlkO(iPppcLhdV;F!cM+h$a5UrZaJT=ScrE1Db48z&DI}0y`M(}a3u02WTy65@kQQ+H5SZ;- zHS~q!SVd+9PB3!|R zwRvOzpYME;5T-x*M=gUOlS+Gcndh{oTl?-cg#ObT=_fMPg>4SoYGI#tpBX(fKPQQ$ zZVMmu1hcIQROwOkiAQFz(mbokEl!45q@vh-zn2?#4x$NmTvab`+W}rkp`$@K^+(kAvknwJ>p%is=YJS^{{@5m z7Yy=Wq6(OG|3%UKm*MmO!jDk&Sw<aUN@NV(L zHvkdWm<`y5HdsM`CH?Sq1L1F;;5gia_)!#g5x}M++~o+QtN#;ZPN1*Oym|m2T%bTH z0I2+6ITwt2MpgopR?>dLCM;`r}?ASatjg?e8V_ZpO zd~9tH_ja;a5Xrc4bU|4Zk_Wg}_iF5Xz}`ndLgjvh(TRF&ao%?C>PtLhyzNAM{Ey~I zs$oj%@r*xXHvewni&-iH%W9EuQq63U2_L#*D(240&Li8&hDav7)t9UK-pv6$c5Zvx z%Tls4gf|Db;vswKU@CsPwYvt`kq5H6huRvnXP>Lu%=(wDG;WHoRx5&+ti=CUA_4UF z@gdnufL3IzvE!oDeez)8jB>k4$X-Y>hHfHiAnq@}>H^>#5y=yVgsCFZzJVJz4#l0X z$vQ4kDByf#gsv0VPoL@aGtHm6C{pRlQ?heHa;46i$Dem_-)mi^I~zE_YZ-kHWJLbh zl)R_S1MFwu9pxiFM;+RC*H_K`c&-q-f{#km zW$?icCiga3<9!BM0yT@fA1K++olMPrj=yJhIjo4#PIG!2IjakTPqNBWriuc-b!(CG ziIPtL)0-2NmD0QBlc~(2vW^KwX7E(pu02aN;f=USZU@{tS71)^sqX#1O}VIV@zS5G zui7`9vAyG0oc3A2RRL6;NXsGTZL%g9CaI6`iBU1n@B`U0SaHVQqAj$1>7u?#s9-2fW z3?;896F6`$cvatR$m+(GIbE0C4fJihNWV^i^c88Su9lp>q-LJz5%|n`J;1l^P&tUe z>Puh^3u(wb­AV59)u1xAtaqqbCz@slp|^Sb3k3d7PFZ}thzm1m^19pYBh=%8-3 zm5a0R8Nx*Jo;!O*qnb<0$9SKU{xjg*T~OHfecJ&+D!AUWvS`2l`(eqmhT9aatDL;1 zgfT6Z3A>5ciP6od5_sm13S~>C1<#R(siD#zJB|}bpP4sd2GT-$XP1;wG7-#PxdUtB zZh-0_a-y%_J(Tb|qY!#heG@in4x}~D6UZ@a@?BU17n2etHC{M*^eEs&Oc21{^p%Nc zWajGCs?G@*D~ViOf+{8ky=XeS?3&21y7%%BvMP&(lbf34R$OZogdTMuu|p+&=Dm(j zoCUuZ-FfDF^TTataapP``?U_6dy)tLOx_Je0_xI^=IsH^**B68s~h`DuMyORwEFox zX2pnuW$uh^C_&=Hh01cp6kH)~hQ7RYPu)H7ad-*ijckLHbOQnKc@`uL(>SqZ{pN@0 z%hr2&1g_i{t$cP`LJq{J~gETM<;)<(WNJ#Mv6$prFM zy}SNKa1$_}r)1?nnN94T8HWlRl7incWK5H~>Osc7TN>Ww+chzD;NrM_F;7(F<*dq< z);-Xj98$lK)p~BW9Cft)r;BxfMg%@(zhsmt$Ob`=et3zaKq7&qyDI4ue+!X z^f7%V_O$kMW8);mGu%*hNOp0sNo~D{sjK~rX&S;vsowmkL)Y6*Jb%S#fm{B%)o8bt zd`imMRXBT%J6~%x=TXJPe54v8^~X{g80v{-FD1e}y~jY=y|5$nCdS{lGxNGx`}Mr@ zpOTarC$H;IUjl1wm?}r)Tl=-gM9k!mk3vdl-pUC4k|ewif6;*{eRg2>uL=AVwVX0A zwL(DJj!Ndfexe>!Qf~2T3mnMD=~W$;qfFY6-!JGA>lJPkrT>*Uo;+b4+&-I0LS%d* zk&yKcXlpr5g}VNRe%mAKYl=&0fuI;b2P~4z@gCAw@*=mkz<*P75yxoDa~}R0pNReb zp}~tGov&a8Reg8E-{5eLSOTou)+upl{}5A!)Eq3XE}0Er#uMH#(Z($9Pn@1a$@@#4 z6Z#X%!3w@$@&;4HpOX7_MSAjPH@X`{r83;X5U zd7{`b@K1oQe_K-gWSG|hMgWvm;$TJ&xqNBbB6PVG`WS4t zgf}RaM##`E9*&cYd6oN?trZC}Q0=U~Nes33yenYhyVZp literal 0 HcmV?d00001 diff --git a/fern/assets/images/design-principles-hero.png b/fern/assets/images/design-principles-hero.png new file mode 100644 index 0000000000000000000000000000000000000000..03e6c8d414bf3a7328c57e0f55ac60cc6630abfb GIT binary patch literal 169247 zcmeGEby!sE{s#;rf*@=GrQCFEr9q?{ML|khqy;2~p_?JbQ87S3MM4_s?q)!dP?7E~ z>4u?W-g|Jn&pE#{`271`&vUpgoN;llwZ8Ya0%2E{YRmTTXcK3E{K ztW7UjPW1l4?acrF#_hny4ng&*7L}*%A=Zq``xhrNjiTLeTtpEMp1br;W!5Iew;l2M zU~SZe^f~pRT&iXdj(S#=J8#kDf#Jbmy!bV8mhaq(UC!Fyt`%OdOY_U?rI*er6M*%t z?!dXUxy8cgqFujhmJ-*b=Q}NNwy@PrmC{}D$*dTjCT{WPHF3Axs3=|wT1(gA53CER 
z>$n+jvNEN=^<}^Oh6y8Yk4W$BrewA+Z<*_^EC0Afo~isj7;@ z3BD8J;GLnt!3W>YfM4k|wEujUKf{i5_T)G&4$gB34&L8u?t)Lu&uj3DIp^o+*|*Pd z2*Cd?fnU!A++VBDp%Tvi`hFHj7YFBtmYkv@_|!6Ywy>~wv377J?y`Y_1Lqy@=(*tF zP+h_No>A2NYYkk#52AhF^}ec#xVeKJujxYvGYei%J4eiQa3np&!B;yAS5qcWJ6n4f zaZf3hlNI9NJLWJS3)9IGSEv-reN}ZPIR|G8CSl%dyw_NyiI|v}B%L2xifi7I|NC_C zKPeV#S64@IK0Xf*4_*%eUI%9@K7KJVF}`cp`L18*0V{Z1yzE^~J$dY1SbtvR*L7}L zxR^Ub99;TWeJ)}j2u1TJ(|F6IN>yH0+=KX)2$uA=MAE*A?pZ@QuIxZH@ zat?Ojp03jWlGoq&{r5lqeWD~EX6ygPil2NwISLppO(e|S?E4-%cy1Qx++BeO4|Uw#jHt$rldd{K;=M#DhsRq9fmJJ$iBa!XmpGT!1F;%+OX(f2I1+-wOGIaI@d(M7WFO&R z`=Z0qej`CQ>}c}C>1*%^f8gRuk>X-I|L;p9IH}0ybev|(1%G!lT&Xs!MEvg}r%LF5 zjQww|`+px#taT8hWvdleOIHWA-C{x&Vi~o9E7{MXX!P9rW*+~>GWE-3vxrbN-eWQi zQ_X$`Zj)B8yyap<)>qA}$ILTV1`Wynn|;5I$Sz`RHUW(Xqm=E&`*rlc4OCX|48mA1 zywQ$gPN-Gc?sNl3JR>GV(b(%)sUnPH1>H~eIe3JETCxd!Dj!|TW%F=fM4jfFfKjk> zF|~6wca{g?A{)IjRI&RojPbNP0wO=>EM#-EXN5MB{Dw$NB0^Nu`A8D#IUO7g@>31( z46X$}9?U|Irc5KDPn56Z{OQ}F;bm%vmg~d{;ltcI`)~Y)NEJY&z?rTITqF^b&Ex-O z^{*od*^gxNxRs}biBL4NDZ;;z&1XR5g|f_|RN1_{G!nnjlqe2BvhRFATNK4$*&-ni{ZmeATpK<@s zxUsml|GzM9;j8Tujt}5V68R$A%VXr;+sgsS1NXjxm5I@g4D;E!{zMno`EDL@E3~-# zoM$=Tr@i0!7LrS3bM$zf??QskqcmTad9McdS6D%9hf0_a$_hBrvUmCE#a&vaY`(I|yy2Obs+dJX$b5H!@*i_T*AGNEgK0d?Uj6B6hFTiTo1E=n`b`X)!ovfO zoztL-A2B*Z4>jwVxn1CM6k70L`l!Hq>xspX6;a~0@iH1Ug2O(3{Z`d2f4cL2q zB#?jzWn8=#d^*DtFcFi;ljwhe6|3m~7g+y4n>D+YL-~vaB4zRJN1qz^v7b2)6_B^!g)|CS0_J+~}|>8|H!u z+8~R}?5N+tMP99=Z~Uy89`S9mgs(bW!Y%Rce2w#v(oWxwG`@GrL|ljO7Gyx-mv8vL zv8ekBQ+t+j>ct_Dj()V6*3tT?OG7jhkG@1DjOtZ-QUeLK6hvLyI&BjOcTRA=Y!fEq z?mrjB{%QuQG%jMdLHsFB!^dv?h9mJQ4lSV$<4_)997;b4IBx~|C0+WAiG(N9Fv}^s zwoU*kHbQ+%hkULl&D_KY*Z&dUa=eg-rBTnuvOh#zwL_0DH1@Rnse;^>vpwgeB4VMA z6k7FvkXNJOQzu1AeA@j?FeIYNPHIPXF2{5YAAiTTL2rva+rR!V8~?6z>}~?FAnYkl zO1=x1*k6rjK?7Qgx8of1ZoK2Xh&|y(++)S?Q1?8>xxSPqCfuP>oxwS!dTHkbH4f#R z@JX4`!Hw9jL)I0?bX{$+?iHg`IZeuRe}agBKYWs;-)g{HAl~VG7xNeypaFexiqEH` z!kiKL^Nc{kWXHT=n0N0W^rC&xrBk*+brGoZBb}4FuQ*-SC}~?WKO#WGo9A1e(`mcm z26rjiH5R>pau%I1Vqbpw{5FY!i0H-Bq>>M`;FGqSE~p0W%+LAw^Ga3r5u@J z!EOc#u_wBP#^GoOrg+yetyuqqV7{VGN? ze%7-uGb!228b`~S(-iitI9oH9g{kGS%iHb_MhjbiYM~CFdY?7Y>4^#9PUi7B!_!h1 z>lBQ#yT+W5Wi5p3hVL!ESQ5qSnuy(?+P{H3hjQ-PPd!`!d(O#iHoS&i3dNcjWfISo zB7jxxth`vR5^2np8ZsY4APeB=8lo4%wJ(F|B@|Aw!m{IzM7buAdunac_T1D%Jb8kI zgj(G<$jg2ggq;-G1PGu#wW3Ef6Wk!$Jv&$@nyT^Ha+?Ctj{EA6-shP+zH@^aRO#EEENl?AEUcyv) ze(Z5$DA%Qaq?r5CfaaNw8Ig`8<*`W7!iIk?*n7I2m>-PbxH zzUm+$+l2RJ1f7ebuy~$sO`P$jLC;NdJQ9YS2>E%{Xu;3c$VR42~27z@b9mBO5tBs^08DbcucAgi_Nt2I)1WYPYVT$B?N zt{x@JssROWN;eX8a?H0XHm;g;7g5Ac*}r-t_q zW(|V)vpTaQ+J{RQurr*@y6q&K)%@h$9rE{&I$#WEv9p0m-2k|dq2vw&3}x~%f+Xo$ zvgxO{q1x}BL?zFk@_%zx36CW1vCH6(Kl$xCU%jOnJuPHxpF}>S^{4@-sgbTgp`dOqP0v$G zHM{#KZkqDfUJ$q(-O5Q@olwr3$a8fWr#SZ9J2csO$J*x zlqK~ucEwKknWa$u(uXK!rf%rDI63U4Xu`sY_OSf3nggu9n(+CQ)ix)qX@0HdWMb+* zz3-U-LKBEnm;PD(SH5S=nL`$-!szy0Qwk`=`-}1jAwG!s^b=h6@9$fha z9k-owQS94h2M&VkNoT(*MrqCC|J(}6{`e_z6nHv{sb?{M5Ba_gVAqmIvh;vq;tyzw z^uVp4-#0(l1NGm^Oky;|KZR%dvI!p6NpihpKq4f2uhZ4T0FN`5sHksaH;sSwV?fm7 zXbUpWlmkAJ{%D-@6r->Q$}}Q;VU!ivlF+WHMgb?jWlQv*MKi73#?eAs0TiRBq@egjqqh<{EwWdp$PtdSA^^70nADT$-SUj@ttlmFCl z_vd|%T`dO#F`h??=a=W9BpL>h?tZnUyPw5PBk#qTS z1XeL3-2cL?w)$sv1BST2^~<3n9! 
zWG{uk{DTfJ@LU@t&R$rp#riXD4rhDS-b$AFa(IePAH7ziAqAh<%ocz8j>hxq(j1At zrokL#^Xb&IsrN!5^_^}Z^c#_%H4)Ag^q0EqSw&N264^4YJ8=cfTE{crSL@JrFVshgwhL7R|K0(sQwz@pzLuqQbgG zfn8~s+B-2MLLq-FgIUtJI^eV1`2jWT))Iyp0_$sg;}I5nVbu5UbS6b6m2EoS2+}y!PM9u6Ey{I^8LnO-vMpcd^^Z2n_xuzJxWBtvL*ICEl5_klE!DS z`Z#qbKu&|A5TM4JlKsNg-{H(Uk7DJNu_7ls&_AZsRG`wYd%Kji98+1kHzl$1=aK9c z5T3%+yU&5WDd7R?i{p%=9&0gU)$B94s-Vsm(rI-9s+=yz6x6PqXq-s6MRds+E9tTc z9KbuLri)<$?{hLhd?z8uL^!Y-0_b-A+v*EskwOlZ-=ic1@;JJoIFX##&-poKZIB0= zt1X`(*c~nGC9e`Km~{OD)E~RL4F&-LjJTH2UXdNYb3SXi$6UpK)f}Nyx93Zc=ueAXSWnJ*Wh6OeGC>Q62bi3BE zo?wmXKW+Dnt%TzsSlQJp2bC>fLs`G>SIn-z2yeK!YsQZ9#8C zR+$)>3rJKsd%K_zJB+*dZ!eMH1}?Affv$i7eiq2i!(ca&_lDo_#$qLg2Q#`beb53f zl_33Z4rM=c=#*yXvN{-QlSqv4kTaP>-SD79+$y zbER4V=YAvK*TADlo($3*~B59(HKfWdUynRyXdH=u(FJRe#efzlT{AUd~DPhrak zdX76-N%`5v@&tlJuyU)@fSmnjuJttU%RF`q8@~g%8SF|bDgz9d|JDMqLt&7NK($8ZEdlle3W7>$Z_*0(93cn;&lvrF z5!_HT`(GlJ$VT18T0S8%F)jDoBISf&Pu)Dgri?S)j#5H;jRqL7?vl*f1sI`lUyA(; z97HKQ;Cv9RGfY}Dk7qQ^`;?~LAUI?2YAJB5pbW}R(+wLr1@zdCsTZi&+;K5Z>TZIo z6sFTvU#uv=*;gKZO4@-btaJu=cmn_z-H6{kgXzY4XeD!qQl3J76}Uy|P}#~Wz*bJo zek&Um`Fx9mMl*&^As?vaCLM-&N+NK1$WvpQjI>ii#1QC5l(hJ{=>io#i_qJ&Y|O3@ z*pw)VGnSZhEW;*T*(wkxN``VhrobuqKegU2QDNjP)(HA<{^s5l!26U8kuHjuQ&Qey zdZ1rJ^U5AKAIVBk0{xO$_AJAOVuKI}>%_MTskQbG>_jDUph#dLE-(`#nA58gW~IYeU(!0;1N z@Ra(x$QMf96}n^f9FCrN`W6WueMTM)A%3VNPgIA(R6!F%U`w=!LfYL}v&s6G{1)Hd zMT)!G7AjphpPRBtW9W+ zM&f(M;CiuW*jH*JSB+5uYD32x!;YUXsMuTcj z$-AE!kJag>-Sfw&Yu5Zd#|}m#9PT2Hy7}Y>0%gI;uXf4>Inzd%H#UcCxN`E0n71>+ogo}LqR2P?r4bzDM z5eAbJh*^QCj4dZf!P8ewh)7L6tY?#cBQfl~igaofo9%|am2VvJJ6QILJy@yYYn47& zGCN|WpQGeUu5(xb!$_aZ03py}xSaBgu$${7b;>j5l)$R*>Tb9>SKGa3CC2^~8TB+L zDR-J`lnb^|)^ezs=5hGCm?kvcxt@`n!LFNq|Dsl^%2yc^j}bSg=`h`yRe}pN?`jzi z<6|W}8oShF=r?EELW+aOfVo7YxkL)>H)mQ3tlHv4my5eLIAl#Wqx$O0YLAwyziQXI z%(lLf|1;O5R(h+ie0@MbH8*23`aIf_H!#{ zqI`Tm2`=y%IdJ-K(>k}--A>L>?t1a1m}ER%BWYuk!57uxKjK!C%ApePyFD-rSspCm zw;wJahU~6QxG}K)gsf6h|1>iv;|tgmfrg(vf2+dUuBiy^Ou6W}6=pY7I{itLXkIg+ z8|l4M4FjHlMODVjalSoKE@v6+v-xw)!D?-7Z;k6=v&r#>d-mgd1aFTTKjV5IPaATnkJ8>etzDX-O2L#)M&H%7Z)3RQ=H082&_Mc%Y%u4aq4-uGlZ}j0wUmiag;1yP$sPu z%++t|ECIq!)PbVO+Yo+@)diXKQwl zt1V|UDUxG7Ctvd0_UHRN9ea#VY_`~&<252!$s07xmHpLYz#^i75xv_~ZA?=To||{= z?JAqho}%=YmG6W;4gc<&5TCysB!Y^SXI1;Q(`1e~7&9W=lV4x=e+KdWsC|4ihL)1v zsc5@RLmw$mp1Lph?2oTIN1HNemulcgWFEeVlovF1YM^Y8<@HZ!Vubrh?v6isQ`e!r zzq85++3!>gy*eG=1@Ck-g3YHitnug2i@GoMABuOfe-eWBQY?MH$vr1&N&De5vS>_=2AkH3F%vLq_Ndv6_1vi%ixmk=JnLGj0^w7v9(x>gG7QJ>A0^;{1m7vqHILPlDn9I2Fce{3xg%Eob5c08Yu z8#6de*JZzX;>@rm1lnmB%>1PZk(%|Oa!mXySt7Zr+NGZQXmu+O?wee)_$@~@B3Mj2 zAws@7MZ+#5c`eiRmVOQYfttQDU(hY(F0;F_;zA65yAyS}hb0_{(Gd9IluxmN&G>o~ zW1CzlO^_vHs|@vOF3BBU8OhfK{o(#B?U*K(qbDYKU^B1s|%c6 zsj7ivM=Kuurgcy6bSa02r{ckYQ*;Pi{GPJ4(8>PsiE`AvBIO8Pi_hP{pCA-QrkouS zR#04kJvY>=bcOhgpJn`_a3hsNxa&*973J_B6O$iq2Rf&9Cgc_ewfW2ltgnZ_CC7?= z_BJjH*=Rcf1jn6;3y9CW*Px@{BLQH<&p2_^bk^f0)+|HteuB<9Sc*qP9g^Z{3e&<$ z)oO4Mtw{Jva2ayhZav4rm&-&qH-8G?brOw@@;JUI%~oy?LRtOw6c~M7B{#nd2xW>~ zX;)qxOJQa8;G%Rs&w(W`24hDk@xj8T{7q_+Fby+k7hl@~8jJuY)z z7I7ci-&&j+^D$Fdzgkdc$@!ra2 zk)o8U_=AL%7b}cFxLRK{L06V9j?XjE(#$Q7)yj~5=(I*s(6z?P$WU?Le;qQ+B^-xT zJflODsidrhi*!BYgYPwac14eBFVdMezIv&PT+ngCcrPxKy>FtP-KOHDKp`FcHluwm z)VccTOYq?*sB%EG&56W9yQ;l5ozs9v>9*m-kLSSQp+I1oWC&Kaf4WW*&PU_lu^A0n zx^jXlJ}gDVYqLe92{qrfsoF&EVJNxPt<^Ac;XG=kd@9(79_dd^O%=jbed2|MEcHM0 z;`@aCf$9{~tb$awW30HL1Lp6N)+abJy<9fNfQbe3Ggt9pGKUkSn>hSJs6dlL7UQD@ z-9GD0bw@`NPQJyvUDQAcfOpoh}3cIQ_3=N zJpJ3ijNhcpGwJ5&Q=-=y(He9b?(B71cg3#Ee19LRJ5*|CfLa<5fWy;e9eT6%yz)yx zW^gC5o6C4TKy)#?X!1v>S}Qt5XtR^BLXgsu=KTG6**r%R(5Iky2MDeGh7No77Gl#X zc*%aQJ<)$bl?b&`i#`^ejd9r!?(9z1HFUaNfRsZjE>5JFhgxx_#ldWn-FAuYLZaG1 
zc6nQ$bPc3*j>ZZJ5dyoUh)nW3tf=2RFgh>9y^C`xTkD;Ip&7{>%w65DFK{@(4~7EW zL~Q%Xjt+L(NF^}d>tjmEG)Vs`LH69X`8soWuN9bq=Qx7E3^mu$pq{0km!V}(70kC2 zg^SF}t>la!yuOUiL+|H#E*gyKXAi#P5a}w8^jBX4nL&z12uA`Jo_n96i>l9Fvq@_k zxF}rucs~9(!;>e)L3;NSY2yWv;nxmItPG*5iIL7lOcP-r-(ZrO-!%~(f8YZVm4W0= zQcyW*Ns~PywSw;Y!#hYAH@;#3Sdk&<3YA?k=#^SkBF{|XEK+{lW-SGir=%Zw1Bkl%4 zz8r=stV^1Es5BXR#)JN*jUAKSI_eFfgHGR64$iJQ;Oc0?JnGJ~-Kwk+?6J^i%vbNE zNaAo+wo5x;=W&Y_=uu3K7PkAA)^s?E5@ zn|3FuYa@5=(_Hje%qeReu!=V-B`9+kV?eKwqRGZ~8$~vbAeBwUSB(;r^Py|wpZ1#I zdlB%LYJMA`{ze664FQspDzushJ0l(@P9_toM8tfxhZ{hAen571DR1mkrjtYO2C&6> zvS6Z^S)@+CiEjCJM~GjsXDmnLfed;UO+ms@!)sh!E?T!d&T(Dq+WXYMRH3~ft>uFA z(B1D7>pBkc`N8Tm;?_5<15(RYpMZ`!C;vW*abHY~kQsGplX3#0X#^QLEu`Lv=4+(9 z6v^dl3F^q*{hmBOp-~WrsAbqfDeh1O$mRh>lHDQ%U5~D-Z7%@WL%Xe^;u7edC4EG^ zbqPav-c`hRIIkTmyZ_#Is5IwI4PCj!fu?>v;*i({)H@+HzWXy;bGjq9tC;>XvZz0o z%>t-+rmfD18bMIRMUN>(@ET2GLPFPF37*uuu~8w7%1O0xpgP2qwPweMdtOMOs?%L& zsU5EVg`j)~1%g7d$wrsBgUNnp#74$nS_hDKdNc#Nmz{QESAEz(rkEO~awVv;3IwC6 zs<9*c#EL`Gvl%U&7j5{w2bx*%O*ob+L4G|smlV>fkebrr;^q&XAMJFF2Ul-VAUocH zqdolE{K=(`P+fZz(eUHV_|1IYAZc#A(2Uk5nd9}{?^e>Gx@uik#-0=Yd&hze7Dlrw zVO?`o6s;-SAaX^7Y$%sPVm}IAy5=9q@lKi9)J7m@K4N7Hqo%Az=A00?Gsi1Q!zy<> z=_}Sz&jd;|*dcfioO%C_4-1#Tj8Yd~PJ{bgj_C-!Xyjei*lV=73C|MYf)hU0_#SM;+v(c*Tn*=jUk$hS5*7A{qW0ci`I49E zGHc&a`#D*(vhO{=PydU{{10w|xRSQ?{j$QEaLoUiiFaSJ7D5VEc+*LfF3cXxk_AdtK`&(z{8|DwYyb*(~^& zFN~r;MF`dTr9@lvk*L#T{h{x^T9o`|QT&nZx(h6{_gEQ<(@UM}d$efMme$El7n!=L z!g@(dw)ElGw~|j)`a&N!3dUMN-U~tR89IK#Pl=uk;L3d=_%^S`CG~74MLeW?VxcE% zbE=C`xU)F%>tFQI-}sy8$VsJlA4vCD@5gM%I)CHXbB&#Mq4_gXUMO{+BSr;&StSk< z(2B_fGnBr^hzX4DwR+5~cERyrw}~7GwPQa9d8maH=NfZ@k-X7PITJ1=Wq*-_F1P;C ztfx&6avilG)3Be@Y>r0w3bl41mGdt5z@1eX);A)r0cLhu8M-dD7s%5y64EImP*+N> zEXKWPq8?_G^NlzbSyyWq_^=upS*MJR1g;Xmabd?I; z%u(-TuM)JF!8k8WbEanR3FF}dL-^6w^FR9hwu|DYp^GVN*~OR;&iE!WwLnD+7S08s zuK`7DV!7o5B6>8&$#2ADBV=@f)m0>;G8fahcp7@h_hAu{NrRJm%=_oY^ z7-+AB*|+vhD?|5F=2g1b8z|1D;;aD=7T8&di=?rYCQBCwS;uPTrhPzhZXD=o4qdum zG(Ax6(E)EdoJC)zEt7$NiB?Y9Z2TkUQX1pYzNk|-SCYZ3@pDOed|V##po@@S?*ia? z?Kc`%G`F+wILz-o^L67BIe61YSl3U0zGB|+aEMHI;n_*1bJ&l^&od3+s z#=j61SIXwS*hQgIVBp!$ka2c zNe7*%?{n^Rtm%8<0Q|PPW9!vTrZU=+_ zo4n657$1LVCt1DiUEqH3!S>eZsC;am`SZK{`Lq3^Zfny{Gs(KsGG!u}XTtuoY4p*Q z%(@3LYP#k`1blq4HvraD!SQg4H4xyTqy{@uIA`3~#|LKz&_XG|8pL>|mQ5y$3*aN~ zKGVx34B$cncDxx6jU@BeWs2T-wLR3TS;+PEP01A}^G4Y@P$dP@}Tm7CNy7K){sjD~|l8jN8Maq7p zgTk;FI<@>D!=aB*q|$YPigjP@RRo={BAp0oc&^XIdTF3cAZ}<^Te27x3?QdQ1s1b| zG$NxVE+OTXgQP{*9=ZlCOOuJx;X;H1|1JF#;1j9eGXp zliQslYI&71#JIH=+p>MjU0qZd_5#WeU$xzyy_-6keP&87kSKY68oje(Fgam2_NDjx zkg=q0)FpTI)dHve{IdGp8?(1SUv>kIc`yxhw`b9DBiAGt^jx6X!_^6m={2a@CtdqS ze^SS6&B)yqGm%XEHP}S}iOJ;i#S8na9vfjG%uC7&-bH-9%K`M>rS*e6${W61!YzSk z2WnI$?lpwdLpStmuD)yOeN#hr0oIx#k9d}G-h~H`g-rctPVZDL*w|+1t5BC$f0<4g*`38B`ynK=yQS$! 
zX)NtVyZvTxPk~$C{?hE*+k2%vRlSW-JZ<+TT`3`~N7XV3vtK}zH`#8;;2&6`Q@9%KJlwr`=kz$Hd(C1`z+ zgE4!GJ^H-%J*J%4osVZp>tmtlE5gf$dbZWNeaB9{?H&zcY&BOwxuY{|ywU>Far5or zv4$gPZIo}9uUmoAzU4l?oHTh_th@ep&Zg~z)UUBly9%PmAnvPW)BU)%*9E3SE(55s zwx)2SMKkklX?!p5omKmpKS|IN5Le?JTRf972Dw7`=MN4$i38R(7x!M&BBT_i1+FHy zpA4!H-!S+itBflcFGP!=U5#T1?EBuj+II)(-nDFB97LEsQptV1>R_x(_vW4b;)OIn z$$0ys#Infa%j;bVd4Xy7Mhzq%*0mQ+2DsGH#Y#lE7rmHN4LqNGZAFB*hKiJD&r}${ zCvMW~+|Hlmd9jX0bRsKHCrkH7xs59SvNu>ezFz_{Ezob`PLOt6N^$BaSZz&P;9gn# z1*L44MF517m6kW(t1FAM0G;Gg=$77RVe)K<0uu1u$h%Z1Kde`m$G0QTTiD-==4+q8 z!3UivF;f-cgHQc8viK!v#{V7FF%{m zu)Ts={ITkh&4+uB#sCAGBE}5d=PWTucZ}9AoM?Sb|mx;gjsd+2fW_lOTy% zc+!2N1l{DdnQMeU5ToRJ31uq{vU$2?XPjVYuBkA-3c*UZ#omOh{vs>2!|~u~Gt=4C z+xx7BiF6WkG37sgw1IvT)8EJ)gg)-PvVB!yJY#sSH#dU`KNo|+aWstpLAQ89@CCl7 z#<82@T>d8WCT+aFte~-%*R(b_2KD@7-dCmco>BZT^I;btrj8*87 zi3wL)E_$YQ_@0b^yZHckVywv+6HU?;m{hCYgH(Y3$P&|LTA*a7p7Y%(*?Fing|c6} zz1r;Oh29D^d09p6ljivf(+q4eCA)(dZ7aG6Qww|5zBrhn5F@(g=Cqz6@WEmA_(Ej> zkAZMgj)w_j+ftm))X-)Vsh3{$O0rtRqcdk|x`UN|#xU6fvOBE%BDQeeUI!xvVqLV@ z3^!V*lxj7TyIZ{d!6TH5M^td3u5f0qZr_=Xvf0;{`(0e6yBhLq$P3-(xpFq`1b@$sLjBWT__+#=4r#N;T7AMOxIi*}lw z7P{-T_g>`3*z25vf;iH}B9qb8FTL&2wHt9}Ip*i$so);1)P)WtJ3RKA&t#R)+YXoW z`&Epi8>gD#?^VMpL}XN-&ygh+%HSdWI<3r5FU)CN7gCzT8$)J{LeUzTj4+kjd@n>g4V_vZERtdsbA26=8co+Z8_Atd`@S~y zqrGff?P2q1*^E&74r4!~#I`N(GjG~bDeA?-t3W?i0S$_Q^t@86)84x_iS|Z$b2+vXLz%z+v>?TlGp$#BES`1b}GxbKW_a_R)nlKHz!8F*j9?9}{s#xS%zjeuVAQ5siT_ zMS}ZFk)P7!K?K~q=!LAhVBe*s%<9`u0?=!E*AcK14sff-p z+gNA2J=;3nXQsIPJr?abTsG6ZzO=6Z%|3cN7%BRLlE0!ah5tCxJjo&c*sZkd(oVYS z&!jOy9IsQ{xi)%dD{|-1$V*mdUqHb~SJZFr{Q&g{?~AGDwcG~OxeCJjxd!091^Feu zi#A0<4r*Plscw7QOJnrVGD%uB>79ef=p*GWcMPZg;-H`hHEfK)vBO5mcBlBsBP4@W znC^L}2sDDvgaM4$UapK}Irzshy6+ttdFF7Su&> zWJD+7uH?EHe#Fc}3yCav%c&i@crdaU$8=*WH{t;nI)H34jQ6I|XM_p8SjV~u6!Y__jx6`_?_?g4% z%Gs%6+sYM=B)-POryvsmrX_+5-b)4j)m|>H3-5eG z-6!iKuUjaofSFfr^~Sfkv0C`zO6*nRqe6@3NesNh5A_`M?=eG(?)q>#0PsRIo*-oz zkF-QPqhR>w^YBP&KjtEbSn!uac+jfbEt6H|vvE9I#`PbKlZvjb2sT|8^er`{zQQ#OT&@;$Tc@=ASlo4K6=ftp4Z8;^c) zE1G)4wi3K$WCgM|sJX*?ro7@KiZ4NPUS@PX2d^$)e{k!M1!u!;I`?2V@0rGLZq4Qc z;m^%_ax3;k8kIvNc764`Qb82ve>kQ*`E;uFl{rCdEWY;>60x^i68a~hKgsj_A9)!(Njeop41IapIg7Cb_mMd7R!>eGlJ6|xU%43eVuHG4B=`EFm%4yya#4y0;$dY*hkVjs*LXmm|{-DoQ%%0NL0#lUnMEr|8g z)IkP1zDAVH!O!Tkz3uaG^cnYSiQVW1e+LrAYAO87n0H&8LCRaQZowJ>D5P+!q4(j9 z(ZfQ-gZ-)76&jiuu}SCRBJ+ItuN=rJtgbvE;d@k60z;sR&;gWOlj}>g8=GZEEW-9i z;rL!vv_2*rckXH#xvdUM?9+)}os@0rA9ga`rgLaXi^*9Gxm}{x&dg{x$VOY>adWKC zVyvc?{n-UtDx)eIg#OSssuS1IE{@;1-wCPjKihUJ8cpM@7@MfMzqj3QYBg)}NV)5V z`@(%c*lKTvys-RPP95e$zQUr`~drhW;CA#f$SQ z&A0;qc^yrSd8Zm?PW761OX>d+<|Bw~#h`pd(TAH|P3~HFu;tXO5q3l-=1C#&6im4= zcfQlXH_!3+j18>HXc6IAtxGlKHTNejzMTrh*HepNoMUzWT z4#6@!$EMt0YIM9E6`_l6&3^L?0&71+J?f?h2C4a^$^+W)Y|c~2Pq!EeN>JIOb+|`F z*-BT9Ot{|NB>e)V-{GY=b`|82~}gaH~|hj0WvxQw@1IwB}ZCw~;^b z4tXgMTqAh(p$t0pjoGr7QFZvjAayuVKUtfFz~k?F+s%>j{@!4Hwp%62WhCEL{`F=^ z70cBiQJG3TgS_xJp!HE~gt$jOC$(Nr?&TTbrzkU@96Ct~-^Jj#@@?CECWuX50Q`mT zUJb==%X@(|TV!b^c%9lk?_S@F*1iBj+S_N9TfPNU_VG;Kq50v=SM&=|i*b0faukdD z?KV$-ARi4?4HRQO-dX2s^d&~8X(Yud(K56{wGWTDYG|b@QD1TEP3l)?$LJt~G0vuU z#8v2p_^-#}W-vmFnehhap*X|QAZ_XcU$g~SAl3}v@QWI<9-4esi z@V!l{aBQ9b_#>_P#*eT34?cb*TM~LXdgCMm9e+WTJyyfA;`w9$e(?-K93Cv{Jv{c% zaD?-?jcy&4*?634mLP0Jv2Q>|pAGgQwi^e;}@4i)AUX0P)xzz2Yys zcrt$*xv!PCwUZ%{&WjL?vhE4L{ie4p`rweUQ0SA*UB&yCuZVCHueJV(4)>}2ND(7h zvJh83)zLP6#&9B7GyP4?deqG~Pvj~DW7TOs6;CC&?WS7R%9^+jvTeG_^;&4Lj8@Hs z`ygvn2kv|VBB*8fJQ=e@%EA`BpEWmT{uB~Gw7a}3Bo$KSz8r9ci>mQ6)Q?VCcsN3} zsJL?JMKwy&V`Z2}9`&ArZ3ijYNFpK4;0L?+ePr+GFtSc$K?@0BR?~KLhjghLp7Ua3 z5jky|gv44$HB!&&lNll!GN$+fX8U+wDu|3-Y8Gkv+Bcxw=jq7tG=TL$HK^+xQmirL 
zJ%tnum0vUZNnQE`&4%El>@S2GVVYP)91D!uLjFGcdWmW~%Znh%0qA192hhdjsYOmOG7Q+;oNWU5qm2y75P&ndEaT_nx;lznVy^O7 zZLO7xshKtwSGrcRzV?~4=^cmG29|Ba+5Dds;a zumJFqpwTgrZsk}$5I0Yq*l<^hmz=2mx?VuO-E77cE!C9+vLyU{^ab8R3n&SXbKh*pizt(D(t&#%E#kWh5 z)N*BUu?I~FBy0`ABw6O4M{f@>&KpO{RY=HRdHN#T(RcX@lNZR zNiy_P!Et1p5t0;Y*)f!Dc3y(Qt6tL4y)Ftl2@>=%xzJl>T9f6UW-XwVF&E_sLMWCf zb^fj?dN}eeW-E6GV%nxnZVeSVSOo>DK-W?;Ci$e-qd*b*P+CAXh)A059s+gHVBQR; z+<9p7z_7&2bbYq6?l!8;FQR<>X>CZTywU5tc*Fh`49dh#0|OBqJDd@jqF?n?7$v|A zxRZZD1DSggZagsq9e#_?74b()R!%KKef0uu-ErZZs_$I@RuVI^Cj$_x9H&gilw?*>J@cA4@dc*v;~J&9-<2#Hc9b&Y=#83L2VY3Kjj665*S^Ak2-2e zmTa^h2D;#{q6n%LkBrH!7>~;K#7Ag_Z9&(0c4lHUvz)Kj0lvc5<%GPBIpvw_@Ney zIE2x{jL3LDDvjW3*?s5UIw5%LP2}o~iKL`w)K2!CF@75o-JeU^RCfS6 zcIoFE>vQHxhTdxC_iZKbG8x#)<&YDz2dVMgTIM&eBXg1pf-FY}xTrFkR> zC?M35b_G9H2gnoS1FAq9qeW0GkhCsP$YzH_9omu+zmIbq;foP8*ACu?nTK z4#ulO%7qTLU1-*Kn*7Itz$;rQLpXf9%9`3EU#nWUZa=NvgD)8oJfLhUk$R-Qirjg2 zYc+E8LT-74XaW7sZWGVln0g*wr%6$JC&gjF)ibAiS)8iihNi*$no~mgyv0EaT zQ6_h=nM?W8G3o;QM|nX(-31v&{h{jV_$vln1fD}p%-_vv8u4RD{S+QpkSacY#Kc5M zhzma2WynNd*U~^kP2{fdycKx&zKq5)NQ_^^+-6tDG2bbP?c&}}75w{s1HSw6Qe~4k zy=jL8dKTVudpSC*47DMzE*J+?@Yg4D08^>*>itBZA)yN zu&lym_VLRkkV0^2D#;9dZ3WToMN9nWngp}lSUu9AlqVgz=Io8#|NtlP$<_-OEdvPXt+2l>OQZ4xHwJqhxbW1 zdcm#(g%*P;0oB!q8_i+2u1+=AGb@PND6*nFV^pqRpZKcb#ZA9I;xU%-;zn=)osiAD zx6~Gp4v@SoxH-bh>_-BKdCO%d>U>Qz{=)FpafOK~d9*^H?|SFx?MGLx>9QeTJpT4q z(GLRhKdZ01nGVMmUw6ZeEWZ{pI_AfxNPJ}NYV+_TuAy4tdqaY>RzfN1wzpC7%f2xz z_(Lfov9?l9pXLaz30t41=2u$M>h-Lq8oHC~0^LV{%=(~8Ubg&TN{LZDw|;3Z`v1q& zR|Z72Mr#`=AUTA9bax{?bV^AhAV?_EC0)`XFfbq;(%s!D4MTS`NcYh7ZO^&q-tQN` zm|<9JulK3-PX6Pc9GFsB%!aT36H%el^@>!+70MNhw{FWnx5=Dyvj};+Uy+S;gxHIc z$M~}c1~=uokNj~^d{v2epS0hWk-hV4ESSF)E3XM0lBoXr`=+pu;=_}sNN=$K13exK z;tUtsey$}<4-XHT{lzfWDcF0%u!zRhJwnKtB$?_F!5gC9d-gI0S&2WdOeae+Am6XQ z_`GhQYE5roAFGWY@6C~3mG){*P=fx~dK4%rACYuGt6iyBgc%tci`L}O z!UBebn$AoMoaj!jTDF7X+EdfzB+Q)pybngF=!R4xOF`{@t-$M`GaF1`KoQ+-9H+G2JC*|V?!Ztc|Ju&Jg)2Cp;^1Lc_Rqgw9qPiW-W$J*^{Q<>JPKZwJh^c^5)g<$W;FPz z60PWmHd3SL%YR2S8IH3udTaBe1xz`O%6%LS+B-R53B^J`y~5tD1_0Jm{9E=zvD zr%K>ikaiGbaWZE@?PjeYPxX%w`XXhJ2lw6YBLqGjPQWI6{E?0N>(PPw`P=o@K*RSm z>Hcw3v>7f?QxIR?eY{q0tN5w^K^pa#$b^0WY_NvuFN4H>WmTs)LI z^6s!uUmU68846|aEg!vO32FK$z49{_&qVXjO0;@oSH8-0*FN@M0}6MijZ~CRzPs}u z5xZM`c7+Qw9%zcyHQ8h}e4=Lf%i$(vdCpf+_F!zJ^k$S3PLtI=qRiqauJ$Q3;$qm? 
zNJvkCAN*%1Bz=!$me7a5!XREJ))M57!z}yr>^vc_$f<#Wi+$?yh&u#tUYp8~%K6FO zYO;uo-+^bci@8=v)xv*^@tuMr0h`b+fXqKVHa6h#Zc8)_eFDy^L?0NM+4gW=Hh>I6 z1iAr0P)~0G;yZw$lOTDeF=a=ar`Q&ZToK|w+|<>KY^_W zl>KxwMsrijGPy5q?<0BOrgGc&7jlJD1(Z#p%&*B;o(>hs@-H$LEiAuf$!qZ?eC=M4j9>{(Xv%BFT2q-|nV zTOm0p;;nJ_=W0Ldn8@2x!tQ0V(bEB&Hjw&$%SiwD#ef;i0Q6IGUU^{U`Lm~Sg~GaN z{_vF4zS3LJ=V94Cg$jdc;v(T}96c5%CEgO8|K?%g89Hq3z70v|xpfddvijHie{fNzU%3^8E2Q2}gI@vBJDn!vs;m+r=^gMpM*+P|78O#11~s5fZ#bVlhmLZ0 z$Ml2a>fv9jfXv9=gOx5(+z1?ndjj%w-fe<4#-P8yzfgn@ekB=T9-Bm&(W{m~Vs4fM zg!;jY-zJop7AC1xm@VaQDv)yNtj*=N!9cRTQ8wKuozh})R>?c8WTqrm_1@QQR0$+jcijZHCOq!My+j5s-T$JeQU3st)AdJQ{(^ zF0l}Q{hW8RS!Nk39eWltV5TN8Wd7#$??-Y!PwL@Tm1CU`Q&d?TAY>-3U-`VM(&G-T z=4#z!pos@e6ONACHvz~!1r^Sh;NR6E0O`~jgSRtobg!mQGWT8sf7MQuh|u_F%nhQTyGp?9z9xw- zaKnV3(~VVkFFUy`j-5Q`yzQ?@p&c~h;_Nf9Q4Hb?HA_gqGxbwe2r?z`nBjB-023-$ zQevo_WYE=thJ;=rfDHHRjG<4<+Gm+t7S=|KDW=V%4g;zb7La>5YK%n+zePnA1o;l+ zr_JKXMZ=1L+H%zPXmCwXC_(3APaeniu~7I5UBzB%xib(=6Hca(WfAQY^x4!9_&Y~f zkpd=#>i+1-SQ0&@I~;$knmwode$9Wo&`K>Yk`^zt6lEN4hr zbo_sY!ZK7qA>RiB$nkr{!CaGc)iNgkM{JbGI{nV6P&mVfD$Ws@UU7VP16nBOw56w((iAJe0 zD1N=j*)Tg-)i6!|?}_veY;P~1Za+#hJa}a8_c0^VtaODx8d=Q#L|7RcG%n|tlOTwZ z5)$qYN6}W@-YRm-v6_BCu&)_!nIy2@{tMAxA-%-UFyp7TZM(094#1nYx4)hnbsP&1 znu-8A#=G5dj`Yhp+{n;MfO*PBd!&NgQBhHuc897lB_)nlJNyA1B-R|X=`#boN+`!9 z(?s*l)dsSU+^T8lsL9;uf=!cBQ}u#gBu(?y*9q=YT7cudG=lk(f0iJAEXSWH<5 zVyhD7=1nwE=cl(=4jPoWo>1V_US0f)e;hp?J|o01XdpQc4pCg3{a z%DiBsN#_*0xmp2ZvC&4jtrD5W?oal=uw?e2jS%48eTh!WRosR5Eq-(ZZBFuN^L&Ou zO}Qv!PharH`}Dl3nL(bp$U2dPrC_)-4)ItR0(OiJRoriwyEU;H8V=~9($P!2RG8k4 zJxG-Np0%hmE>g3O4Ck?uo~n3OU0b_-nPQ}3wfz7bl2Wk;F9<<`FGSLwzU#=~miQrG z_Z<1c#PV9yzM401*ILiD=iGfk)55GlSjdby2%)(`40!EdSRrr?3Nf7J*GHJMzB`@)GGKQpRN$mMBA80~Cr= zPQC;P1xvl$EAl7(VBwhvIBjrru!vdTMa{^Y z`b{q9e6(DWNheTfJ5%}*;hU$K`Uk>BVl=xXFtr<7@)z}s0ON$%(~*gKm*i=r;s!(3 z^XsA$lih`PxiG}5hi#aeU)HnNpPYvxss|=#s^#)}=|gB1(oUjv8w&B^efb<^sPz`> zF?KbV6O|FQU`oVQCpA3yvLg~XyjiZ8!D4s$I=yp(u{;0pkR%(=y0bBMMpbuk>|^t% z{seR*WA*_vCXh#6`^wv(W@W1;^^(|~=SWuqWwo`h7h<$1fuP(guMt7s9(cm@>!k1# z{UFyeS_{xeGmu-*+hdRRx&kcfDq~1AGI@+~jDKma$C~p;3-zW*)D~;=!3x$(Yo2IZ zYI^((S!nWEzobgcA`esg31zKmtG`K!VrLEQg8;fcam0_zbS{^sbJXngM9Kuz#^Z}`q4fWUVZgGVY!CUWLk^uZm_rXP z_R7-kv^SSnV6g`Y=mB7bfaU!57gF0x1$go=JNdIA|MUa`$@i)Fb_GGQj7TEXG8H2Z zyq{3NuA!-!6H*Ws_R7`GtpMvOwM_VX8uj<@qnJ#t2z;K5qO=>{trGxC!&r!kAZW4| zX~yM#6>Pl&nS64AcPTV1OhQH`C=3VQYF8<>os*NpXs*oGRhbR!NnjscFr>c9Fq;>1 zb8Dc|F!VTh>$LkP+4iNZ#ygz9=jV6OCl#;~7xkhgl}++_+!JaPbU`GG++FW0~AW~M#eQb^B(~+MdNdxO)Z-BjZ_~oC-n+%M%;jS6<0-~-+PI4 zLSR}1|GT5xE_Qz}79NO5WbK>HwkPqCkKFquAC+{sZ^|ojWZWJq^uO9G1$~&4@qkLd zA@BtC1nZ-)hkb(gNy?kOd}@<gt#llL_a-My#LkOgr?ZjY8^zx z|01jTwg1mIjmfS?%W*A5jqE9}yF^LmSVqUSVbBiCe-$>66s7Jt zebC3cTVb}GXiUY{i! 
z4O;1&r_ZF{n(H_nyP3uwa_mN6yUW=1`Tl+Qxc-g??T;TL+S|qOKlhSZDJpU@&U{#( z>SRgnXEyQj^qh(yFu7Q&8KBlwn|#JEsW zQWhFK$aNM71uVO)c;&(0&Ba8WM23g+6zd*T2{R~UoXW<`&gx`mXMba${!4!B4ynr7 zHdl1w?`&Uuk84F2EAzNa1c$*X&N0>TQW*eB;knetrV?jI{ux zb@f>Q%duPQ9cR7e;l6p9LjJg*jM|xo<4z*(Ql0d$wDO1tKvGL3Q1q6c^ofg!!|W!_ z!5ldB{k`$LfZHk3yS@z?&PR{a*#$Xo`njOIb1y@vEs4BpIE(D=CZ2_sj0QW4P+s5@ zQIoSOH>2O6Y=R>RgAHd59`oqf9!)G_ix>37Dp#|SQvA|^L5+(3b@5pH`;F{B2Vf=0 zj)7j5Pb6W^c_{leNFhKeZlm=js09C~@Q{llnu8FlT70c4kHIg&2fW!MLGNUJnvWj<%| zWz2YKtv8BOLZID|J&GN9#%tPF6?qvndjGZ1>LjRKYP&-Atn;d=9c`Beg9Hil_1*V# zM%ySWpCHw|z)@~LNlf5@{S-yXL4gA9x%(@>0DegRs+q$PaLL8*YV(O9Wmyvw>cf^x zgK{&HJYY2p@(CuaOzwR}$-f>MW7Ilubk(_wxOo5l%|K9$YoxV-+kj*vJf&& zQ-&CuBoZ|jl$ILH8RM}`=|YT)tJ@==$D#|2$90G}2n6cu?-y6eWW(5t@&Z0B;UWql z)LmFDa?->>1xmqz4f=Y&gEU+1rE;NN?f1@idqt+YNZM`M%Ybm?DRIdLf$dk9a52njsb7;!U#oCaDspWWR# z({K|pzAcK@Ys7j>f7(G)BjZjKMCNx?1*2&3f z)(uWfgVz@E68d{;{5=&z*3us%qe<{m-121UCM&MT&l=i3HW9fE%Ce5#RG!?|T~rq7 zJ~mcCLhWv@Po#Gvq{7dP!y{8?3w#WN-_c0^Lu3BwB|(Z$p`(u3W*dg7mt4<2%BD=8 zxdgwY&4OWn`t3gzY?yfoVIOtK?9V5qM1-$KJ^x!0QxtSU3CN}ee<6#2zz3xcKvnMu zcG8jmI}C2p87azDY}jVdZ*;iSq&;;B&}U{Y&7x+Z05Xptjt{+36dv%36gpfF z$>7?Op{)i}B2}d?j=#h3E!b*!O_~2Wo`b$K7SsXMdc3nuQixSg{F4~PsGO>+UVr?a z*s@E$InxgI&o2LRN-hic$@2t42gYl-o68&UOsY762JOa@wF`(ywS>ToY9lVu_B~>S z5DXh1DZqBu*EUgL)!M&BgEAy3Q@aIWR&);K8-7|2(KAoMj7!?rdZ@s~=%Y9OHWBTj znVm&DHpO0w)b_)aFK&zl$;syWx-oC%Fdj>5WIE;L z{+xff(V(i1hfcKRJPC2!j3@c?W*MWxTO|~%$QPn@m$7{tf*&j#&351Ivv`x&3`-x3 zxk-87HubLEso$xRm9sJZvHWh#4hod;HI`f>*+$ zsbvelf)5gPizXK1rZ}z=lc`Ar%&#s@mxrgA-Tr|#fD_U#Cex0S(Q9Jdw);V~&Cfq# zL5yr&VB%5{Dr*>ncy-Ez=}cIK?M_M&jsCaqr2gqUP8bZP-3^i9b$W}Xs1F#SYHk*U zac{%bHatZ|MZ0@@O)d|Yc1>Qi9aokSm!bUGyT8BJT$mp`OQb>Yt3_rVC0KoPanbhr z?DkIN&$o^j1o>a3hmRmw` z&Bb;5or{&+F{EpDNW)HQ^ks{W&{ITD$y^0rx-d905h=O@f|wbAmANSFf{Xzws>8Kw z07dNS9v*7|Y~9vf{+YW2z8ot znY&}cYqzS3f~SlH!S>nC_zuLl?^pEA#q4URIluLBZ2lyK3*rF21%;qhxV!V);JNwuNh4hmDa11u~+u8FzxC?C%ud z(D*MV3P0yO66Mt*3o)Eiz7ryM@>(FJkfcVWr8kINye%$dleIVJjD#OaB%br~kkG58 zk8lzRd}A0Mvm>}N5nT5(qhg@YU_BO^64*6$b$IvRjNOh$`6jb%AM7J=AT4#PoM+aW z=AnH#_!Mhgxx%k=2KhmGfYfp*@;wT;s$4GjFWAHR$6rY}3l6MXc1I)kJbsFUgERS~ zXh*-Co_LVIQZY}!2hAX3DB6srs~;+;yZ)6c@?yhLQKkhT{`Dt4#8c*U9l(Nm(FKeq z<8p`__Gu`A6S#3hNTWM>TAPkifa}N?nQZ%*GJ_dl#QDX=8drAT^u4?m!akQA*4Oh3 zpY~DzO=p}>HP1I3;<)RlfgcizH+XrTLC@SiABTXbkAc9^MqyFuI?KPV=C?30tvRyh zitE=c;ZnOh@B5V5seWa)F<}|0jLd|ESXvcu>IvZ*bBb_tFj^7yul;6B=Z}~*p(?v} zC5_a|@ssDgaN>y9QD*OZONLtz$=^0Nz&EJhkoKnnF?5(xR?d6o-ncy)ulW0sgv*|oJy3@+sf>|pB zHExc2SD8fd3QN5Z5+TS#5w6LLM^WOM+*y*%JHe9CdH*@Rb&O_9fXoC)P36TT$m!SIE0GlH zm7-{su+hO~JU;pXcAU8S>gY(K&07guA&~npm?;QmO}cmdS8A1l$&dC$t?arQf7({t`!q&?ua_|S&6W|^Z zkeu8UcEwzhdCN}^5*XflKOU7~YObe)Ih9TJ@bR;PNGh4KIEr8cioL1{XqD(i*KglE z9_$9W|5};w3CcI=?F!MOOmjXHZVAd&$+d;6B-qTZAYWyg3|^+JKx{vK_bHa;OzXdO!r86DNj8J zeAnF4qKU7#4MvPgTBOsmV37(UU0|C^D4x{_rPjK>wY9M+Jw=0S4zQ>WfI?du)Z%HA z>M;F$EWaN5IF}HAZjrB73f7cRlQdo z26;9dG4A$K-oNs&Bwz`9#Gnf_m_FF9oS;P{L1s1%N?a(~PRp0EmUoKM7sAI$0hd21 z^SMhDk4XX9aL(Hia#IO#FAkB5h)z%pYQsiJZwSgA2$hJOIxq91#mAr%os*K*|?qS^`?<&`uGZ~m%>z&mZITM zPh)y><0|wE_c=Nx)%Wu2K0)wvq?t-TW~rZ*?jQ1w92ny5lCaPd>|B-tWA&DQ?ja*yy(f;~>f7RCKAm36H zFj-8Q0v;P6!Mi(?6{ag=8RkR`rBQM+4v+iq{Z=JeSU4PBUJfCnc&HT%-u!XeiP$&fM z$l}k>WJk&N=8S^GS{VI@OQTO?0GW20p%dYe?PCe_>*Sk_?Pf#8dwQy7I`a=Lm75GS z7idF_D|vDs;)tjE2)bVGGjN6a7;?}lt{+n2#D~tL(HJ1$UydLR6?LCHj|1=N)F8A} zmw7fF=PxO`{<{kxZ5)eHf|-b3xfWrnQ&{{~x~<`ZbmvQ(d@^WIqX}uhDqG(r*aX{@ z4`0<+9s7{;A@&Z=;uJ+tQ6hRO$h9{^9O2);LnLzf>D`?%KmWIbUO-R(Gx=!bvFw3o zG{e>)F!soFI@(mChx9roJ9HC%i){tIh%wW+=H~#et?_WJVncCBy;QS@jRhhIxC!)X}w31Bw zJ*8iAcrKUKB2C-Z0Wl*V*3GSG{h*ch)uw>fygib9`KPd9`!hwEvua8> 
zNedMM2VQtHU%utLFi2-r48iTGnsuO~JSTTo{DDRL8}!Ne8RGz-swR}KZh*oD6!0qe za=RM1Q#-=YJaFoaWUrGMN)-;p80w@=1iGdNo_2-6t#clew!J?a3aHD=(D^oaG8+|0 z_n2?2d8CFm+r^(@Z}MVwc&pku*zItsDL1tW>BadO%0eq{!&B=7(_qyqBNVN<-3v|4%hgF(p1tU)t~7NU)#x^K77kLgtjgBYe`%1)=fGkIMSVa zGM?I3ZVe~!TWBE*_*ger;{WHoy%c=5L<>ZG?ns6m_RdUfwP9Y*z4o6sJj#aE83XB> zK1cqs?d#F;gn-eifN(f44cb46iJ`Q3^Z$_8KJSu`WGuufrzHgWGcT8W&dsa6xF zyHs5Ew4k+8_?2Jwk(4Z@StZ)tP#wVf*ycY=@t=I`qe&~%L#GO2r`*ZsBoy(iKUEni zUJSjP#$YHJ=2fstsuhie%Lp{u`*j_JgoUYr99r;y=GOf7cJR2^@97ygd+)>|JE)~6 z8zz&yb60=bKqmMte@s9DxkY-L)2Pwq$e@_nD0JrI*uYzuq8vnWr!X z^9?a^bX;7JztW?$?vB>!;GWlBDl zf-#pQ`<=khM(I(^EvD*?tV;~G`;zp#0ic}iVp9K9*z*6cu(TKr|Lwsf;x_W<^yjrR zFobzUsH94fR|&lZ&(XvigI-UCwW-S)a8o9K(B76O(#5Six}bChME%gvFfhC=u5eko z41Nq$GCz9bL<`6}Hi_5dI#_!RqIR|*cM{ca<)1L3f58}S*-QLIMxyJ#EvCovq56pJ z6m_dA?9FAHWc}ZBg_eFp~;tf{zPpirMt1DsL)My)S4sHam6jJZF{bip_S7Xz>nFOb( zDAsL}h^*F_vIaS`23Jb1s+&Gm%wU!md#7C8XA~5a_YbRYPUgqQ{W3GDOtoyf9>!ts zm&MR<^KDogqU+!a0jPYV&`=!$OQH=oubxTnDDhVZK00^z&m|8%T4>v$Y~tRM7{ zVP09#yBN9sG9kpjhkN~ZwgFiMPdY+qpVbxU+uFRl>Qkw zZjPxTp!&E)F>oe~;Is|Vt{EGx~|8N?vd$^{JH(l*!>yOE>bD37>S{ibpj>k7ck&PMJEl5t-UxOFS z!cP^{qdvBv$ujXWNkne))O25-%0Yf`FrR**Rj@E0NlZ~3((@1KynMgZM`Nc)`c7i< zB)joKT+{+b87L+SJNbIE%}_bUFV0<3dtri0d1E1B_Ugsepq);7{q(L#{SW)W?6 z0yrM}sBKEyry z*t(~}P%=2}(NP&k0P#XGc}9RMKXKVj&zGW6|MYR;Mak|yA^;01w# zmdp$JP{B53H8uF5AN0yaZeM6jZmKF1gL;sFeOi(cw41jw(#m`GqD|bdOVBTU@bjI8B+mg8Q3k>G>$2t;EP~Q5Ej5??oGq&^Ip?mETPQoP=%s8-v3K?*URab%_5@Ca zS%|93<=ji(mAu*c@8Ax!Lg@P$eVYphZ&)auLIC4hJoThWGgI#Xqr`S%paR#6zpexo zMZhvest!@yKMl1-_3Z9hdpDV_K*h?$Gb&zJW_fH5sqfkztu@!q)G&d#(FW&%l_pY6 zm*yHGOQ?aWL*)Em>*?&TuVwEc_Cdqv{UU95Hp)H_tsKCyrGInI+u1_5R(XQOEI@FDyFSZllT#HgJgPhBE9hPe$yO~$PE)E^|%Ld*8P9?SZ!q(74C?>DL# zz~9F%Z+Sds;x>?o^D^(o>jLg34TU)SYx=(f-(JAOOQ&3q-^}U54u5oIUaL zq(a=a&KkXi*kXxBz{j0#f@LC0iFdxejZ={2|=fJ zJg-{T0fVW)Vo3jm+&~q|DMs-#dN+i{(dxtGI_<$=?k`bTJmkp=&ELbO1Qto3Vej#w znLLkgpIW{QXVu`I$@7ivJz{sO7CU{vrW-aOl%KsZ_gv4TjEfYl$XF4hO^r2k!^`0P zZ--={)Ks*+e|h*U2wTFo99AB#@>fRib;JfHxezjaOR(e@_L@9xRt|Tg=C4EK9N^P= zvZ-nz55UA}4e5#F6(9t-Elbu{R)d0tnb6Izt=SA+>q*~Q7$5TOPG&{FXN@TLCYRO$ zN+-qOc5}nER=HJ9Ts>rhs;uL8zIUhuW2ruG z!@^9DT9R|y##0A9S`>|pC`FVSk!hU{mMO{LYp(K$6>L(nm4gdRv%_we9Tmd<(4Ynt zDv6^Vk%;Ma2=AV^eqg}O*7jDu?h*S6@x#SNImexNw^VwPZMvHTEC|k7*6CNjpaL9| z4{@=tO+L=IMX5dbv&{{Xx>VbD@zi;6y39*^(J2ia08akeLUvxXTBE@uX^-eFT9-E@ zv;qydd7Oi7`t!iRNcUDjoAhUs41+iVT7AA3*$XwSFvLjKBtG-cvAg6)CoIhB{MLWp! 
zj(^9$j_JJ~?jSiW`SbgGP;?YfH%Z0Qzs6xXHSmajN;e`(|MwAPcqXx%nuWVEY&4^e zH?CgvhxlhJz^TEJe-=~kH(>z=$y@Qn*B1I*hm_qnBV2_|by#mgu5`S%rB4HH-<2h4 z7erz;g!EOc#H0ter1>xjc>yR}lpkk1uqHk$R2T&VH^%7hX?9r*V+MiL_`5brSZo?i z$*uO)^$b zzej8}bcMAxMFu3SB|(2UUn@|jzvs%yo7}O{&!$2^AHv{J=YA!6A(a&J6YL(Dkpte0 z_|%v{81zWy^Mq=P=0$dcGu#pYRikJB)d$cdSWplFqZ2*)EBdJ3yk8iV=`I9+P)(HD zQ;F_zz)(ZZ($PerTc+kd4_%=1t#`(6K|NtDy>oOFO`f9t7B-TqN^fRY8mqcr5wgHw zmANsUag?2wTrDfJPW4hKEtpYqK|pO%o}-{-xL_()qO1Pb6W1_*f}{( zg_a4K7*viSc6hnmy-h%&D8DMZq3dV|ZCjoxLjA(nBeA$l{Nnn3BAQ zVELjBsB(&xcvl6}re!agYq++mQ&J^={?|S}_oGUlFLbA_zBGDYy03?2P*`Kv&6pMB zQNRj)a{p{ud_f8rB_66`Ho^UuKEFqit4>=)$wes-UM5bqQDv5@ajfv>w%>4j9LH>0 zJPn&T3LRRAuF-$YKa`Yu6=NZONZr)@pn-ObcDkTm&H?k4k$}0Y-ztJdq+gQpD zjLyd}FBL!0ps%5*DLbGUV_;F{otEt2(ZRtV#N)cq=EjWH*?Xu&qt6-v_>|bRe-w5h zoT;FG8XQcdI%Cq`=i$-Rv{YCYG1?SQ!?%#5JvaE-_c4o*&%&Y3bL-8UH^&Fd@lhJr z5umX*I|8H%RFBzxsg%XkQ$KcqOxvs%T*!*q^M_Vboq`CL`0gHw^t}(dy}kX?Zd;BG zki;Q3_IPNzd3U@_g{U9CyYuvnzb15$d$^#l?zgjl{CIHB0bbQo4Er$8iY2LVUl$8g zjabRMC*8qMKF11ew(ts{;bI~25=tR$df@M)IrQzmziQovQb%Y$PcTo_^0-XjQrk)@>+qQoC*iF%lhkq{U4YEvTJt>i{MzG}4Bn(8YB1*`4u# zql{Y|rgC*{K1vrk)L=^VJx8>|&a7z6;!a>3+up<-KodRPx5@CPa&`{v(v26lOe&Ac zutA`)f^=eBq1%h2uAs_ZRaHYVr@QQ#4dV^}iZtzsS3B1O>cfChC+Z?7v`80YWI2&8 z`(^kwsX}SY(HbfnpUK$do=P(Mo}~mNhYuICq$e(mJtNXnpr$+9pnA)S1;s~9-uB`( z(ZB&d9v;Q@whT$(1Nzjkv~JAgI`$R6tZ~|-Bq2Uut2$Ne39P@rqqZ3!6&9?~^}Psp zeuvfhu`)z~ZRT>q{y)U!FMly3^j)SvRI_nsXsV!10pib360Ax_F|ClUHow`9@Ld_N7$SU+kxz8mKGfR`RDEofJN>~N zBMG<$dmt`-&`ihJ7pqwjzCSaCA}ziKv+_fR{wkMuMEmj?w6c0Gi{K-Z`td;@+niQ_ z?Lb%&oo#@b)_H_mE{sJ_2VTFETelzbZ((Zru%r`UO++5Owd7K*RzDviyzU%~RrQ-a z7YrdxFAeQppGU`^>p%b6zdVnBIb?`QpljsCi!;bxs_2c=|C`v{%39TU5$k~v=MaoL z0k)OY(1`uu7aW=z9*%Cs=uVWliTd@v;J$TT@XEV3>*QKcn(AwxM0jLMX-G?kWbo;Z zy7J;yNx@)6o0Y&DU8ZyQAatTvq7YC$#hsB9QpD()ic?-0FtD`E-9W9vrwk77F5Y-` zuiqLMZ*3;LcEf0HZdP_e*=>xWSINxIMiRoZl8Qdpjh!~jyq1zpWG~8?Mu=Mpj-0ik zMMp=!-V$bJMq7BdEbhJ|NX-Q~AqOId1iwX}(dV^(h_cfa>-+uRz;8FF8chyJo5rU* zPUy3sVZ7tft*!;{;fAd$aJsXN4mg_)!VIk+&(2dzu*PoAM>rNhNi}hkk?c{EkCTju-wP!6B#QOQ`3qs@;sW=P8_TS z#R|*NF#+E*;O34Gp8qR8AIUX88DDuDm-gY{D@id@hmU()z~~;4ftoJRJ1>!+No-?8 zy2YygPj@4pJdGS5WXcO^f&1)f%U7pi{uZcG*Nu;0zYvLB43qL2Qan_QsDI^{X-h@z zJcCKzeZ5%|3OC`+yC+BU-7x5E3<+3Y<(q`ILv&jpc z9~xaCl3~9fi1aR>$>yyIyGN`+^wgu#8tj%L z@=oKE;UN=ksIFwCXezBsu$aE^#bwBGl5yP7<_*)1WUt437jm8ag^-lR0&Ug5m7k1?@oY29iwytciPDDZ5VzWCgu5lhfK8axyqjVl9dy2R-D> z30HKfebw#)h$Fv*9(&iU>c-mIpRDG2_cva^y;P_qLrVcpiR>ALY>!(IDFkY;H*Yg( z7w6RF>u^Y-$NZSvq7NOSHrqSI`r3=Sq2C$^pRcz^AIz#E=w4Yu(d|f{5g;25VLW2= z1hYw~WD&bX5*8KCJ3xD~eX4@IGidzgE~}~_U+*f|dpusRld#EtwWHA|Swgs(8FREn zQJtMW28S0qS!jYG;tDnhd%MWnt^{Foi{KS6U84WnGtdLGNB;S+^ye09Tea>@pN$fYQT5TcaB65+DEOjh+k8F#v1Sq$ zFtk}6R;xq?L5%`=SK05>ix$Mg)28P>Typ~;|JK8yt%nc!Ay>ra>%9?H8F8)%mMs;; zJ&7>y6;w=#U8((WpIuceec^y~$_wXXo0h9k|NZuU2B> zOO&zIhmzJNjMAQDM_TEL%^R9iPcKYBJG{t7ty!yw>(BM{7Hn|Eq<_EA@z`d~<}2n6 z!~6>zrB?Phm@jS&eqajW29Q1Py`sK{^y3Q->O;bUWFsdqZj>=%$5uI{do>Yq?97dejJ=WS>+6@x}Ths{_+zCLCUh zdC9dQI^co&2U3sAHd30aIZ*Kr-TKR%8L;`)0g+UvSkXS#RA9~pvdQF@K_tpPIhLNg z@m2=b5j2C54bW{<1=z?1LUG@8<0-&T>KBlsev0Eg>nDa}p(L*E9nb_A*iti{|D(14 zbyIvf6vk!$$95BuZjYUf`Pw29*kq_V5_BLMh1hrsHJQvNufcSXBK1&f5PHefpYraM zU<1FlHx$mPG)6P?RrZuG@pDtGe#DNOi1V+IHoG!;sm@`EY}lktj2+9GW40-=+X&ba zkF;luCq8ORKOBaz!F}=P<}=%oBFJ5l2FNXOV7@Nq-0g9Nt~OZRQLl!y9MY5qi^gWZ zwiSOjMu*=&Nrz_YdH$YC+$%L}RX*ED`M3A+9dRBdqQf@?JI{`P-kl+I1P3}-Y{SAE z(rnSx`El)$z8m|4M=dU-CZC7A=`?P`uHT{&xhP*jH+vZy-zi@!JGAfuVymQVA?kFx?wmvM92(!Yc{8HaqCD6)a{F?{lybF ztEk6?6OKe#87S@s(B*!K0=k@mPYV7{%)edOP9)nNJGp_0QsL5*w^q#4r|MFK)fO)K zLNrC~mSpaHkEpDuujy_;QjW%97vyW8N`tz7sko$vgQ4KxQk#-#~qz3dl7 
zm=CzYXfu1+II<4_2YGX5Z0m2$Eh1^I!H`j6a?gyo z&-Q~{F&+-BQZhSih14_O6**hGe`M~^1sF(>nW&pjl@qjz-r;}%aH|-h;NsnFy4+dc z#35+MS;AYp+k8wUIGC-b`hm=O(>qKu2TqWWQkC|0#KhhzOg45#0}2)P%`{4uv#5R_ z?bBllni=$t?&|OT{bG5V8ngxmt(9`G?(JD>vBk*u0yo;M1w&=u8(5ME>==>zy~t>j z^z4o$Q&4y2?c@fc66CtNx>3*;0%Au4Df@enzYRvl*9i_5RQG2UoF`xUFE@H2S^4=W z0-8e6sc5MYp9hU(r27bw0MQ}AV1Lrrw1nR}*V%lmeq1E{ zDCn4>?iSVU&(~#Pq5t2~KaCXg0ZLD4iq&LN5`k#`?UOWLi#MgMA0u;wi?RD5afP|? z&8w-*V%|`v30?QgS?BsJ_v!N5O6v5lhB-Rx#(PB}f>^ryeU*oI=tfEpE-Xd;M|5Tz zi8{amlIl{R^cP;d{0lF_-Jk~lLsn8KkpgCgB*iYPzP#xtu$_p$jAb`aY3licP>@{$ zSYg}yXwPB#U3oS7K+lV1Ss=n#HC6GAQbF*hLCVA(?j*H8Obx4#d~gS>+fUjT1Yx7x z;S}&aZ>MF2f#YSX=Xr^4bO6k=P+NQHS-#<&W#E{-2Z)%iQLa_Y84)S{ijM-zemmLPaDqsg3e@|y_+6T3k!>KwgcMz;m7$9hCd-a z^5WXqsGfZL2MgLo-2ER63?ktyRHsQ=8(%*%C8}rch&B2qCXY{d(yd zY^1WzyICXdk!1OCt zc&Jldp)rP`M2fq#L%+G{7`4Tu4{SND2v5|4E$k&Ft1NOyYic`>-@%o)k#9m6tDKeB!?|<);5g*bZvqf zZhnTHAH+7c-zyRupPyINhHaG^7T@FdCF#}!PKpaoeCF-g&TCwJEuVNz7#<|G@tg%Kbr_*h+(pY(~m)K)**t0?y6 zBx5d0MnGhq`wH~*_Ku6!m>Vk?S0H(;-fk+=thK_E-;qE6jDE6j{>z;=xMXT-YG`>) zL;BH{c2VGBFfmB>HS#OIgzOv?+r-I=G_E{%SY?hHVK+In+@Qss=cZldtC00l?Dmxt zlvvauu4p^0xS*il4IM2dFe01_F5w?~p=YAbgp^04|C}j8f^Wf3auaSCE*6 zT_ederm{ivKVeN(+_BvdXkQ1-oh?Dcmi7}bRUHD+{ytfFogr-RqecMvQPPU1&WKDF zQW1kUlkIdVUovRk-BN<&3H_hV-mRPEs`avxX)>~uwFDb|Q!-NVEV}MD-c1jRxMLjm zDZ7&OmgIbf)e*X*pUDE(%z}OX)mzMwlNEJro&<(L@PZ48%1zmQL)St#ob7ikvB<{p z@MFKUO8dr#lB3pP)Ff9nDZCIIwC$%6!kk_-&m2O7<5HiAq~agfd!wgQU1AxTLXS(=W-lg>g;EZpbTTQ&OT?7nH_Vs4MAFJ(U(6y> zkP0zBW+L2e{^6=LRMbccpsP8Q-XIY=@x@Q9VUF9T1x66|LTGKqY>T=w)Lf2)Lz20z2$4QC& zuiEH$tDhg4@a2N0ZKl~Q2z-yS=48gfycxVACS^K<)bYk{};RnMfeTb!Ow*A zMeD1^hxS84KNOS}uhT?!lF$=U4Gin2oFW;A$T7yHhzF@iPn#H_XqtsD87g#2+Q!p5 z#q_Q0Eq<%;YgXp8$$V!9s#Vc9o-bQgh#qG+b!nU#$Hs(DUs2d)I6Wzo-dD-ztJPrD_uO!l{UkJy+feE=dM3_nC#wV zSD>ou9I5CBD~7}-UfEK_IyeEMyH0L`qwH6vI4-^;c-g2QVZiiL#l^KVEsf^)_?XIa zu)Vo6@%dU@v^Q~7{ZJytY}tq#=RtW^%vg8QQ&9(bLofP2J#o#w%fnT@jh%7!>`;6c zt|B6QT>@9w@JYTv9~s>9_Yg&+%&H_3AM74C)^bUSx4lUo5k^#(g_tW+VurOh-e(r@ zt_yuT2Zxz(j{CLMoK_^Q8(oU8yrI=rkrTV_js=@=$^o{5Q7^Rn#%9tB*G!WoSvnb+ z$>uNrXWIE@@@d)u8XC0SN$S5975WE$4vePNg(aUm*z;a_21ZqL?MJFfeG_hK>1fj4}J+V+-#k-9rk<3{6 zQo;@(8Qb?7>=#H;a6b&1ylhRffsVSDNzvn!mmFd7WBDv?gP!PP7)M1}ujmv*%7I|* zFtdo)?4rF}cAF zeU7{_QJCr{J60#q^9I38lm=|9DBY06RLjsWbwo5CA0Q?6YE6w&*S$o+qJnB<5+h=9 z*~d745DEwooYv_Idh`RRXOt>JF+gSiwxS*O>y%5}m>>QdFZu^c1NVh37GF02PFET> zZl%)(yyhJMeAaKiF@>R@&-&%_S-+%8NB`fmmg&>wd_oHLl*8RCNZB@r0=j7i%7GOO z2A5iNGEcwcV06TN++39tqwBM6+j{p@fN3KsJ90w_a`!AWTc&45+aK8gO_!USYe4Vt zLhstK?l_MYuyBH?-BC?%|IguXrJ^h{j`)R4Y#%al2zo*R#v1u>-s+N4ayVO?t|Yq} zrtKY*zsv&#|{^%l`iE3k?tfme;#X5^o@<@^DZ`qwxIgP%Loj#E_kSgV^7ay5csMl zy$C2u$o0?tn-%q&*TV!CMjC8GDeE>yiNKKHVG&U{(R}drVQiI&0X6y^u0_7Va?7^) zT}9e8AtXw@yaR^jtaOxDu=BUZ|H`7DgVD-oJo1`l{}riu*0IUdQ-AsN0qU3O^-AGl zQkB2Hsl@clp8zD+_yCC@{igrej)CJgP^kVcTkuWzUryEvn2lT7?Ft**SWb;&*)%a( zwn0-=(;d{+v>u)wz*Cp=h~Fb9vDDPDaU6M2ZrFq!s^Bi%gQn9#+(QSDk-`Bj#T%7{ zg+-?x>AuC#eD_Y4X4QT7V+y}WPG{i@GxR^sLwF2fhSTK%ZC$GbIxX+b%^Utv@mV3N z`aq=IVm5XeeKXMyN}hX9VJjg#+&wJ7CgS=i%~t%V44zpdfMrGIFA8w{NEOnsD_Y-7>WK+NLBk&e;(`4YjpQ z>MHy8Z&BA45zONP@{wQUo3=;YJZ2#)j(uDWyw1+}NhIl9Waa-ywu6F!0BcT0+kdiM zQ|+hl35GaKxF+P(G!^>#RhbjyHnomHA^iW@e}FOK10+U?GkXT~zICF$jkKH?mJQ;` z51`@bPxu0Qr}h2sc847lyV~37+qM-h`#SLU`tH8Ay~XLP`lymtOHO2pfkWO1d0>n9 z#kJipmvd)_PX=O8D??F)889rtK}`%GE9$b#jmk=j95T<%WKrhpL(kMgcxMhA$9fhP>QpLSyjA7E z+3{Z);MIMNMLir@uS73TnP@J!_Zv%+QvPSsB80*u`84Y(Eh@Z0ia$;SpwYd@&3z#0 zAzx;I`YN+S6nFl=;~K!@DA1hUnD*Zj$P^8D+D}lwnpWA97xHiXMRXW>!1}8XRQ(uv z4#*2SCg7JAtt|&2mtt>BFp#~i8-Xom!ikfteJ_(Dsf%D_Jje;%;(z0=Ap`k_@b6z`3M_+OdHfE$5txJO$TEyVhv0^Ap6!OP#wI2exbt 
zCs4#G%QDANeF)o(>&9|y=TIcXZqq8v|(KWG`~zcX!}Z;Gu>F; zZ%ZnE9g)WseXEs@j0-Q`y>bTh8zW5;Q&N`7k`nUjx0+D308-&VQX9+^qXPKN)&!NyU&y6r1t#n^IX!`V(gg^9~SC*s>dVn^GGT z*1NPh0wv$ho^m5xYVF8|c(BBH+THMk!Eu-2&^^qobMILB{f551{!%~i7Nau}J3_B^ zXJuk8I2C?sTzN;FfoJQwW$a}y z)FD?Yk_QWMPd*P>4(AVRt(n86#Exm1B>7ih^VYf{GZv+6;}tj^p#brq zOUxjy$sm9Nu>7Bz6Da&z<9m!#tdMe4g)SQ?Z8eT`PyYF{tx^s?8i|gMPTX!o6SH?N zJ|Bzv_jcLhK=;6&<%vJB32x9LEQF`5+1&O#;25NNrvoXacVa?*l4IRGVP)*k3P}xN zi9O4=)!JH@+-vQ?wC(Po$jHd!qW=R!P+uWFqk3>9ra=|td_~zMPsR4hT@e6k}=SV`Ca6<=_`>PGGfE4&mS4FEip4w-Kz=^jNS zxZ`~^qKo9+CMrMuv~Vv{-{2oXh17wH?~IHKCYm9jd3lnCxeS){xy7Fr=_ z^2QyB)Ce>YhVtBowuaVn2Kw>6{ZB-X4i9$!p?p?sLpkNzeo9e<2#5@1A8>EkwBX`h z=STKyO;i=KV{cRBz1O(zexsY|Q(7kIc%=@kpJ(1{g2KonIQwjTcFJO-s?H6%8)J0ap-u-0LrySy^<#32ABJ1a&!il-+zDX+*h{%<-If zyB=^S>E*G@4cPl|t!7-)qG0M0zFhYg_4N(`-6UdXt&_qnhbm%cnwpx&&Mu~*z~5jj zM9xe3sJ_TEZ|I-q3tASc-G9_s;OMdljF(c{>BHx}exgkx#@g9_ZnLM3 zKQmgNxRsfjQA_#G;gQ{%HGX_)`1cP+^bnlwA>&&6mSe;jR`HEOwv`x=l&a)*_a{k~O zB`bM5g1@)#`s`up^&{!k8`>1+H1p8KQzW18sNbYD01alp%PR-Jt7I6kTB)ahh;1ks ztZ@>Hj7#b#Cnp=gu8%^Fb{8LY+&}G@&8Kxe=shnLnOPUTa0*}??`y?P|56*S!WPfO zVjr5ezIXch$TLW6brc*1NdLmXP(vS1gD;|~i6t#9{j26m#;CE9oVZd`)tYS#n`pt2 zY2roN&C`>FkT6#!O`1HEhK8n=!yfV`#5=HntxR-Jvn%DvbYYO|D^oQr(moTU|GN;W zDmN|^Nv(IOCh3tZpE#ILNiqMXtY5HUB59W_*R63|0E z@#|VHDEsg@f06^Q*Tz%S&kZDJDKEL1+axc@|yi@`~^ZxrKZda?qx#EU4dlgoLWq6 z=?9PI>yUlG>zC6Q)h)eugr&H~HA(TgUpjIIOcQ`T+=A*7)#*PWCG-3^8@`PnH5Owb zyO}t*mSfSK!M{!@RfqQ3@!cT>NIMNwe&%ZN+GM=3Yq%)J8-oGOJHO~};c$n@kky8V zn#88*T%nfz#?;`<^=WEJ`fF+=u{`{Gdemr z#BTh8(M0o9oKFk|($gl;KgkxQlrpD z*v~!lM1au|?Ns5@4VLLXE6Ct7jf$rC+NnRX)AkpJD|PRM7~>_lk`0;kz>T^NSsoW4_HOe(r@IGdLTuu*}yv<9&H$C z-tzww@QJL9$&GRUMOL8jAmk5;v=mZ~>BV+6;d`Czio_4H{>O)dXUhW{;kc?MN6YNw zM_#me&IGtrH)D+R1QmUYFFAj<3r_z6{+?a4)9qlR=`W_KP`tzzp;{NKmY=HZ;V&A~ z*QyY|Ttx#j5rsRLoU86QXcYyLO(bMmwQr^YQ%8kqp!cX)crBO-J@g{Er%)PLZc(_l z*5p4sXbV#=zW6dSnJP`H-mvM$5PfE;uStki|#AO>U1763t52T(n%Vy+o{E? 
zXV*k(Qai%PD<_`}2bzS=zHrf5Ia}4*!o7|S@C8MUmadiML*$I%T`8qvZ$>s(9;Ow$ zzQ<-JoRmH%A*WhR#+1W~HU7k@5J7+T)i?UqAjSDAeD^e!mzH)!#HE+~S+X8BX|PU( zl{{grI6Ry$X5+3FODfKm1?hq(JFJr(z( z?p3T9C2K%VS|utxj8ke&J>T9ZfS^e4$jGuo9t0+Oj1)LOKfjdv&VG;KohK=E{xz8t z&Nbb?70H!^mbF8K=Lgq+75q#-u+*v9c-}=z$;>xvU~%Gmb23JT9EHK;@PaZQOXk}~ zPCK7j(GahX?MCb~xx=tl=5XgkJfcTd9?2n4-OnHhHJ?MH&Zx=Ia2e=ySTeYI1;rXYuOG4U4a@~puYy8~|r3>58teDf7^aPA~d zg>I!K?nqrTB#xF6a~uZZ!X91uj*TS?)uqy!nB07LJFd)bJY6`&7uD;Lb6o=4 z+1zonKp2*_R2!ue7;O&#wC^d zhsHuYv}TcxvpbEhQLpXDq6xj&^CZ#F3Zrsch9Z>4)3VBQ(mx_joY0XjLKBXZ%Ma`h zjnMh!8@q|_2aNMsas^En;z=CqdR2s==T_QFmW=T|+R4f-lejVZSae~$hP7u~_UhVc zBAzhsB2RxkRha8$HGa$m?{qK}&D%f3g;q4jl8;l#7maH&d^eT|Nxp)FJDxz+qj~ie zrK(@V?05QA;-ElGh!hJY2}Te0)k0Wdy-Q{9dx8Fww<9#)-8XRo`*)gt+>Lc2LF_4w zcb9N<&lgY1Dl6FYQE$K|lLOqu=v_NdZ1Wb~s}&U_W=unj4cahj%W$AY1_}53{DcaT zF?22RYR2tKE&@cMPROs6C$>9#AOEA@zHgmQo$=X25J!On zoR0txXLZOAX4V-~koC4LKtP|$$ng0TR6_&%^!r(~Q-=o$oBbNcz9$Tz!M&?_CL+x> zHZl?xPmWJ=)iNh9WPv$L8dMwET<#G-$;}#9Srz)uPtv6;Apt4VkgW7$0*U%$S$3W- zxJTr>x#ur`b6E(jgw945VANX4)0TwoFFI4&BA2Nx0g8#2fh`hPSY9j!phW} znQqId%(;Cx*5{Az$P_Vi-eNGfc_kLIyO4eHi&^YsFi@aI+p!hRKb&tMjv=#H_fE2E zO#;4q(yz$=_91>u!1pgFbx}6`Ypjb&7{NZDDPeM%7^3@e{t%HgNjt8Cd z0RD+d7>#~d2!)78-4W;B5%@KCKwNAPa_=^$ zOX>Qkn^UyXhP{&Z7rkiSo4l*p4}4SAw!}`_;RT*6^KXeVo~@Gpn_|`_^2P1W5zle{ zFVErO@Zko1(uURbb-8`j{zRSHoST)dBIy`5^RZib+Ujez=fa$-B+x%B^Ya^ko)R4) zZ$42nwV(DKT@KsBD`t-XZqOZbs^B#MSGVxvQ6wwy-iD@ zV@KI6ms9U+u6>s>Wdx~Wj!`{nYR!rw>d?kcUXqRH!T#BQ@BiBcu*0AGZO^S2H+goY zC6tPl(`)IR`h2JgxPL#7hA6(T2Z$agx_)a35S|vI116i8c@J-BHM96W^oO-8!9=@+ z3MG0!I8Hxm^;eYG$$L7)C#VJw;LR=i+r_Lcp>d?%Q{17vt{Xn;gx#^+o-} zQb7?ySOc#V`^xUYp;fLiz16`9FQK2=&!b!n^UuLTvcDZI;0ITb7=x+OKd^Pq;Z#zw&N zGU>akZ^`f_v-t6ls7>9b9Bzh9^cC^l-5n~J22ga^PFQS6kQz}aYelI&gOK9$4iZEU zXt;NPLSwqw(W%J;);ZPzgOv;x_e@ZuH#ZL4+VAv5$==kGZu>jOv!=0u2^@c$UB64z zRPU80L{kjXcch?;>oD+sO|slm`}gc0Ao$u#b~i`zTU!_VqznnG)7vm;L!@GZ!j#=Y zgx0FBhnj$XjdCaG6bH`0 z=HL`rh{oW8oZt{@rKg;7S@p%qFE%I}9{1*RO{E$^NebJ>uYc#l{pXedeu51}NZGRE z9gLx%Eah>XCTwaoc|Pu$(Axq+k4YQKxCgv5l zJJ*#g?1_p+}2be-#i(ylzN-t10XEpQAt;8M;+PZoNQiOK9h&x?Drs~rr5{58bTwHNu zS;D`9DmK(^nbM~Umf=bbHGs0A-FHzB+9DZZUto4ns9*-txYB{<-($4^@cRkTL)tcx ztAq3bRdmAFGGYqWA&1Q15TANU5>k_&06Ft{B4IlmD0A8bfqzUk zdW?S-_0ImjDxN_>MY#p5b#-eIjdZ^uIdfd&) zo>EX|aOhm!SRdn{DGs%#i>-xUVfg(w=5Qt{)-V}{y zxg!~aQy(;-JMRR0*xl_oi~1?KOZFv0!;DD{@>d7#wxSU2NHDq~Wm*aXL+5PK%kl5D zX3h(P^carpHi%gFE7q(-SCT=u_Jp(Z^ZAlVPXba685vkKb>kb?r!V{LXSvVxLly8I zOUumADQAv6bkVIzTS=z0dDR00;@t5cTzJ*5lU$DgIfr{*bD;|Ym{8O`F8T=@%}c4wxDx#CqPtAN3}yE|n0Y0}F-Na#1?*H@>*?l{x3 zpY_H7EL{DK+=LoO-AmjQGF@tR6`KG1WsRg}hW(@hTJf}JLR7hf&?kOrbn;oecS zu9vVl(KW)AxcBe(IwHpuWg@C++zT?PP$8+0e!udFN_5B4p{JUIu2Elp2f$9p{Zg}xL#ZEU zS9aOkz-W_2hz^$&=Z@_EnYh6IjyeE^=-eeW1Fpcsmd5<6Cc(54ud|N4X1fmb?OYv2 z-bB|hGioN5mE@0Ihfd&O{=IG{B>k?ncAY>VlwKFO5`tD*2*8gk2;V~NTk}AS#q-JD z&!7i&g`X*Z2J}!5EiPq-_0?gy5*Bcs{YDi8f-dDP@=+q`PfYiT!M{LJA_F2+O754w z+43txVgLBfm{>B$G*25@`{ftK1KvvVQ^cQ?xklPXcl$Fw)-1y%O=4r!v%Ka9fy&vr zf(nkA`4fMxQ@mc|Q@qVU3ip z=aJoQg<}nDRHqESh6KF!j$yl*YeIX+Ke15TPAn`0!WQsi%r-)Ib5JjvtL>Yho?Kp* zRPKn}Sx@AYwH1AjMLQYwvn*9$)n^z5`6$* zUd!HxP(jCK3mYS*nLG@~KDtzDu3te^QGtK(3(h!cc*nHRtTqsCuT1AFn0qdqo^h#} z+tJq~_PZLG&*3GadU1{Z;eln~_v$uMLgb{m3Isthh%U)~%B-V)K<)v>lzvqGgTlPv z%cawIDIL4Gs1QFjD4Diga3uz%E%zWzmySliEa~low5FQIrz{v zJ=C~)k7%N)vmQ(h4*DvPSp}EN6B5{Dez3Z81(((jI(;}TyLdco0i0LdE<#Ya2M=(q z4!+#2*6Yrqi><}2DrX;JdI+s_)NU-Ny+L>#8|pHQi&eK!9ijb zRv)}XLbX07?B&XxPx=BzX(uhstY1ONi|C!{xdEVkzgH9AJ<0D}uGbgWeBJ8koVs)9 zb?pL|>k`-3*YCDXYn1%QeYwatw*r(JAW=N(b843~nle4IO9_kj4FCkw z<^AeR#{$mi;QC!)4YJm2$2;Yg*)i^@nqyPjq{SqRc&)TG;$v@$>K%=ww#?C{J{7?e0l+k4A#&3THuvS 
zbhnk)tSoc1Y`@mmYU4W92JRXc*y(;vNkrD>a|@g3d#}9hUgX5GV2P!Z@W|@!=0>S$ zSQiYbs%nkp?Rc`8xLB9;>!?WHMNOW!_;O}H@{$|}=hnC2gqwVa-@wVo{iMdk^-Y^t z7{JRYg>82x!9y_ank*IAGywrutAB%Cy(v4R_sh1oRa|x9%q7GTE(MR!A<;gpg2{x< zK3}9Q(=*!Pymj$t-Vj-4F5srXE?VBVRkvE1Jp6aCuwG#hz9jGU^^6sR$_IsII|usx z)P>Bd4B)#PN2g_$SRxh!Kz~6Sb?m&VeXrbPzHx$K=gLV5%H|Ka(fGWDVLoqR>NR67 z@qce&%x0(~M2AKRF#+eB0V=7`V2HQE9E?A(NY7H;QSVQ~ZfgtclAFcdlCh`90;{eQ zh=-v%B8%jF@@lhm^%{Uj*P637i4p==`ezz4uqcQB5`tVVNVI z9pm3Xyce2?+GsL1Oo8Z;W6CSS#U^`XzZokC8{{#OS7E|0=I@gisKZjLuBImaG5X4s z#If&CCS<_5;>x9i@iCW*;z1|{FtW+xGiFc{@hp-jn6i!<-Xw{s{^lJ5bg_aj2fPkv zue3o(vf%{`aBj0KU&rnyd?Hi)&+&y(BDwZ@;GI7QA+O}Fx}rI{JpCrPWVvj{Rx_gh z#}F>fA)NJgBcuZi1Zh__66#sDu6Y zgI$dgQR}hQp|LL6p&|_WbcXy}Fe1VfqEtk=KDf8%h$DK^7d`+{^ovYIMTNQT1*B8H z3#ju$6V4gK-WKtC%52?~CnYnh&Dc;$bv6CS_;`_yv2V%Df+6=_K2JhK3K*Z1qscfy zoj0O?S=peO~JIM(`W*DPN%Y|JLb)1AuR{RmPufh_LD@4tuC|bazLxq1 zwmhTuKEJ`#j$`}qtAoq^;ZVF5WDVT8!Ii>ZaAEFc_pFIqmtH960WWk$J(fQrOH-jc zcAk}0rW57p@EV1$zcBShk5A8o)*Cwqo8|O3Furo%8$_92ic$Cj+M>$5rH;~sC9?ln zG&G;%Or@(@jLg5|3~h*?a8xveeu+Xd)gVk-h!BW~Vl}i!Q!7)EPIjx?4u4LV&7eNqHATic9s?AF?mE{JCQh>vKa3b<%50PTyc* z)yJQ>=ptH%jRfqGl_GQSd}Dx&NR=`qttC-=8nRD87#1@w^du3EDw<#LrT^<++NURC z227{2iKC1Bw<5)6N7?3&!41Gd8^!+gFT^9@nzauMF6?pPh{hU$+i zoeuJcp@Y5sw1BtnVAwX$*CLN)datN>C{JxERZ`uBp2e*XA~0%-M+egqDl8%r9siSX z76um9+;*8JQMoj#mvQDLxd`K?i zLQc)OV)=N$v(q;ap1J%$vZvtUPN$&o=z2?7`~)GNJ7frnQT#~X2M!rQYNAmh=2Ef- zS58==8{yb^)yx&xHoC4&O4>j)&6NpOPs^|Ua(a4=rT!@t-gEN1%Nub$*X>#R&GYvEN z#-fqbH@%RQ#Hx1HJ~+NNXozPXAG9GEm$3NBO`>3ZBBZ@`iWmBSr)YFBL8897zN~mt z8;LoFizNcsn{VFC`@to5LiFURo{Qo?E@-_yUNyNjuHxju zsQ{1x8b$UM-PXNz12J$i#MB4bUI!H@ZfioeZjWiYAf6EC_k=I!t0J^^8ejXV7Y1HuGmzWn0LLXx-Q~>wHrNNJrhg`zHcQ1a=6PZ-`-upGv$0N3K8nFr_o;apA zt}*#n?>Ocz!p!cT?tqb0L3x|auIPJLUdM^c8N}iiOQMP42Kt?pGd4`)#8TY%^(6Iw zb~f;pYY^rt9O=K)bzTpAJ>H(@N(S!j1nL-^6fNgiAaV_Jr!p88ig6%?2FtE$1RYI> zL{Wh{;~K~WIFjP*YBDWj5S90Jjeql$`&&tw>-GiPvL*+*9sizQh4;vAZGDmG!0pJ? z+Y-z9zNv0dpJ`GNJJ^ZbNGw$5JN0>iFFd==p4Fioh)u1*1pa{0zFDb72>&b@y65b&7h~FiFkjxcEGbCgn;@2!e(9BO|UAGb9fEk82?!D_Hl32yA zgbl7-883e~52wE#!rGk=2yclNS`m$KyWnr-T-~{au{0B&wOu7O%6+#(l$%6h9YB7` zny1)>u(Q?T;6V(R88~Iv5`Ll4k%VbcZ&nPaR$_9lE#Gw_g{Vf_2t|?Vku(MNh37Ul zCfy%amjo7FMu_1R@mvkvH64PJX-SluPhiI;&Ldj2O#Y#DZ(Di@W#w`C6UMhnSOqK? 
z710EzpsG2?p|+GWM3^SmXRE&JT|&?Hn7*K)S}}%mclSt0k%7X<5}%h7Q&H8Cf0Ao2 z{bgOF`1IYm6BB^);qr!SsuVS=ii(mfXfv@vEWagL$oca8Ov!gx8kV8H;FNAI6`X3M zz^jGPq>>}Ijwh$Go=ti%VTaoXT7(#S7Ki1C5aQ}BX!G$Z{w9CMohpvAV?xR z@e?>uecl0u9BT0Yy#vNX1m!}uVfNihE6{c(lnS%*-g9dXV%?;F!p#%i4nX)rkWcY* zzRDn>>Y9Uq4HZEHrs9XRG<9pV5UsUXbQgY{?SS*H@o4tV-a5dj(&H?ki|V3{dLF0| z7|onomRn44xbX#y^+v+<<@;C(+@auGhY1~;osyFVBG$t@8xVcnkUIgJT{N=XPE2!MVeP)gE3$acfP;# z1RuyB>!nKfL_4YZ`!+@1c^bME;Yk6QZoV z+R(_QKK)!613X{RN$%bqw-oiMn6dq~gg`2^Xs7i2DOXJ6IPs&~jPZIUasvau9ID~v z^6DP-euetFD{Sh?ihOZBew%|8U2cT;pCL?UAuYaOE>y}LsXMkWt4*O;l6QI#1WTc4 z^Tcw$%NWs*?IJnK7h0iKm;54;m1fJJjs4iZszVlZu=ZtVFDEFdNEqKwRtw#kqAIF0 zLn>msf0){GD7T@GF0prV&w~60wL+`N+xT>}meT~X3dNplVlJz2o$F&dK0+GHu{mx7 z*4+6O)XH32;!1L?YK1{kSY)x62dP$Y=S|&VENXwJX!ARlp}U*Duj3^`zd@K1MBg4A zYz~>*HWgK)8;ZBrnK|AJ0@(4o%uiAV3ODm%Bl@qZjYW5I&yi>2Oxe2jmG)U{d)Q?3-?M(Le02#I#!3Da27^FNeRXB$_hL>OWoqa zg1AI|^OXM$M_DawT`l%kWbZ3`K=Eh~en}dVV`)J_AiRmMIONC%B9mp}iJmjTc3Ti1 zzmKF)7j_xle&*W$xHLgxJ#2qiv7<82`AKm}9a?*6VuVxEB-yj1xuWnTCpJ3pxi!TK zdVczcT5?!h9?W}iPg12^&rHJ0tu)9d1uG_XoKsTh&RTJe1%^*yO#+No{HU|>l4zuQ z%@P8_EsHJgk+f1-|NPwOQ0TU|diTrqBE0AHs3`2s93zsMF^#FD9Uj$lV%rik&cgE4 z)3d}dDl;i}R$NDzL23a3s}pj78!aAWn1rAGb6?@9i(RU#9y z9psK7{GpH7@Qy%jg?+!2c%{{?g;}OBC1RVxG}+QWJO~V{ZYCmKHx&&ZvqV-9HOw85 zyXBbOMGfrlKy@}cI0Ln@Rs}&)Yz(@N`u+&f;^YTm=keq3JY^S~D`Tfq;Xca5VO%iLsAlhsBfZ&8 z=MIMLKj=FhehcCP;@d>E#M#R2GSd;ZSy(%yKau zhhWzPR?z@u9f{k(Y6=M+4x-UC?*q=P<7M&={`N-^aL2}8=L8k9_F=YsSA%%qExQb2&vX>2U853Oz`51by>YRzhgXyMB zUl#HgCrP=!OwFCYP*thE!HTkt(Ia^OW8o#i8la*eg%wt@KzZB*%&aO9A*{rl2g~A< zIJh+w3tISfI(WQ z%;^Q9AkZ4(rf<=&_eJ9^a_>5o#E-Dm+*pgM(<&qO{N%_cKl`1YXxjP0=qn0Ih3j{NQs&c!r`3%4*$k4QRnR+^{N!l9Jjx_5~dnv^Vr5^Y+8NgoZt?+BJQ}|gc?psdfHFsFsmG5}Bue+^)q@ypAU)N^ zSbBV9&i?!r;j6V`pIjocJBtj0N46ndBtNUO8f;Ay?)@(uYJXWxG}rD@lovvc#2~RS zADdu!2&(Q^q{P^1U#y=-FdEJ04}W$CoA?zmfe}y=a$t zm*<~2^q>-wz6-A6s10_ER??1oc`n?X3AnOsbaxnJ{2Hq&CnHm^&gM&wv>V+`B3;+$ zoZpY+I>Q{ZUmUZ$Ff$|US-&v5J>u$Qqs$wC_~9(@l%eo^_?w zr#-9kl`H&_zV^MsOzOAm(CVm!mb!v1LQ-O8-Yl9{*)Ky#{XeBChGUr0rGweKt4}Pg zcwD>K>1bK9y1KdnXDTVSwzehXp_@g!E&0zmO3)Tq{^D-z~-c%Jb)vy;4q3#{8!`kRc zeki)v?^h@sAUA$~dbP($ujqt&wmsGN_t2zATUM%!UyI0?x!Y#z@(Xkiyk!j~hi(Gt z-N0#9+*|zT6%Dkkh#YN;xj`^EXxhud(|YMw6M$79{W+R?`5w^>nyt~DXJQur~3-6vGZ-~Jz)vljJw(KqfNGxQm=PT=9tZOf2 z56nIONE^*7NTgl4VP)Ix<@=rc)u;Xr;VijDGCm^Slex4`+Qab2pC!`lEYL04Rj$r8 ziX+t7BG>O2({)EYL$lw?q=vB37m8o0nr0k8lU?cQhjniRGyuRfEE zh9-y!Ya~yg+Qnzj1mkzD&R4VIw`f!(B%zp?817xglb}0edwZq?4=t(+3xY-0kWM|G zhQCz%iEZ1phnKi=)9j0ivPQh_EjPkTUyf7NGv$9C@`~)sE%Q<=Aif^~fH_bd+@~*O zL6tM%k&y9q4GoDg$sZ{(>OPgbVv(oBuNc#8aSHLkN`^KZhoAqFL7x-`36!jg@Z^wP zu~!ptc6?PA{0d}bo#mZx}W78 z8XS`F+uerPu<*VZwJ|~oKHwlsTj<)IURYy@W>0-R1aEMpEpXS*S8zM}3u8&FaZLk@ zeclnPyqr0RE?%_a4;)=#gV*AqWZl6~XC@G*iy$BiG2(?is0L4%cbzv;<0y>T22XWx z&eYKct2`fm1In8IMzN!yS1*ox1{?%ymyNSDBq-JfE)cj24_Xc;(HUK8y~CjRZxauQ zJ3E4S;^P_hoI75xv8f=0(c8vgVC>h&9@G|+?fi%$#T)uLnH@4|o-;jtCgkcpW9v47 z#(8$a-IkHIv^t*sss;31802F~hhbsvwi^+4ZQ|+^MY;E1tbN#JcDpthK^%BpgJ4Xf z)%z09VSk$s=Iwjz$COX>sv?UJd>1;oz3C&!xbtCEJ5(78RbTqkFy7F8kb{d*E6-zu zDCut{>8i;%`iB`Lq_y40!p5e!VF$0Fdrz)>7t={PfN4W}QY$sLOV_OxO{L_<)ty20 zZzeKWBhfB>17x^2!wVG5tz@y}VkG4dgEpdj$coxZ54xy^C5=`M2m4HIyuz{QxrhFI zjN_x+#NSqbbcju9dV2aDLwRq)`-#LoW&Krp>T&^jXS}C5n?4qF@4*eg^75ZT9uI6!RTILg>zL#~LGrPs(Ci zWGev4Bi!GTitEhc#tu^hbB7J&h$Yh3-CwZ2M&LVY0PCUCb{wQFA6U`gPSWBkP#&xT*wede*lQ4DiBxPlqnOq!d_)xsG^1pv7#JYoum2)Cq)#gu9RX6~eGJwLN zFp(=zHiQRZ^AG()Gx#h>8MH1+-a#5y;;T-v!BqIjj{lZL2$<`XSZm5S5EzG$I;2J@ z9n~kWe-JDzQ>Gm(Yp1-&j{&&)IdHQ71RtC}5~%1~S4trj+UR$bZPVyBrZvX$)I9+Y 
zh2f|x*(PY%1KHWvNx5s0`YPA_|1fn9>|w22*KX9ND>{YfHBJl)OE&GwOk#X4f7CH8IMeM!J7^iK6Ma(g3hE7(O#Z~EdTC{(%P@F zGp9@MW$g=`#4p*4D7h)70prrpPYm?oUBOn?QId=ci;J^O9X`_|SB#Cp+ARbqG6GEz zL6@r1>*5qwa6`jF>Rh-KS9pTdEAHgN&y?}imIblL8N6O@5mrQ-cSb(^&k^fJg0iIc z(1NxC85Z_j-cy-Yqqkb2IF$h#_d7?*Dr%djQ&KpeOFkpy3_;$qPD>f_RQ(cx>8 z(rjnU1=qBh3K7EtT-t7P+T!=OK)?fc%lMl>sLWCH$>-4Nb!e+O2PvnAriVT&DP(rTx|;FbGVP7WFVf^EMtK!C;XV3M6LV8ys|z#D~m6pX5`AdbJ=>B_Iiq5-ab&R>k$ zQT$*7d<_jYi3MbQ#>v$zS-N|S~WY7Ve{wtDAH16mwzN*E-A{?m$2 zEB}JMP8qx6`u%eg_Iz;U{crV+E@-s^3D-S_>NlAm&BLa7;STO!L1tj_wPyqCGkq?R zuS3djkn-}q7ePJKljYJB#^IK6)zq&~g8N-Po(@$*l}?u@Tq6?;t7L3M5V9tx#n$9g zie8?nWhZR#kM{>YY zUpe^B-oi-0wqG^;GdsBmBa8T;KEwLbyx21fZ_)PcIgGnPAwle#W#;?%kdTm{ z^v(Zys2_R;7Jm;`t}8um6?u@>q$9E@nCeKyW%#EAd4Rq6GP)1}#!G9*Ub&aD9&BJ8 z^SgG~`$Nx00dWLozgfryPwu%OASMFd(x&bdbVSng;D00|A*1bL({QQ{G(iw(g`dzj60u-xdPMlu9w=k&~^a=s;1B|G!#F`#gFAA`YVAeGq)J2T9R{D z(&nE3tTPFWd`I7F=5x#y#I-@V^LW4Bup$%6jSQ{WSg!>5<29HRLZ+rz3Bq;!^lBh~ zK3Dpy(tj-y6$l!+OcdYEaYV_+>O?#h5pwsl9Kd_vTlw;@(i$Ayxxn-SE>ON_Z4aRB zyD~#OhCnm+T(b+}(hACAgN8PpW!cbF(-lR|t}4y9W|lpigCx)fOG#ZoxRFm+vc8+B zz;QPu0==78`f~;mV&i?tx-ydL59x7IHdebZaRa+y-jV;lX}}kM2k=1Os0^-114JXv zBA(+zTPYc6JUmDQs)rg{&)B1X&9NIY1c8S> zCY&N2#qI5VO`=^4lnvs@FmF=rY^*!a7G6b5?Q}qh?}C$NEuy4aI`$oA#dmCL4S@b0n-}f2rOhTSpiTNxSmRE#$zWh=@-NX=65HV;Q;%X zPdX&X#iwR7rU6&}+^%9Ut^Pbc-h<3NTH`oiSqqig&Q1jvip$yW#Eq_BusaZDGec=P zC=XCHMzbskNJGs{YYaNaYbkDS((+G6y&7nrrD-h3QH-GW*n&|)$@?c^@5GE?Eg+d) znU!St&Yd1zJ|u1@yP%sro-rK~-DI)XEyb&rrlzzm&L^B*7jNQW6<=TX_J~YI`U}I- ziKTLBj|M+GQ0k?51yRbmn7P_TA2~14AbGk0y!8D=o#;)p-9*6|nPgylJyI{JZ37Ba z=c@X7?M@M}<|b(ma^=_0bkh52*7Dr*6>3~1l~?R=t$oIXr8E$6!Zzi_TVIu)ky`> zTueA%g`Z(B9ITmhCHYpscJ!GEeC?1C)-WS((z|_)J$fzQ3f{|b{BFfXuVBBMIMEr= zz{0|uM51)-FNfy*Vqqjto=$B80T7DWQMZhxpX-1%h!rF0sLoKFRt!m7?7Y)Kr~hyE zJ4hzoql<6o$?o|`BX_BXLq`wYGx3M|gajf)V187FHfzHoDJ?-F>or6<#GBPwEb(Pd z)!Zi^Xr@nderS>O*VFbJHgQZXA?#2bYeY~tKYfaK&%OY#Ekof{waX&#q!@Ai>W*59 zTsr9k{pn2ZkdcZd;SgB9B35V02Jli|A`Wg{0jchSjWLMYkR_`>EmCg1X&(&~>WTf=qM65pWN zP4A_$3Mb6mU>hgm7pTxMKWPuPgL7MVS{qQ$?vjKfNRTkP`qb6evxW-`34xuCdql`6 z+3vV|x0_+~mzM`HPTaPoCQY@a;^Ai`oc)iC|9HAIAJ3g>r`z#zoodu`v!qO?BrU1^ ze5{SG}QnM>G((YEl{SXV%)Hn^;)cw^HHG7TLF*_*`K zJp3HEIrACb8cC0rVN=lO+Y+fCK#?rfPsnf|6vGE3pc;vX^|%T3;GrOb6Z{lBh-zkz6!C%E%yA=<*|U%N?6Td7 zfCc6?Xn*ZchG5oiVS4&K_Cy`WIb=l5Un zHxF`ZLkS`pf{M4XKEp3(-F_azWyNEa!{#zL5q4uFigl3hiU8j%&0uY?wyG|0Mc}5e zQ_b#nIjc68-uC9t#(_wm9YIy-bF{MWyVxNZH)Nl=H?5PdCG5|9v%lu zYXr;c4~%7pR2zGch4Nm10-T+vtEu((wDJRUa^FU||L;UV|ELTsADE#6oj&_CeJj%V z|I91^?Sd8*gZ#!pO$K_+=7B$)hWw=nc54y=v175yL-1mLHW)72IgT>427}^iqcH?g zr$6ojg(s>qbhC5M>{tYFPyQ;O{`*a-ANA5PO8=aLa`TT1G4~x2A`A=Sr9M)~=K>>V zM`Gn>G^ldzNC?KKxnlLOOTAn;m5w`(NxDC~G-5o%&JZmVMh3P?7lLR%Gw?JAXG?!# zvLa&G6x`HRg2zDoAj>s9Y$y5~50ya(8azzv9HkgG%n~@Dgwy@hB|G?P7DMf z+d;nvk43r%BF{_h^xCn`a=2&fw4^-oNH_Bcy;}={ut#!H^Cf}lewurT-IO@7Gd!TZ zI_=R*Xh4+xToBnKW$cH?3u0hP8%!;@GvxRfQwVbc{|N;1pH!@t;4Lr)j53(uQDVk+ z2SzWqhvZ+la@1gl(r$-A9+UEQMdKJP80hFTRWHA(s*R6*^8!pBF7hOe7>AvaW8#X7 zixDV@5|JpYYc`Qkm!C_^xT4^9aYD$dKGUWS4jO|raBmQ6V0F8UZ*^${kk$L;L_>La zr|Vxx9skH1Njd>s&c zSPfQxPtV4BQq$2%{q%rg`X8H*;s*i_WW}hJcbNX^hNi*AROpmgqx)ZlqzLwG1!mF5 z>&CifjKPA#{;D<|86r}A#f^7_KSh`F*quxDNP;jZf9e$`jUpqjFhaLfb8G4iC#XIA zs>nT`Y0Wb{GZ}yas&ctCqv6naznkL3fw|*{czi39gj8lujh1Cya}#2OzQf^uD@hmJ z8;{h|w_g>%ee6dCRvL2Y8~MFv32S}8fG$)zpzdP6-l@#U;1+7ksxa9WwN4BxgzSdS z3)9H+A7Ur8MZrRzdzzv?p@>E0Rz+7w5yjmC$TQ#-YgKh!r|j5HME-O2@|nPixl595 zN`1G#r>Uzl;cnN9-@H&WP~sDu104A{EkKS+>1R0}Ca&Heq4_1h-Y0htf-ll#zRd8p zAO#X6pmJot700e9#k(?r_Wd~4>-{W1fQ>`DXDz36VJbu`);4sFC!L>@quJJ89q{)d zIrL9?MsEfc?*>U5{jNK)bk&Y*u(gj5pFm!1ef;+K``gPOO}er9z=-J_fqDJvkd|W4 
zXwOKbLw8;coQ4|+3IA8Y# z{^0P7)LuY;Q@sPR&weVJO?R3Bm?C)XhhYx{?95ohga*ILGYIBb;V}zgFynj1Io0|C z+ut{ltBZ^2Uqvg=hjYd8QBhGu=Cz69|I?%tp+OV_3G7k;Zl6I?&yWJyWaw&CS@BS3 zxb{C&s-9t_7XH92R8)^-Z6?9$3g-CAEkyetN%45L45DuP-7evh@2fIrW z_IbSMCMw;4aH&M=P*D8Huo3RH?fg`t!;f z*^{%oX`ZmcV0_7DcWddA^y9Ak&UkYVZJe!z$Cfd82W^Rb4+#HFHckZmJrrqb#!9G9LiVYw;WKc zh&6GeA+CQ{4$dEh|MyUDe_yybEYJ~jBx{B?@;ep!h2F4&Hm0L4LzG5 z$Um|Y`znM~Us_t)(0N8(X8Ne>{Of)x3LdQJT7~#|@@k!9;@lV~KHLUv$q{-&R!hK} z*U!+uvyJcnlT5cE`~XYJjidXx>tWuKXaBWB$Bie=jnFm0;&5)3!h)Fyj8a=ZA;Of3T31l}8<%kLvmzYXgt`c-Oy%K+ZlmC3t%BKp2 zt|Yp5yHjvPi{MlX1k?Y#Ro(K2HqP+UNj#pY|8rIn2u>|Y6i)K(d= zxdZ=8RZt$n3WIge-Eia;U2Vz85J0IWr1?#f`^+$=K4vLa#I;wu6jwyhO-A&Y&ZxRW zGcYdJ0YS54xU2IWnWl`VYU1-_GsPQ78`U0xP7NV$n3W`|2UD7c&yIc89Nt1SYQc0_ z%d?unjkh&{OAgzoI`-RfOgi=~YNL5)7&tuN`OsT!nvU1a8K^%_ZddxX8%(Gb>qYNE z`DS_t27oIAO=}niHPjR;DBv<>BPv-rMPjIIJ%6^yi9^P4D{8-OkHU>}@Igx|d5~8G z;VxQfT}j%VoRYfRm@=_BJE!UFbHJ^B+`!^!SO*@-A2BP20Zv_UU<;v zCNj#wOK#8xvhHp8o!L7{-&~{N;n6+K0AqV#8KTlsYbx_Mfkr$;*?K$Yss=hcho)OH zz9F}82Mr;85c{3YEzyaIi5ua&YpEK7&2{1S*5;J4+8172k|!4VloMx6^6!PAZeD#Q z{22cwzruO~zxUt?B7VLs-zLa`(En{7wN1IBM=4gE8>upn~`qLDS8(Yq!a^@53$I8B+h;^7YDym59-6imv?0w@%S# z;gWd#ojJFL#^Ku{1>K27P$j394TOFMW)0*d!Ha{Bi|`X)+WC3p;t&1o2ZO=U;@>O1 zR7ZUW^B*sfLWT6pYeQ{_Iybag=+$?{VM+Nillr)6J@iEx&qvCR9c zPw5b`AdT-8Q!E;okCMQ2N-jKRba6IN$K0ISdgL8;cm8yinCCQwOdHP)u~MogkWx~A8*~#g9JTkN5SRq{`_W2 zG3j}!mZ3$2C+5LV{%fzZoWRh+t*lfU#;JdmJK#2`m-i15&1~@ICjP6BLU-r@ zi>Tv$u>}OH6{fz8nR_3w`-|1zB=5m7@)J-$eZPTZe1}r_*|RMMr6o;(;3p_JpVkS1 zW{Ez0>cmperf$kCK zAh}!olxS5L3{Z@X)o~9HlRD}Ak@Wk8^A*B>b5aZ?(!@sd0dI=(m9T6W9X#% z&*^}U!;lc~$t>NdPpwJFjoD(oD zDTz=fvwM*~u%wKQyVY$SrCxMbba35%mCfgbS8u6hrfgHruC9tAy~v#7Mmplx_RK!` zHm^(<4ygn(Zw$=ye->mKUk|ssh_V{I{eNTch4EH6ARpGG{Q@(oT^45h#>r`6cy_3( zsHF5g8YuVW%Vti%PYYir{WjJzd8|FG6oHmm$K z?T`hp2{}mIl$CcgE8x)W40v$yU$Pu8e7RzyIb0Fio%kkVEG^KnJZVQ1QU&F0TbmE_ z<|#DE&RM5)^dx# z9H_|CA8BMRJRRu@Lf(-y!O39pbIdYIh9A~E{{My#hc5g0n6-_Y1Hlu7rsN&sSycuOB?7mb&$vV@`cwv=PCFC1Y z9Gh8;DrCZhMS+@H*?MB*$oWQ;mHcFAC)`29@!22q2O};hwgeNAzpAR>G|nbD2U^@0 zwIOu?&nt+DdDpQ@{L#PC4mhNO(N+U+<;Hg*hPTq+6_1as(JxQX z)Wr(79|*u)=l={zU<9KS7<*;(QP0}b4?wUopCXGiH8qKZaTg-=NB_;fV=r8M4JivE zD;$J{&oao0&TU|M0?7)gqOd%kkq-rCPZNkOK`KM*d=>=eSiehuYB!LcuPY7xl!pZ=O;f9rI1ALp7sTlV8Yo=XrN zGs*V1W1;=7;V94QD`5;SUW1AD3B~eEfgB&AW=AiHIpmVU0falN{JZ=c<{R)YV zVe|Co8VNO#E{Sl$q_wnc@5f+~8u!B%nA*=#%PuwFo~`^s6oP{kEn}qOWVN(d-J9Vgwsb3j0Pn&S#)N@5+PDgxfI_> zvi<~D+y9tom2fxVT5szRlFCo zre|ritqc;VLZA;uE%+9ZrtaNoNvm9|n!;OaZUr~@CtzxhI@VxCMX_TaorGXTjx5nz z+sxB;x!5V^NI5qnH)AMII^%euf#dRd=aik(zLZ}0;A8Y#)W;~Vb5DUUNVxbSPT)Fw z)ar_m&J8XOJpR^OE~;{SR@9tF5oi?%A>#;FWLu8e2nP+(O__kpOS%2j!F;DWdmhx7 z0nTpt*cSO01}EW@^Won^x}_2yE`$Gs9+e^iFNiklz3Q(PJV!Ah8eF+ve@?KqIQrVX zFQxpyap459PKP}L6e7Bj%FK@Dm{_jt;0p9q5c@o%X+2l`=7r!+KdWE|FjFWgBl&3R z!VvuSp9ZVlH~7NoDDJ$z)Af&zh{Xt{7CHSq^(Oj_VFIMK#5V}St#Q*Yly5Rgf$lQDNaCyJ!R^7d2zmCW6%mK0qH9zTMJd&!py)He|F0=-9y-Mta>&fi z4a7p+Sj3{}$L*27I`{0YvY~VuYPGcZ!Fz+8WmFajK$|9B#1Y!lY*Oz3%J)jCkN__j z9Eo1!a33rT%2qe^bAakd=ZA)5xGSAkEgE(3ug(Rthg-&h1D)Uxo^Atf{(h!

@Y%c=%(41Z1?N%0 zWzz@-$J*@Arh+W9YPbI5JmW?TL#K+GpDBa1dI1`&HaY(CJffCUl$4NcVv#3%F0RR^ zCfOjqNg{GAYysovi)~&)?D5FloT4WJ7g77@=x7R^L1S>4?4R}qm5>d2m_C~2;1LcW z`vZ`nBxmka6~x3sV;7Z1;Y?u}8(v1^8|e45JBS=5Xh|cuKpvOK&Na;+>7r#1?B49< z=mZX&U7Tm5w~zlEUuat#;w3Bx8nEC11zB05tec2rLdB)G#ID(5Jgm*aE6`VYK7s@jMmO#!6GQrr>}{*04n7T1_=l z(qM@iPydTa*lP(EkC@F|$m*}$+`#Quh*F{(1Cel8Y4C|2li`7wz;W0j7@e^s>Lh5@ zZ2Pm!1~C=)&C=fsy-iUirSwq(k7(}Fiy{as_6DXCkF$tMKU&E88mu5lqAidlb`u3* zLj+F*H<&v1U$m(KW5F6)DCl0jY8H>Yo8G+<*MJuY>!e}j!&fL0>C23Qh%RBvJy&yN zNO{Q;_51r*SjJl~cfx7alYv4qw-UODHv~XrA|@6n-oaho`quH~oM$=dGCUfcQ3sD$ z80)k>We;3i!jEqKYuX{;0QWfWi3w^bR$$ceKG(WY4Zjlq$T;F!3?vARsh)k8#k!(e zY`loirnVtwFpir1%vrS5!(TMq(n_I8oZTJo|Ni|;UGK(sO7ERXqgk)~ znkCX@Gt%SC^Fv6tm)5*NDL0yC*M~wd4uMaVtF_(eTwSwTv4yn( zjyLMY6H89@T0g_UWQS(R0@+lhsLgKe-~D_-1Ox>8H$o==F{=IBbOA{@2GvY%-qvG3 zlq;TlLv{EBCYOJI^Jj=|m&M8vw*|76qzEyuJl0iI@ci$-Yi0lF?SFqU?f`@OtQn`>CTL@vjY;19zWGjfT6ATCt9xtYPsA@8uti| zz_;dPqH>Lq?iCrgD6JDydx^yZLsDA!s!BUkc@rx0(Y;SmxmA-T~QcEzV z3SpBHrV$qq#}($wi(Lq(@KXT>rKq_FG0R=zXYCal8O0sO)cHHX8#~}-oJ9byH6+xy z_4_j@l8EO&Tpbb&_@;l$(v=H;=ao>}&)m4A3f{*KkSf2W4x zk)l|b_2^I33|%KJ?q_p(a(Kr@T%ErYwTD5rb(ZmtR3N!v?z{r_GW(vxId;HHHWq{K zBa@x&rqh4HFAz)BvLE68=1h#zak7~09mpe{Du*M6AQejC=@o(@tV~Pf43`pz^r1#{UDoj=wRMN^6c$Uz~&wEm;48dR1|DmRBs6A{;#cMK&1gj53hqR{3pxNYh?pD{Z4CW-z ztwq^Lz9h>F$`OLO)BSVr{GbKG0(;~F2?qUd>n5<@w;G$^r7#+?@CdSQXF8(Yu*;1e zF6vQ-Sm2|2x;pL;aaF$g*%xpYeqW)Ad_oc#UYYNyt?K^z$Zec(KHu6uIGP3MGLaXv zbKm|+Sp$fEGJzh0Kpl@i92Q2MhMu^pn;cO~R@1`4&JiC45fD4*pLln<#aNnKnE_2_ zo0JrZMuuU;udmH&7~Gh&G>4??CHM;}P}SIxp*OFf<_@cyN)~h1v@T@L-!7?9KiN)_ zJl6iz)N_DjYbI|>4fG63^U!7X=8)CQWt#j1-3}TsfQ75^JYfi_iCLN$)Wy-q*x_T8 z=L)Ybt25U4dU92~WS($8=tN}&y~r&Q5fB^9vAJ6C(s$fChoPv*3Te|$eRv1jfNTGy zoCJz{n_4R8Y)mrc2)Ck{bcJIq#Sim+?UPpBg3|~88q34r9m>Wbq0^pvD?sTNhT|Q_ zjNMp8C_S*db0RDUXSwR@>qfBvJK{*PkL;pV=Wt+~{isdHjxn9!P`_NV5;(223nt*d z%T_jL~Sir95s}cH(I16kD`zsL1JSf?3ayb zzq^#dKu-^3SH>J6dFarkdMoeLqv0m%spQ9=t~g$yF6%zlMMqjC{6=ZxJQLsCG*uDq zqXIU;QB`B*qynFMXIRQkAr;!`pN&Y;K)D?qkxQ}tl1F2!rs7h%|wjj zxuT^mbe-0Y@4w%70FgrW2z~qv3d7j?Fs`RAA*P66%*>tUOUsfNtHr5SN{cF)AREZZdJPOJS`Pq$|Sl*il{DXl^a$hnYL&+M@d8d5k{0P zQr|7K5R`&xO!w#%u02j{y1b%3K@f0?H_aQ=39qQ;$eDDZ7l@Y3sZU%}u*sI_a6%;J z+UtnIaAc~k!ZvSNb#WH4$=thzccq#n5&Ox%ftvRKwkeiD=-tj#8Za`a&^z zmh$>c_p&XLw&b&!|GALhGh+IpS*Ipb29RsT)h(vORuPB4`2eq5+?^Apy?2Nv1-Jm1 z%dV1xIl)JU0MLO3PCwx~C0^^xIW%!*}IP2;8&wiT>Bc#fez8 zpyaHd{wv!-sznY~lNciM+1W3sg~`?n7^3gp%{ST0hLUGkSJMF}$IkXsif(q1f3(G? 
z_`at*WTAX^8x=;*qIR-=IUfp~7Q!VOP>3NctX$~cbSY0T8E6Mx?)rK`iH42+ZRqs*rX`%z7YlfC@7c+UtACQY@+jd=wf~JJJ6EAEX4qgM|+CA7VAvr&wOG zD~@J{kNwq(SrYKn{Jo0SVN?#)Xtuh0TO`y~*_6C#_LMnxq5C*XvP$Ow%vI{()wd7h zdrAGo@=zMp-u)9w*2RM+_BGF@n1PCLUF<)VScxIh$|8#BMzWZf+87DG)z?E z1PBT_2FXy8GrWTCX<1vH{>x3hPNM6V)cHPG-kpe=rRiP=kecvPJr>lY-4BY((At=C z6eOUvH(7y7GEc@d;`(rI(1iHc?AMVyfDFV2F(e}xqzmhaTXh$+0#yy$9KJ}0Qf+iu zoMF|iT0A`R8q>jG_I73o`TAAi0UFiA=QoOn98n9!Ve(dxcc|plF&mtli~UOI>NneZ zE&Z4#hLX=KJ#~qbeX->;Iu*;x9LaTn3;KD)REYk7_aa0oZhoqExU zSMMsFd=*I-tB+e~o}!6ADmxKoPw(lTlkb*(auMBdWOyf(X4 z2Q(i)c`8oTkUcJ9|0X_j;?oDrn-n+6y7`3OY6z%~dt`9)@H9T=cLvWK0}qCrg&SskCC5j%mDOY1Da4e_7*TaS@|UJLM%golKv zXw9U046dVbjkw@p0#Fkx^&q&~2hgw49D;wq+bI$HgE{clhUD<$*%2qf0V$)X<0ybr zD3EeBtpzejB4rYq6(Ogo?rND zeRt#h3>(uWM?p#2i3xKk9d%!j;iUGIH3}5c-bec!PnOjRm8FT0GV12Ozr}*>c)|UC z-K=`tKj6zYR+buYi86tG|JH#hO!pPZN-XJWT1XSXa!#o4I*78-TLrl+O*MF~y`zgg z=iJ>}VjyxM8v~l}dB~ok-be+YHvosm9AIo`$2NSa=FQ#T#S-M}yt&qIgn_=3jpid& z!nGr=s5XVHD*UUEMX17)uiVnnp@rlT8>IS^OHxG#m-8DHFhSIjJEyD)wZi`W_W6%Fv{ zJPwY0 zyqr`bLrx3#uS)Jf;JJFcH=7`S-Nh-ym$X6#E2*s%?7tf;HxZw1+n)ckN`gU!EHpLI zt1Cr<_YH&K2_fuLtgrPdyp#+FOt~8ut|`T}i9*Q`p1@J>#OlD>5Xx$@^~Y<{_q_)z zEX0B=?u!)DZTPXI6;LQ1pQP)Au&z4>c2FXJB!DyCKC5($UfNeC(#31Kb@%}JS-&)r zKUX*bclB>j&vSwnLpuZcb|_P~s3+uiK&;v&Yp0GT>6YtC-3H=5`d`-uXdWsuD}!o| z*A#5Pig`G=Q#=_5`5VC`!S&8@JbFr&ySUOBM;bHBd8R9((Y?rpNjGq|Z!llBE>|^* z3Wl^MA{9yp-?h6`a_B@$SQx@u%qM{+&9T9n--c&o-$jG#*~Y;Ri&Mv0Q=3X)I zy+VIz4%b=`amP^Jn8SL)BP=`}p$=H#hk@8{mU`$6t&R80b9Hy|doN}{Mo|&fEA^7Q zQ29qbIa;O3l0wE`V<-37zMV7923Mk*$D<`&r@T8Jd`6++#@5Tm2-}h zGh3cjl@mkcy!PtMG1s~feaze2+Zs&Wzz5*8l!Nlzcdrp{=Fz+#YjIBMh^V6v>zM}b z+E-*@U)Wss5~eAyjXJm;<k;ofS#$kcp9j+I}8Ytk_f)g9HjkpsQ1a3a@Z+W2u6>h?UxtlI` zlR|i%uyIF6M`ntPigfG^s_i-8++f^WW{FeVXoQ4}k&BVH(r)|sJc3-u*(RhMZo}{r zVnMCr#fbuZxivLRb(zG( z9Ppd+0X>n+b}aYSsp3)kNU!fJ`NYP$dciSF8`aS>8V;6m(@vqot6dWiUZ^A|$L}ej zTi{R8e~#OsnlXIf>Yh=KO{Hoo$ zn!?H9GylX)9$h19hY=k@r1&=_i}2Gbxpw7+h|TA{CJf_J}ndlb{j( zc|l|kx?gnr)*dUDyK)Gd>3?MH_BwBaTfoBAvZe(R6malyz0nz~o(-HxG(^5thWW%M ziTdon*Siv2SZk@a@twe*bl%g16?iZvk>68r*+Vde*?eopA!*d)jlMD67JLhVh#<1kg zO?^(sH*kELx1h4Z3hgibD@N~@=86{qXsD>Hxh0Uq@V_Q!2G{N6maupCa`0mc<1=X? z+J+QUUOCXE3Gnfy5fGA$YjA+818097>#pr=uH?I9CXeAS#=%Ozwy(5<@7<@?&(FIbMXe(;)G_=C#g0+ z#PZW=q-W@2@cM`JwjJF#_?g^8MM=)(kA7L#k5EW{4i1Q+e-Tf(YC`jFGm^)9G&_KSP)_&@IYUr6j{(kj2pXC zDnrFd=e!7<4()tm}?6g!~-4LQt^oFm+TAe!-aYeAC7;{+tqzK#QK_0e! 
zGI{c?9i?P^=GVQsN56Cf>*n>&E=mE*?!>8O)Tyb2ZmufhixdQf`RnHMt7Ut(^7{QU zhh*|1%1zI)(FmZc?YoZ3f7%zdlTTR3X*0ZaRi4n1tTu0Z7U1qrsq&>~;hXgFr;n4k zIj=X@t-_lIPhVxv_!An*6)L6?5QQ?R(8^9oJ7^~|jl;{>T*xPwZg959p`B<9MD}(j zTuU2M+PZ-};8D1p7Ae0yQ*m-8wv=*vlyHSnF0){_UP$A>7A%5uM9X zpX`ZMxU?q8^<}N(=+B?!Xl9l*ACoB}Hg*mUhsBdQr`${9-PqjQzaX-52~atCk%9w}yDGs}>KhrOpQOCy=Y#%;h5muE|;Q zHCl2Pov#w90kZOjlFVE-kzSvrpLi<|H4ZsrH_T%4XB=SmtsCaAyT zBszKZM~YhdSa?0OiZX5M%1b$QG>InxixZ(f3Ns1KuNCPA4x|^fu8ls%;@d`7!zDK( zY*a-h{C%bsezayu_x>Tuu#5qxL#?Z-WRdwPrS0(!{eIPm>xkIa% zfR-49-*J*3l%H@a#sC4?@cyg|1D!3<_bWJDp2)-9!<3XuD@G@Uf0l5H5#`5A(USn4 zRG4Eb0TE=?{ulwPtAgJdbtDOo%psHv`E7YwK={`Io|h5~cb8OLrRk7v6>P0QLtl(z zXd)8LCB!P%QZ9wCoaNBup!`B9v|Y^BK{k~$p^FQGg@ZNmT8vgal9~)S1?trXEh4jQ z+AS1TxVr-5l!q3kqU?jt_+BLKe2(RQ4nbdVMLBz1bInCV8lJT%tIg*}NE>y<&zTIb z?R7^SE9)z5ArXWTJojZ%-R%}>jX%@ z+ojM?0&Cu1VoIwz#e5{bG|6?a-|8lAl&+iYqJx|1v+OF3NS5to=0cgCDIs<1d4abp zFcO^SmOk}$Iwz<*$HG`39@En|^8WO1u3z2raoD+8F)}vaPqp~7upkTx^A=KPxyDxK zJ78a9?cZshvTfpG97^fw33+sQntPQ5SBA@3?*G>=Tamp*B`4eMnafQ0MFZw4oE)HR z%1s;uoZ6fYQSj-Wsg;{daiD9ewE8?Wd-!JV8k^NJ$jx-7+e08$Lg*%^aq;j95M@q^ z<14$_N5ccY;-6(oeEI&`KI)$lzUibB68n2@PNe1AlvR8$s@lEx!zhRz5fs=u=_H-LeubC)E+nn;U5;O=1V?knlH(< zfDY;@Ct+6hnX=?V7_?Cpr9V4sa`@7OX#PiakO#?YFq>}B@rXdxLYaEz}YjwA#Onqz~g z#8XeUd7d z__FKBS8$DPi<9Xny;Ir1`0BaS<1ARQ%7iSxkT$^%v95xhfZ9ojbmq^!omelR>3Z4{ zh7Ru7JMhUmC7|77i3NIQv_BP2G2!ksi5$SWLvhR z?sM+ivBFZ_KQGO zob?kY?=cm&Z}2mJ56PPd93QUR#6WMr)(@B2qKoHm@vQR3H~Q^tAE%BlPy>=tJwKp5 z3|EMf;L9I{je2%}_tl1Vg91iX8Xk4vGU^>h0q@w^De0Zr-<=5c1UcIxCY1OPFd%pXMsNB4l zdd6|+7)piQRod`nmlkbeG1sjN35TW40%sJ^-C+~vE+$)~I!mjN4Yg$V;tv`_R;&aM z6wU?8%sRiy^oa1an3^{ETsejVn$SW*L3>WT@;{#wzNv-w1qf_o7#1Y{cPCG`|JNsI8 zuD1F8&-Uk%#L~9`?5d45U^OOn^R+fjDW{M$nDFS9YaW~g_^$9%k$l4(fyH@sLEhy3 zpUnKQsfws4`gSO_af+wYqNvS#c_T4UQ@dqNmz)#we~U&?4;dxJM_`J(U^I!2z|vTr z{P$|P#e{sfP{Sv$$YbQ1dZc9|e*z14Lgn5#A9bi?&U{NZyXS4Zj8=V-p;3X4Z?0dt zfZhmhUAn;Mu8ttiluh$Utn@79hp^_u;L1#I`ghA|i(Yq#~kDWzL0ZY)mH8GC3Ae_KHK^UBe z;n)0g`fadrZlNY)jR*J!(o07xZGzT~K~bdJGK;Z8tJ?^rc?YquB-Aj0qGr<}_Wais zQgh+ys0NA~4hPrkf|b1g$Jbj$#nm;@qJcngcL*LJI3!r(PJ+8@aCdiy;3T-aJHg%E zU4y%OV|VlYcFq|0-p79EF?O%6U90A-IjayH^0&qn=Tj?sI%%_hhz9>C4H;?2J){3! 
z+?=tw)hJE8Y@<%DQIXyBd+qbRZ}MM!uKI|f^cCzI*Co_u>!DH(d%ra|>}w|tlKm{F z)y7!%1DnB&!qy7pc7~Z}p(n%}@qA!2FhBFgps1wLC6pjh^<{N+_2-JSGsQK>*bgx7 z6;D>or)YbkglD?zShc;c2$(oku}hrl`;t2)iW43_>YOFNr$a8}wlefQqU0f&+fqzC zC!R6U@b#4*XD*C?)BmReE9dnM{IriZ)q(r|CS9Z1d?6nr%FKzTP(j~x#!i%;>q)!Q z?F_Z~b0tQfChL=v%|-_W7t!61kr>p~EzE)Bk(tJPEU}VG@>_s=#mCWneKah{-=^IS zmDvwuo{6soC~qK8iQX69l~Gv{B&{RkufqesBXv}oaRGRGyTs2V93FE}H_s2bxD!71E zQM1qah|-6bINo#iam4nAM@XX-p@vEm2pr8Abwz1Lwl5_tQU~{jJ9gtcV$rUd8hsd> zc_S4&4K&C3xx}g+3K^Rru-m|(SZGIC)ZQQWIdk|fXk{n5N2S6{Gya_kM0JbyY3L#P zm7fGzc)bGj(YvOCLK7w*|GIB5p76F2uk^oz;+g5dJ=lU1Jc4%{VEKr3_3=UdWcFs$ zt5VASuh%)nSr02HI^y%9dwg3?^|~E7Q&Mz2)q@_|#qPm-(jRNwzS=(uI#G7>%k7HK z8Vnqa+nRtqRo`n=J%Chi=z9*LG2*N-4?=zF26yWZy3fYnR_*I^l*OnsitXdd<`whq9aw zPr!XYTaQz-4`$0aVkIJWj5feU%DGb)j-1Xo(E1JY`_rv-U%}biZS^S__LM>Kw3@ zxUlpYYR9fMp(&k?l~?4oZ#1Ysh+%sRUp$MAi<;SqUgV)n(dl>JquMRvv&sz4gu6(T zoa5;3l~v?g?bMmmQqmLkhtkdOtZ+e|OzD4m>}pzb;?~;l zF?VX)y6(b~19Fh`pJ3NnYq?H`F)?bz84FD-G@6bN4)nsxAe*A?Q_W6zdZjF1xz1;6d zZ1@|gPmGL`eStPQ;skdZJc2Dv5BHt zjAG{X@JNh*&_j39Dtte^pe8DnwxeIRR#Mxkn zSs&$=S!t0e#_d`qg<=t({}{-3kz`ncs)`Ra66(^xRTRbkdR7(UdNr;&#*zL`6Y}yX zazbQGmDZHw>sS!F7{8A={E~E$-n>HTs~Z*;R~yxEy)O6l{MKuapF!0N2^7|9{0r>- zQ?o?-=*o~JG!JH;H^o+g;+%^%F@GRG2^4eY>7qqHq9fh*&Yaf(-MT`ZaDI{`ZkRx( zIQ&|l;F^>&8tN#sI^8m}@<&`;S(MT(U*_~Q!ud{nr@93rwA)|6P{!ZWPi#ANA2TDZ z44#pgK1|!*6jtt)BW%*uJ(?(OGf;d4ai|i_5K_A$cKi~LUrM_<{Y}oNjMiGtztVvGv-UoIXQS(0S8wbrMX86`hh@>|fp zI*YhW&sA+l9R68sQOTM2QTIe$6#cqeJHky|FMFvcz&Jr3??>X-_<=fWJVxY;Jrag@ z>w7}+)PPnwDUFxH%>E?Ew@J{dr2f(vYaF}E&)-z;49fL^Hlp;kPZ1?P83Ky3c!=!0 zZ9qgjL-L6i`b+>`_UvlfGh%sn_OAhHLES+#CaUew5a*jUGqkI6B6rZ03*>;NxjsRv z1l@kxPHA;@{?(RKmq9a(@qRCim0XgcoL7lbtOh!hA+QsO#%L%A@6dCI^a7?*16KCffe{Eg(jHmYNE-5@p z%7U8$#&smQP?Q$;1j)XXQDbP8ldCJJRp%Zz&WU*Uf=76G@`C^;j|H(-_uloz#Uz_X zt#G@Pl$4^Tqp$c3|XXmH(m*P9PpEA#l% ze_#O~DihWIYU_)!CeEWetF-C|+)}470NXNUU|B@H7>Xk<8U3*zt;-o1az1EXS*OLp z?rsyF_?k<4^cFuKAj32#wnFqHO?q>smCWmkGp0J^$}M^R%nS+={p|hW*V)@Cgk{%@ z;v!bpQ){U4olC=Drc>VTHa@13@zA)LN)nT#*E3pSjWmRGXLXRsDTVXW35k@i?g0Km zJR7#|(9bZT@k1D+szZ^a5H@8BhZ__mF_>sH-kx-i_IatM{txA;X;(_47yTl#Eqd3d z!{w^mQfgE50~ZTP#7&`hF@vJc2`(yWT!&>4m z5IB{f&2CxB%q zzzR0%Vyoe=<`lm>$oARl=p)XhcEH3IgoCf&Q@9`pz<_@|Z|V?|<^&x5Q$yNzw2qVY zg_WCDm?vCml@OA8gf=Pr#5$+v{!?6)v8gAIK{8gt-ehPoRbl3ZevUnfkCoCry@OEK zJY0NMzLbtog`fHUg(|kC;zVc1iaaOXCyH={bK~O`!`G%$L&4T{93S$YA`BW%G|ggj@fEf zo2U#QUuAhbp*SwV4=yqlDn=|v;*Q}e zGV`4Enf~x9YYpzp=1K#f3WMg6Nq;3eF)@MA-a(E{*;J1mopO?d#jk>ewDKdY?iBdBg)bzeo`2m!f%zITm;U-CT7d-Z398~_rZ(#CJ-^A~k_UiNZOY69<6>G{K> ztfZf@Eq^vHTUbwHU86!>cP*aDS&jCCc}b-$cI`WM?ZGEsj9J(w+(GR*l4C>NszbE` zwQmIFk$jt|Il1*$Pqp-M3}0Z{Kh5W%ysK52&k~C;mj40eB-2a%0Q!so8r?a z_0%#LIXW(`l(n*lQ;w8ReDyX>q)#}tWQM&Qljco+L}T{1D{A1%jrF2G^?m!is1tR) zH}19O^9*4X1V0Q!458DkT;h*RymC{8D2E2RLz&u;^(O7(rj1PuH`U`HK9i+SYLLkM zr+yhp<~NjJYxOt{E^dj?GoGj9>tQWBQ@OGTS~m2w1K3NcF)?kdx8NCp_7$Pu7kF1< zNt?NJ3$(79`2oiCcSkVXCwA;+KMsGo4)UOtv?o=`tF3QKa-9fxmWhfTUaI|eY?g^s zNgqHtQYEtm$6uO@X99#2oCDGEPVTuY7sIcm^l1x^J`XW!86peX%-}jFND1iO~vmDGex_?$y0Kqvb>&DDi#=el|u8Uz1^! 
z;r-jxkWOqL^lUt?8(a+|IH&hxLOK1jj!YYO(igS4ET^byOvtdfcfw?YeH!;?=^?HN zuRO7|ER;TowSGBvlp3LXD%lwkVwG+4De$F`GvQG65 zy?4mHx7KWEGhA7-Q9XA;%E!uTHkCN`97?uXk*?O$hU0IbDILfS2R|zg>R*oeiel?M zy26^L`UZtn8`lTJ)=Sk+-C_TXQWZ%*<=x)j=h(UFa=ThRe$7tp^M6u1ojZ%FdP=SY z_WrEQ_b=)SPH?H8ez z*3&98H{=iY1@3&31weNH8_MN>n&{nedvSXr2Kn#~o9L(ttyO`WhqQ+Gp7Nw*>Rv`ZcK_ zUy~*3>=``-rRij`gz|+8B*eQyu(RG8wfKZTA+gyY$)&dWw+FGHjl7^~x_!dUCf7Wg zDcmF%IA0BFUfr+rn_+S{_s43fsZk$%QGQW+ddRraeaQ~7P%s9`a5&KrOv7d<*42wE z2pWEp5&nkp#@iY!y4-U`UTncI=FuZUMzy4tw@33(qfDo72j7ev0`f5f$bp~jvJZ9W z>t<>T{+113_Jc$Zsxxzxk*CJyNARD|Y-=Jt%vV@bf^?Ag;inirKhD$ys zMP{nZ%Z;e{=ok(6ih8n-A)h*&1!XA6PdGi-4Jt?D&368YLU55xkh{QMMmH*cM?S63 zo%$Xa7fD_g)+D8tI6m((aLu{>-~!p*-959iA{l3yMDs4c`ijL^ZLa!Ai0{mT?pPqV zZx1wMiqRWbnr{=uDjsfcXD6?}e$i@=@`Ojt$f$37I<7bYPM1?IsW8X1M0tkK>$RrB z!NGanD9my$J$L2o>(@R0`9URA3JqmqtYySKv03Qm%IEoL4JAeK2yyu+_o&gKVP7)rh8^%N*EhjZ#snYC|h8!eK zzlsN@8k#KB+3_@MOuzKn{91J6fZU!Z%pdvb>^kBgt(KT_iik$ZYxA`%qCp87UC-l_ zr5?KMqUp(r;%bt}VqSk?=^d(L^#T{K2FjwTv{@;EL3_|<*PNVPN96Z1O47r$aUMZp zrDu7No{6WRL)f|IX!df#XEqisopou$eN#W8`5aMyuD)DkQk9TJcs?M zEyYD!fe2f%gQaK`F8O2FXU;N-#qPzx-Ci~WRBT&WZRrQSVgl}pFH$gZN~Kkaa0F5) z3^1rWO=Bvxqji*7ZXh2SFEtyXuc$&b(X+@+Mxk zx8*{#9k02pt{udUP>|ZWUuG~&0H>;{9y;lWF|=4*L9sdFm3Ef5J!yO#OM1@G934fn zz&8S{5p-%&U4&j}EBRkeIgWR`RLiJ11(Xslka=YVksy_#N8jkBC54<)N#NG{%yUec zuG%8!%~(wag-!m}bs6gUWzhm%k73RJ?j2jhcVPj==lGwA^_lBLjSZ&gxVVRTedY3; zDleRA^zLyrH{iaTcch;>c#@~ct@42u@W7$~yA^Ra60X8WV7H{_?^i0rB0$YET(@{< z>GJ!1kjEb>Gt;6>=azdlH>yW=(M5UOt(Ddns_lX>mP7k&Dl7|Uhk%Nb7V2M|k126^ z^&4KOv}hc)07p4CvfSYq^Bzej{svm|U6;1KIeD7CEob`;A+VeB3+hlfqo(0;Bb36= z{*zjoYjt=yIA&l-F6;cXZ#bX4z`REBZp|DDY=Y{%pW|zwTrFH( zVm21G?A@!~5Si)7fbspW*Cc&}+>e-s&O8bbmX(kRtS0Ea$RAA)6w`_NCVt!Ms~o&0%&YxkbT)mh>Ix7j{|*sVU>C9& zT&v6NrNRhQxSOB;NgWOu)b2^r z38yNH7XI82*rzo==Y-e9Ctu75XFKhO;L004mUUG^E$R;IR%q3njg_E0qQySy?1@LR ztP9Gu>EGJ)B2g(35-vEhoxkgjJPDCCM{&F(yKn1iQ5oCWkzQm!?vRzJ2&uquQbn?` z{rMej=0*d@5vDAxMSqCb0EV=30yYuj!X_;?qJCnnEVN<|s8}Z+8@**2#f{c_!VQi# zKQXD(Hbg_$yk`FxdrB(Qt0qnL_qS4gh%!6=I?20K*Vf4Z-il$AXVvgCM`CSpqT&&D zSMg9=&W(?Ubwk~{&3I&{z6WAXcsK(VVKBA#M|X7)H}2jFl~Eo&W=xcGQky}f3|6}QCqUB!#ijs`0t zTQ&Q~!0M)BZ8TOps=U0?g3L@3CGsU3;|29}M$af-nn%Jvj&tV{Ie0d(fBgqq`yTCU zK__lX!N`7VRb4o7nZfw;CIh!CxG&U(jiLB%sN0e0Ab#T|qJmwb?ow5P%eg?CxpkGr zU8nJeCZ`7WnH4Vk(@#pE*7D=q#~@CsCpvQ6@n2=P`Yr5o)%}E+gjf>g4CNT`0f%He z#BWIEpWx7yhHFW?G3QhO|6GgJ;96W7Sa#8TM#{P5STe8vD5F~r)!H}q#V-tR*Y!`( zgV-k#7hC$1B$gR0Z^pYj(MXE@z0Q-Ul8(-#YwG@gGEDggUlE4=iztUk#QvNNsv@RI z|72H!9KJz}3v)G8`^teJv+@q13G?0Xy7+Q&B@D{?UGhj>ViNC2Rk3(&yL05Wu^aI8 zp6zN&>LJQZg0h4&QX|M+0e&7rxC~cEE8r*g)R_mC@`vEZhIDO41UD+hK7Q7)I4ACy z{m#kDlm=uJvkRb13G?-#+}u#FvRV-C`I=jGJ})u?l(jLM8^>_`An?q;hMIofiLWaBqs3yxk*uZz$aVR>IGL%}J3YM(%wPk;=f}~P6apxSGuNv(6 zJ2hI|w=D}S$#xyc=~%$fDu{!xvpq6awG@GR(*-Gi&^K@3I3Z~`M)TmpPE`*BBhQ17;is=4L&UBx}ogcR@-DQR9n2Qu0qTg2c^*ob> z)^NB*u9Go|3R@>jm+r*e`6YcZR<9uGkXk*TDzg`D>tpzjYU7_mHgk7Xo8y@$r-Mbe zvjNxFuF^Hw=aZ!!q&>xcXA#s~A;2_&5zaRdhgzvo>>G)P%^Z$UTB^zX(y7tjGE&D6 zib@k`9!YosO-}VE7D9SAb*vQUO-Ah*l||fVxG@WjoHdN6eX+87(y?6y3GMots=ZJK zF-dZw8<(^h zEaXOh^;*oSxAYK0=-7XFR`i--wp?q-ZoHvXYf!x81FMVAxXs^l@fYk;!a;CoRHwC) z)5o@SQLTv&#l&z1tZj`=rW-PG3#D0BMcf(zg&#=~^vwc(Bs(oB0>87Y{fSS4qJ+GA z`vt|tKaqbw#WJuGGjpy5);!6to)u^v+(iQ;iV3M*;v-eKi|VI(gjc~6kFbt*(I-b&yl z0LR@P^1x)e3pWy%>0G=`0gje{uDx0*oO@d7I_R6RwoA%eUCKA57nw-GW>{1MieYF; zT{(ESdo(@|*XWwdrUTq%)+{ds{A|~l(K?1hr@BH(g=MA#sFZIvxE<9}B8!=I=_$YJ ztG69$?d2Y&?}#xcPWH_eSwW~gC<3Di?Ygq-y<>Cf78WOk_3c`%=}OOS#evgbCq&m$ ziHHu<1ZiEXX?!i&{}=%UDcFEqY?;bFo`}p}7Y%9>=`PaEX;cgA5OlcP!0Be2pUQ{e z&8zM_t$vGgb1p1auo>X)SGc#kUxw@Zi{LKm12bBDitz$jNhwGnuRX_Ta2FT*`p22v 
zon@GNaPk`q_zse6lir7251FA;cElg|aUzm$v{Nltg0%7+7SZ;d90kmbfl{jsz$O)4 z_#7x@pQ;q9e3`Fn6^I~3B{5iFGQGGCwZ6J@3G({d^NWXCCqK=?Hk5h9z)xGlny}U3 zcAaC|YUBgvFT!~n(N)&(DEN)!{0$`r=3^tp&JS*UyPub`-%*yj3rp*`HB`d2=ws z-xX6e|xf7jCxq`Ewi@4H*CSBk_6D_=Oq%aHIL zkH>EE&h+FMhn)YwXS2J>rSHEfio{6&1-4wtD}j;>4LcPaXE_}z!8kP-i!-y0(PpE+ zm`$Blv(!~FlhU1FhhV|jTp~uyqa=O}e;s0|NB^5U^?4vLup^1Zg>&AX5F}~gSPdH# zqC3-ra2c{U!e&qpDx~Xzz#jfWi&6*|$bbz&+G2`f8C0~nNCzhUk=vG&m?MinxX$yl zu4k>kYu&1?eUgrQViPkc_s zqR&PK=x#NIA^5H?y45Hr@>`~36Ygn?8R$)V+ zU6hL0>D`k5SnKl8*)HBqc-Q7VthT^4>==M>7*%8&bAZ*+^reUPyC)y{GI7#(lR}+% zl`G~}w~f6a5q`^UZ^fY1IT**S@U?RHd3r^12b0)O#P2 zyCSJ}7I->8kulp-s+5zSWj8aIrytyMeq)}lsZ|VS#8HmrO8Q9Q&bjm`kd@^DvO!f2 z0;V6d4*3R>3Jg__b;XW0O+S%*>4xrJXI6{2AlGu1S5(C05($?T?Ih)#I)8 zkABFRhhmwRKaYo4!S;3eW58U2b{{U!*bOKApG5)agp7j2@++Dx8LqjslOOD09?7F$ z6$*}5zoYvXU0_YjiD+co`B0fmRxIw%hLZwYV1GKT35CX(Q!mh_Yiel?j#)W8S+uls z|Bq+JhVIBc1-4_qR3Z?Cb9v>s!=|mop?bG)=Yn{HOt`ez# zr7oo5?*_}HeoriXsCcJ?f_P^8d3FKp@Ojw+?}SKaIlXj4VR{%ZM_ckz20=f99ucgT8SAL!)gH?ke}X_kQmJD z&3H({*uA9mg)0i0ff3z>q8GRcl#{s8N`p<~ZEQXGww;x>#=OY$XA9Sp!u%5C~+iW6{p|KO#u= z9O}{xzP<`Y)wNSO*!_bx@&o zW{NIept@B7h?DT43@~?? zYeNEMd6>VtJ8Am&TAr6_556Vd1dx5XhT*uOQhR@0!sD!$FZToX7^D&RvNM^W7bTM4 zC$e19DXckLDb%sKuyd)rZ*>fTvakpW!eTkT`q(nNaaHFqM>3L#NgXffC>l9#yC0wLH(+jX5K^RC`lkSpL&8eLuhn9|V_m455_;&S zC-}pBrGP{K>=(Dyl$?}%+s|)7F6=QbKj2h6*3YD0$Y3NrLjA+Rg~hDFM_Sy{6cKyj zX4j^k0tTJ9uyd(}5@dL;_;_l5{2CerhI6eFpg%A!n(CQQ=F_395txvdT_3D|XbiJz z%2dQ2I-V(LsYJr(PTwYtp=a!nXpvEHdie>y&-vVZPZ!74CR(=2`n1Qattnhds7dpr zslQ0$M3o1#BL4F7G7ZW*ru{F12X6Uq_ya``AtJ5ocBU!fIqyg{i6G}yD(&YSsdbdD zBFPRTT20CJBdVfV|0$h3fB=a4x-2|F9;atgl&R9WX@b0LEHL38M@#hdstH=FEd7Bs%`_*-JYI~MAkoNkLb%@)lVDu}#qgWjqZLwZcNAXHA}eSffR zNFb(_zE{*(5G!(cYU?fJbgb$Np7a{KI7Y0F|FKe@_xG=^VFgI-^)mUt7k8x%9HI`9 zyss%^`a>J~jbsHT#A#)EJLqQF3=t`T$uzg`SuE^DG#LK%M*ulMXPrpBeCfRPraS>G z0UtlzQ!$9K?DG5J_p;}EDS&cDb7g*WxB)m<)4g$J{~V^CPulkQmhzwXuu>H^B$LW7 z9b76+B~($&sv*J{$F8NDGA54Y%jmcEO3{s+NQ^Psq5^(NcY}5OPQhWA>P^K zX9*oaU23#_JQEdfMjV=TWyf|fsCm5>wAftK(EbW(giCt zh!x-G{zokKe$NEzpH|n4gdvc*$LPwW`x5RITvL$c7NBalkyZ}7vNHCbjm=_r51RT1 zA?h|k#fN7Ge^b+`XR6cD7$q(wDxfMuO9(8_o!U%xT*`=71e z@se+-lAOAbBdI%-79i$D)Xliu(kkU3ZTFo^?4TVZM{|Zw{f~cc@tcPf)A-S@jV|G0 z^+(Vq0tjX#rst&Yu(+fY$RzF>^;WsjuHsK0>70!`AuD^^W?YiT;#$yQnK^3G8ho22XUIm-JgRT4AFD+8gM8E(YJojpBOWKOXOI zke!Ovm*cPB3!pk)`hk`_($+mOFhcK)7&NHZ%m<6?X6PC!9tfIFf2yr^KWn33z{pTVSVCV8D$MOOV9z9Ew6^dPwUoOb#kywQo5mi@qW&pWyQ*+{a#!qEW zjPR{?CsRm8OvXtO*HJ5x!5Hm2UqbR02QUFzN>Wy{}wt`X_NO4{}_sjE~*!uTD0WR2Iy3)v2V6U3u z^@6X4k405Al9ob*c=mX1Wjw4bU2@k06U&(P>|#&HEmPyde%kCP_?y%#?i2v;o()(# z!1mI{0N);8QI>kNX=ro1O1h&!*y%y$IhCk@FLI8K_Gp57tvjwXm--!vZeK4}=F4@y zkj5=Egj8(`4FZawb~rJg3$>*yT8A-|a*Bz|s~o$`TXy|Qd?XRfXE=Tfp_I`pf!KQ}t{ zm}Lce!(>$J^Sz3 zRT@iATV0T;%BkW? 
z9vY$XWeQcbebs?slls^`dkaIypEMeFg>^g`U!Nfd@cpz2j!Rv#A@JM?viY)(1;k{y zwI_~<59PXa8c-12UI=D*@U(IRWq^fZpu@tTp@kvXO>$uov}?Kms%AAw-C{a!2dE9v z>T^2Lrd(%_?Hz~bXMqt6Ih80O<$#V-Sj|#Se#VSPG(E&D)&<`)jb|Qp0@swf!DBlX zg)^Mgo4fM0a*W%wNiXw*F;+ejDEjXhkoMnoZET_e5Bk2vgZb~tr_>k{7-nmcj+2e9iLnR&wJjaS?y|VAuF~_b1may%?U0q!Ry@p0Hf0r5!sQ;Ioq#+)Q4p{v#z0m3V?{JA+4l?<}qm72!r;E zqtcLQ1i6idkdir)$ST$uU>pvSN=yJJ?QY%LM5MVFRLG^Ic)kxEiF5sdg9}-Xe}lI~ z5gQ|*HL13|&B$!)N^oL$JCt|9>t`lAMyy>pDTD+x_eXNFbW)I-m=O^#pNTxX3@?gc zM-vp&*!@kXr>LK{W{0So&Eu=O2icQno6W8}O%5?kXR?jRzSY?IWBV{A+2Rk?>aA4N z{iCCt_XX(%;dOip>l2BV`N0zJR#9&OE4!J$8uR$M-{sZS{>ji;^^tIq*VpPR@AVGnI{hR zhxKX!5nop5-h^xb(Iv$X)v5;T22hh~?}ka}Xw|C+RtKHepj!pXbzqnor>K+`ErY5X z*nd&_tw|ORGUr3lp`F`=#|IXarQDK_C$K{lrPQ~qdpABdQuz$b@DaUaPll503e42h z>aCpj^}Z^qxh*lrv>jY`w;$UT*(U{dtBq$?E$( znn+-;n#1AJ622S7*{iIf`K@Xnwagz1n6rG1*fud(+K>>G9iRrR#dx zKm>LK%NG#Hx_y15`n~S(jj^Y5*X++-f$4biBdErzH2%h5eqw!HtitOWY6pqim<0~& ztU2ja^+o|OAgh&~431G({O_#7GWe;DbFpSK6diAi)$JD`TjsM<09uJ+CK2H}gxovo z+s?<`Ui)9^<-RmwK3(^s3fCcY`m^x>ZK8M!cX`LS-T zGh(gj-h8OoE$Ty1#Ck@VZN?^I*Z7xLpN9raQ1DwUcbGv5PhbehcqR%@)GV#f#Gb%z zYJJDxD_l*~CWi9U^|@ z!V<7vg5B{^L&e3q_8RhA>vt1CiJ3F!Y5(TV<`tWoN*CrrjE|qKg7Okk9lX$FU#Z|N z66&?Qf2+h7kI2J=riKRj7V;3lRW1pK7=$0eeV z9`ygpNxXYCy{aZnP~9>wo$ewvt?613iV@v|e~ zB{2xkUR+v`D_!So0v*6$V(O~pf2L#C8GVhco@gJLw$2$1k{2p3NKt4LN>5K$sT&mh zuZ#p05(A?{H8YyPF-%!<>=KG*2wCe%Hx7Qt{Ao=iGVIuH9z5RwWm9jIllG$PXa9>26iz z8?J(OLV-rJZVS2JFVY`1ChNQ@9s>niyRc3!oxX7-q)QwW(kCs9(_(fZ-~p9w@W+Lk-ASl z5M78S8t{@_jK*&Za`P=D#{FyM`YvnZ;;UMP)$q=j?>G^3vy(D{f0dg+HyYFMMyZ1j zJGlQGqb7j=WuSO&7SRQ)%U^o1rQ4HjjEgKO%QZ2Mgj+N?&bH)R(1l8*0L-+>MR!*_ z0s9x`@t0V8HqMrfXUOgI@$PHB?AbOoBaYx-xyUw=wD8Ui-aqZFW#lK~U>LNmf?O&t z)v$Lx4b|0O+_t8i3-ft&*2{ji66C>n#IlL4OS~=S(bv!M9<_n)pP!$f-hixIwpg}S zyz5IiDYuGx!RqbrPvKdsWK;tx=YPM)0UZWVL&=P?YSXU8ro%auV;_(3O!mZ5ChzsD zw<>ncr_rv@m;Xv0&_bENykwo0xGrL(F9#mouQu+ke+C)sWtXXNY(xx-0w)lU1t{u- zClrWXa{HXGS>;W)Kuv%g+TBN`I>CWP1e`0EYYkMyQ)6@5CmP*re*RL3bd%9xNmfuX{1J`wSNJ2VFXCB-BD34s4-(-$!E%y@7kpqWiJQ$%#?Kkk?y(2a`U4)z^p2r1`%U{O>5UNhj9k`EQP$ zNJip#RSG5Fga$eNNp@NL8)Motdn7v1u!SHcK>oWm@jRo|bgdM{CLG+1h?%+=OzelJ z=%2~;cX}uQuQwwkIXU@n*9%C`iF0zZLppX&`Y$)~9#+|(qBI7^$7OhqkBb9%=n`8KI{g;=v>bv{aE`3(k*Ue|>l?OMhX*z8;%wyqtgFlmIwAxNo$C zIdBg9R-D*L`33XBoM@+#g9BHA{QT8d*j>#e^Oc%#Bq?yLJb?7lDU~oXEQbpa+xP>t z3@xp+>fTHp*2Ln^`9+$~LbESS+aKJ!tTi7Y0agm%D&CQli4p2;7T)O7Yi?@sdp88t zlb%SYv0VEL3KS~u%acWM^6t8sY%W1dPe<-kOV(oYQ2mcCuK|D40G_*kaGjn1Nc6uW zPk@pjksY$Ko*4f?1;XLIkS7b!m;Kw~PSO?Z)Nuu!e#(v+Xob+bt}AzO$|K|dKSLaGm-ReVB<9oE2xLWOt|Sle=_ zdt$}k4E0Z9@xJ>!Hu8wv3vH1)xM3HLn#c;VU>vWAIWbsVP+0g-3Geas1d}awERqvD z=R4d&0%KI-X*Se>$(d9w^Y!#^+%(EX*Dct=4{u5QUj#)J8tw)&%I>2qwsD*PS+p9g z#AFsNYkdVJ7mPa&_Gj{eMQvwRMxinKw=Yz}zad$XhkX0idagL#z+~%?Z%N+)-(((O zm>-+i+N>MJ+uq&hIm2@c8S8u&{<~2V>&Js(Y)4y+YQ3(TfF3VxPpRiARt4*Z9m|5X z5iJ*N)Uax^+GQ2tG%d^})QQYxvK-+QPbH}9i_x*(y%kKXs1iDUF2wax+fkv)AOlOM z1zC9wlD}pLyd~(b3q+OoqLvL~szA^AL>f$Hd}*nFpz-b*+TDcJE7DkwO(D7!9N zTA5qoA%Zw^m^1BAF_L=_W3MKHEnroQIg561a?-X~Xxs6>C-R@m;_Y~@r2mC?95p-Y z@_91E3C0y{w@mFE&l8;m&c0~ntG*&(?j%S9eF!7-HOZSneB47Nw9!Np9b~_BkgpWR z7K=8<3Hv0QAH%$xw)#?MLS)+ATs zgL)%F({8NSO$+!nVF)93NIZ3Y!T}zFf3dNz=L)~G=0=a_(klv})OjetnJBdKVi48v z^eN^}gTpQ~3d+Qt($WJ;0n5;jKaHttmW}hh!tbyHDtoC*xkRZz8Wl~fGP5U4Eih>{ z?8aLsnU=OFsE50t1<=JXTTt^r6KX>-l&Gj^;h)w}_@8z~OS|u}T;}IgWIQcsQOu=T z2nYz2_%!ymB_OQiFiq_dDEg>=RXD!`I8UwFql91o8Nl_w-}{yU!U4ImGUl~xm^GnT zDHXK!9$87cSv9}EHIe|zqRXmh+05*^ATTFt^5G{th-S@e5W($i??s@S!&*W}VC+0M ze;Emy^Vk^Nv{40^MdP^u4l@3civQG`8I3;^B^5TLpU7n3$E6*T{0;lehX*lOI$cQA zGa!k*T^+?u)rG}-P>#1Og;6ynB`6}UR(VVb4wsjg@+cu5Z=mJ>df0zH&^Z87S~a-| 
z`&#?A28TT0pqK(ZM3>|wygb*(XU?xF$egs^{MK<#WKbsjh)m|Amr`V6 zMAN30Bh$_MhB_gxpw8>*9Ir;MVpRV9mRrFhA|uN?JF_DphR=o6*4I-3I73%`c*8i6u_YaWz=V7iS`Js4d;oWZQq<>?3@c>3dN|^-yOPkyI_B68n z1G#~pz}JVveN-(OYS=ZcRQ?ZO@qazyKOagkPzA<|{nE+vf^_=**03+-{j_~vkcI)0 zi%LQ}H-F4d>23_@4FDM8dx_xP;T=fl0+{@?D)Yv7e)%FvsvTNOy=w&wRw=p_Kx&m6 z9xX}9Qdx}`gH9z?1!&BOGzu1$FZ@S2r`!F!c1Gy{dn&Il_F6vl6IFE$N_a$L?W}#} z|Nr;SkplKEZa6nQEbA$N8Z2ro@%wn3Mm<2jR+>4#qVye*f?#eYebdWXP&&I6 zM7st6_veaR)Hrq>U4e>JQ3jOIOyjh6u>-*T0Q#3Bq5y!qnK}tr|F3E5|2U`0)HfTx zgY??=8JZM8>QPgh5J0>2P{X74(Qy1KZP_71kWDh=UPS-of5>UzgqB|Ue*)`8*L_UCHSYI`?H)Al-23u1j}=ba#VvxAb@LecxX_{=YHqa2P}GIs5Fj z)|_+Ay^c<3t$EPn5NOA5`RAq{9wF|h^EjfYT>{M&N@Glt4uU{r|kQe_iw?JRPpkNc(Ui%lbVqK#6arqERSV zv5ob^gC;eX9r{|W6nx99GhNvD2%s6P*qe6p$5B!LB5Vs8Ew zisNU+2T5SAf{^2(n9@K*7q~R~x10!hVCCcm!{osXw=BGKk3g?$tF_LLJ~KmB zv^wmCd07fb5#0rczzwR`d1+!?I~T0Q>=DiRprVs`D7rI*b<9B^MXdouC52m>AG?z$ z+i@*_jw0!`TYFyct7A5(J~0Vmt?Z7g<_C*)01opr10hS^*GG7u(?PmL-+ZlfB=P^3YkiC4bt4hoI{T$6O~9$d zTR;{aNwitAkp7ZoL1i#|Hon3qHPwTUceL$hQ(s8=4$|iaH~&6>Wx1x;J4vkeG%i_$ z44RdfS0esKfEB-H>}zapNm#1eYW=jj`|Km6F{$!N?vvc*tI4sMF2{4f*h3>Q;P;ni zH7O56bYr9IP_8k?DD55UOBQa5QY&tZEi;OrCNNTJk=^ds`Hbt*>|tFRt}XTM^_2{< z31DPP5sIwf&V(O>O z6dJ?eAqOA)oRv0f)eoWgFA3uFV_?OCvUXO&%7EkM?+&%#QAFg_SDy74-CYdSqM@88 z!U2LmhwA>aU;Y5e4^J>yIp9|rWD5$eLnIZ#|D9`-dhHz+gf&0yr=5!bF&T{x9@x8|$9o>bI( zKxk0&RG71#KYf%#E~PsNu#s&QG?JWS8x?MbPNmr&9vco|S%FfraKO)hFgriSZzP*3%rSV4f&8@sxsQSuM34{6*Mz8>1iWy zwWs~@#&o;)6UGJNemJ!v^4(osz$sbG-Hr)6pNFL-&AE+DoqWZq1Hvd0P}Rft)b)I6 z02J!GDF!gZxS2b8=l^z^{3lub5BSq|kEWL5M~GyNx(%nMK%>u8lECr{k6T)~XLi-5 z41MiK0Gw^u#CVM0Dw3Zac90pZzK`E?e)|mF&t{j}D_y)58i^_4#fFkj z1h}2_%~GuUe;fAxvU66=Fw|oF2*F>RqRN{sHpC@2a4CwbuO{;k*x1F)n_j8k=|`b-U`ouxbB6S z)B`5vT@rLFO{fS~AHtd;XnaB`t=JxLJVp1bQFw=BlRKm|>Q5YU=YxZ=<+552ZSwjS zuy{3#-jz@=V$qx`s8Y|l=BfeySG6;kE;IYS)^0oh|AA2c$4>Y+D%KQ(3RM|}W>GUl zw!>O#Q(#oC()`t6Wl~?GiHW3pILXn5>(t4&SN(E$z7v>Y#kljPP^K|7#d2m+xfF7| z2zn9vOQr!nomD%RMp1shHwA1m;0M&CMmJNH#GPLM5^8_G_4;%Nif=+Voh|DAVEC7) zxPH&A@)hkt$FoAvq&b*mIef5g*%x`3@IYZYgVTyx2*op|*6@?qxfqT0uu`vEJskSp zUCuWx;Qch$rg_thvHDl8+{;Eq@^GhS}GO`kL3-ClBk&`kOTBbSrY z{b`*hGR2Q9D$!d4a`t(Mii}Zg+#coK19@_Zbbl^){>CnI#SGgxmvQ;RptMw|;X})@ zVXD`s%4-SW#Tvn{zmRFEql`gB9C*~`#jh*&4Kq>bD-Ty0bC-d6(_5N0f(ccjM--M> zZjvrG(hpTpL95gAab6Di84Ew!;7_Bm*0;A!YPaoT_u#dnRAX|^B>~7P0`x9_4$4Q~ zcUTOK#6aUNP;l8dzKwsI;C5Pa+qW8SA{8;Kh)ys+{e8yFMrZ!xoV{Vm0bi5c)jMrk z*MP)v=Him@qP%G?5x5@tijet*)WEpDd&{TTl0GC^Z6pEjk_m0~T`J1VtSaPV!`rqd zQa=L+0hJDab*WISmwpf#azUt}(6;MjoycR3W_?P&oF{tboE7>Zq!%XAP{2Prk}GI_ zdhZ@l96i)0Fpx30wBeuDoRnZ#+|m$D{grHVY*>@ymaFe7D5FeF$#dIRT&4QmAmv zxZ-1KB(De_$aAbeg>njQ9WK7T$D0qnjGTn5NDQGH~Vr4{enNYAcat*Zsy}Dr>90y5Y@T zEqsikR$kvPuU4iJgQ=2UyML{~|M``Q-Y1cp7 zF!~KnmrUS$P=u!n^0e z)pD;A8R5r^e|*}4#lav1H{ZP@W(E39RDVO6YiaTpBY|34mCr(Dg9KY7j`H;q)0AE% z4E)XsS9qUB_h-#~ULShYG+e){^mC}n&202Y0R=#UEngsl4leql)K~P=X;nN86 znm(4pW{!mZJe<~S>;awGmAG><1NQ6GjL;safm8#yf7}9aQ-Tv@qs1`I#F*#87qF78 z$az0^O0gva<2;v2O=V%5Q~X$kEE<7`H=0^5V^vvUpU@UTbPIy!Y#OK^XL0(DcAheA zUyXL5J630LEi>n3Q=$HHOQN7*DI2`)lzid?lf?HDSm~?U7cYU!pNtZeGWHwPK92#R zt}(Z#4&ZgL>~dZbj#UrYTppmBtiSENjQ8hn{6JT`fYoz9~D!J6mO_^9K&(x$QreY z`8IlRR1?&eEOX|O+|u!c$^`zkP*(_QO7}O4O2u;g!=vJg5yJnaREwOqE>?!%Ufi+OD?GKA;2(Jh$V9%ml0JTE8x7M+ zqqGX~F>&D9L3p=hS@Y-RWAj@U8!tLkLr~f8T=n_j{_*XF#oI-am*^)iU*;un{5!kz zo5spM2aDi2ak&m3SJTyb*-&K`+eYLPLriF(fIfjr{gQ)jnn%s8WZ&eho5UkMl%Yk0 z^d>Ln<^6@g-}7Ov^z%i{e;UQ04$TmcPYz%3YXlZl96_y=-S+pz1>3KGKP70WUv#qr8%pt>c#^&h0?k|G z1vx_lHDA*hS;Rwl7mN!tgTwp2kU*pEw7Dzd_d9d%>qP!+%KyEwWRqUN^7U@!fW?6y z%wrP|BeDAaWuBw9r8YkX`JbKQ6T~5O++yES6lp}T&e17s?K!TAPYuh559|E5@+yh( 
zybewOV;z8>mmt2+yNv#4A>5%FQOe?Gyjm22yz4|}g3=%@cjVdZsW~+~EHYp?T}VaF z<{m-4N9D!bG@w*(`rChfg|xhIj~7U}|NGLP|7ZjC=j&Yl=hyk-vIOEGFBypU4@GNJ zVf@*!zUt_+NlgmNI_x56p^!tYc~OTr{GL|w5;E>NSIk53GD@;eYyi(A6@zL4^>n6~ z&vyl(<3%B7X$f6XG3CU+e)lL0JS(jbBi(zc4u1{F6(wZe%gi-=E2C8w9O0Q(If;TF?IQbR;p2c zY0qT1+5OCq7%Zr-(ri{9|Qq=dy>dEGs?qPd3imRsQ>YT z9zc(q)s+fbHtdjr0Oo>gM?Vu~>)qrxj6ZKZSNhgcJeTF9!QSOni2KAMJn6Is4YC|a zvrSlrlz%HOZ3xoOB~p>30sxaTqxf&VzahFUEio)2Q14XZT4^s{gz)qgA0WSmuh_0) zw9Go-f~Z^(YE!u*m&G=9$gn)sDDoNE^s)ee=WSzgbtR)^bSd$Z6l}9mRBSp8tQgnZ z3p&olC_xs|)wdUEX)U?9Ln)tuSR(dqK(~>Unzp*J9WVz90n|C&Cc0`)*lp4NV4U9J z<}ZE@iIp0R=K!tS>Vl%ZCnV~hr=ZB`j;-;H4$8vDpA)0Jz6-lB_lC4V{rG#D_=5Tz z9^q7(;F*#kc&>swBQr;rD43nSgBMQ*j8o`1?k`q#|2PRoF2rsp6 zB(mfYi;So;T*R8Mn!+vGfBae%DdVZ)PXRXZ>(*{XrW&9IMp5{dC99thXq;?<;(GrQ zV}ejra4!+v!uOQ`fRfLtgD8DwIPvHTF`Vg*%AU5Ah`ja78HBQeH2k(OS@h7~bhP|z z`z3?;QA+*D3zT;hchV=YS5qK<@E!o4h#Mc@dajB^#+p&>SdLw_Dg$8oDzVu1f|+^; zBv?=3Yb>nWGnjED^Ci_JYxPeObfHivJUt{XwiKvS>u!>l<<+-_UArO}u{3DVvCvF@ zD!*Z?b#YtXQ;L~s?WDFFD-Sf>EtNL}vJ_WJvuZ%IpIx^`!4%P=&3Zj6JA{`0svk}N zZ969!1#rOE(&`J@^gvuAej=FU4tjAuEzi)gn7pgXfo4%BdSJJhdd z(Tpo52bI0!mI74@Jhcz$E`U_9xmPi=msyk%m3?|MeVj~-uQV$?(XF;z{K#F6_uij7 zjR6(Ppo2#esrNk7zIc)9pZFiB1zrfx=pY%8;8mtBC07RgET5(@=k{e(0PqFim!DnQ zGg3sD7rYp-eDb8clrPcqLwt-<#Z{A)BiCJl?Y||b>%3=nIeLqwJo6I}Ad|)cfI`;9$<8`5fsE zA-Jah51P5kN%dT+ec%75RQno>wNY=4ahHL#+~fde3dVyWj>CLwG@#;GcGDR*Lr%ex zp-CTPnTMzVKo&~Y*hTk(rFarM@DZAff z?6}JX0-gj7{I9pU<|z$B5Y{z=IXB&mxA9mFQ-ZHI6fhUpH7-@JlO~K5?%&p)Pq+8; zr4JMWTs_(a(p&}e{ zQeC%-^*A)D(Zl%5rk$g}{5+(VHq=_Lg*>dH_c3rD@LFE^LkUm5*@nkmb}e*>AfSjO zDH({>`&(xEA-M4jE?4FM=jF#a5^7npCrb|p164pth?vd2taz4Bn*Hzzw6Sy;8#TUe z$M%eJ*0gf_eUS4IuGaV`G-y7VH=x9?(BemVIqUAgc6{~#c97u22r7ZRWd28WfFeCEvsccoUfoJ3WATW-xGH5 z9KSxBn4Y1aJyzr8H_?d1OD@w1j5g9QGtaKJ?F23Fjna(J{|%T3c0O0{Rhj>O4z5rL zfhs@E&#hU{vmLdgTNhJ^fs#xC17KT-^pcuaR-ARPIJ+A?0#Z|W3EaDB8N?*l5B7QY*YS;${fuJ_eszwjVK+sug9J)a+=utz-Av(y7eDKy7ciYl9XM6k zFV%TVv|dp)ZYn$c(E35SV(kS|IZh=D5Ua02k~eX!TjniU88WNj&{D5DVdT&@0UbpH zEBcSmBqIKgf@aLp)l!J9C1Oh+YSa_V&9yh zu*JC;H0qFca;H;e2t`g8NYw(#@ocCE+Yr=|=f~+J5me7nbUWMT0HR`Xf-^PaacM5qxaq##Z6$9yYX#(H(c`r)=S0 z&Q5o;xKox%DFTKcV@m~7V^MWR^dl2$w3poCoY@dbP)?yJB z?Gwal?m^HcIX80+9Bwk}I<+J@v}}On{%X87)_ylw&68;@HQTu{+R$bDdRmJig^zwuwLmobSKm!zlb5ag{kyTz5VF1s6+G@z)vJ2c8=3hPf9 zebU%na!Pp}A|SG!+0dw*3!bU-(=I5QY*OlUZ0mROfO_zUWo%0Tww8yu&$MfcL0=s3 zd%1T|F33wsO@dg*_X_9}>=Isz;mFI%R!|S~@=*wUWs%7AcM?GjGZzz2W>Her>FG5s zBm$PMOHS3nw@dng1DILqOe0yC)vBL`P>$HKsXX4>b2M9MOtljjb2vu2u8q16@a?Ni zmaz~W34N}-%HLd{fjG=iMX4t20_hh`{sSeiwpjDIp=kU$1r2MdCa&>6qXqbz-*JNe z2yQ{rQE`%_|fL(w_)?Qe0jJ+zUL;$vQ=)YhZ7TR-eP2%?bzUa?v(zz05lqQXh>MC8ar)_W7 zy7KdfoXHgRM@@h=+#fa~

(=5J-F3ZDgzFm>ldFfV8j7Xm&D!s}>*5=Ju$Ri#1Ja z;oIM|o>E=6$ZcG~&fc)i+16`D#k7E0mQGOS(jb%O%5;O~Wg})L$iGuB%y??1JzXtR z692i;fxoeW!IK&F=K=Et&g__NdySJcls!Ye(*`2Ex0~$5Is^)%Jf|>xb!}1#Bxt0+ z)(%9uGShwW@-`2Aop1JPba}`}4-iJV%jQ;~V1N$5w-@pBLc+2KG87FUUFvNsSA7OVVat*Q z4%Lmz2W4hunW|4RF5>krUFl&)y0Rs&OW2DK`}uV8jW+J~=Iv|Mp|-x%G#VNBxGCXe zBXv_=G|YDqyhetvEVTR6b@;#cyh;Z3hH|I5_Co$+@t3Tk^aW9;$fJs^Y}G|Ipe?hJ zRZzzvB6Qq^aiN=7ySXZCpRW59r0#%x=nPdO*c-vMjnsRm|6f+Q6ZTRka6*CxVSUhd2R*dF|s zw*}IEMJLZRrEHpSd-^q2`NLKW2Sq-fHnkb;j!@IR{-KWHTxsGSZ z1c_gNQv5)g9}Dwp{IX%;P4_?1qOY+uxE-ds*1KM$hCs8J(O@12%~~*U!cd~O(t*O^ z&dpc;&!PYgtp0D=^=|3+q%LTIBSEFx0~c#3GXs1bA0@%{XaF$azRyL$o>h9)@U^O9Bg1tRTW*Dm5=uiQMg*g_%*(4^JG0dA}f~wh6N;6hUIDh zP$d^&it9JQ#x!cy4^nPjVNn^W!%U$A9uzq37~SHugsj?d#r$M`2ERJ=u8We-`&3kN z7UqGH_15-y8Fc2(l~6x%MD~GLG#B|X>-!P=(s0oLa#syBu7*)b!}vtiyB%2G3m5^ZNB zI`-U9B`x7!%Clf4DUf=XlS8dIgk@b6s=mV3vNA1xbLQ94@7UOfb#ZD(n{zj8CMN_@ zHGWndu=4Cf$6Zv1?N-~Juxh&Uj&o)Db?~kzdFJ?TxXxH=XgSL&2^sHntn0_btZfj< z41Ys}7AzxaIp8ySBr=R_uoL!{DCh19G3_b{9mhu7sjNAb9wqF7^+mUeQK1@+WNYBY zqfLx-Go_0|XfBQ7x9m-JVYJyd%sFGaU>;OE5q_<|M@*1za)u>>!Z^}ULr4kKtX=6? zq024GOND6$mR9;#Tgf-Zn|Vpf%h(YvUG@ooXeS9!v3qA-*}nF!1A%oZrK@vG2`ay| z^iivIxAOe9Oa%Ml}NtckVU?4yhjC!TZ0Z50Y$zT-{nViY$#(u!T@k zt!BFIpFl1}O9DUFd{U>QC?%~FzQGQo^=tWYvTm5r5b`x2JL5qt|3?rGeYI@PynHF5 zOTt9RQsuMs8|CcK&7~P1er#KRVZ|e-z7t~FbO4FU1HZafP7qMxG*G?;s)mNxp(U44 z*?Z}RTHK7HHF1gFgqPzLMo(@S7v-5A-DxA=Y4a94WKs<%VfpU*Z31ETKDJTs0A$vR z4wMWd(dG;@y(>%uugBGZISY0tbpRTnXL^(R&1(wtKh#|PwC_qIBNH*KsPC$CaJ0lp@a?@m$(YYQ4Bhm zw_4vx+C9`^FP`mrtuhNQhsWfcb#Bdk$CtY}PPjx_X61l-L%T!4IPU#jJRew2zwtTaZmWIZ z{>Ne0->&B#@Kz0apbEfbYX4j3?IHMa?k6&;5&gTmcxq%VMYK9dnysjor=Tc(n8`mZ znW{^u0$meo&0mdF8l9gRigZ{kpyvy3TmiF;7>gR&x2e-^G!-`k3Rcl3qekN|yb1D$ z*dI~n<@US^F3jAcKsT>zl~g*U9Mvd~lxf8dw%=wa-}IWj?p)Ivo;gi5$E>cE>_>X- zM5u1IrK6ByQ2as<0cWK!9W;lmSi}FxZVMUIvj5rO6h0o5z@oi4DqO~IS~;{O`Y#EB zOUrdQ7A9N=%B99P;rcWruqlKab&+oDjZvzK^>ncxl<;P;uQSdm;s|OQ!n8Aq+0azp z5=`RwF&0MWT;+Gh@Ud%v!54&%R6E(bsBjy!Su&UQVmfI%I_&`f1Enj|U8Lk9rIe$6 z571QxOsnnAW0f=Q`B9lq>Fx)@QNCj%@P2mBJy})haqni(GK^1oSqo;UCH3<1T0d6B z>P4M)QybkHGDA^`FhOaW=u&Yo_j&by3owAUN4pSMK|&~uSWzrB(w}aP9R;dHYs@{u zQMKmq3mMgJs|DuZ#pJFN>xR2;&-8NCTlHcc-Hu-N(f?FHYQ*_cc9ghk1~ECnsf5fn zP^~iL3u?hw@_&$zqx8_f?Wi}!ydV}8V*SddA5=^UD`Tf8yBP4tqh${N80%|XgBtZIGI-2UH^WSu5 zwKp=VK3}V#8t#)RjuxR5T13N$K3f3{z>fbG6T?JbsA#I$T=ew^F0Bvgac1l#5fHwe z|LDkpl=RHDGtENRvakpuS0to z@E*Y+9OLx-o^VDOzMwBBJ10Zmur06;ckn{T83-=x5Ep<(u|La)Ww=}ola!G_uBBAK=mW5C{rvUhgR!g*!b-n6068j77AUp0cy?SEsd_;NCTUEa~J)yonfh zOZyVhqAPN@zzP={Y_e{b7<=e>X{5HWlh&hU;@QoR0p50HbCH2^qyj|U1{OmkVC%I| z{K#MmX;ptpv6#K~r~kdmRp5`vP1cac_`s7wLoc#W@0gTn>r_k0_yH*&KsODk_JqHS z?oNmC&RrDlcN=`nqk!JWD(~UBC$B+AKWb)H?CSD;VZ}%TAB*~S;ry-es0fiu_=8*L zIYgy|Sa0{++a03f5pQh#_*gxflxfutKMt2(A6UxYoJ&94t`OE7!9Csnw7;zyB!+P9 zx#iOBaJf=-io+H8s+PA>=A#FW55MyrVV~+Eie*8^Cd;~wF7a# zZaz97nWAMwX#`zZdl!B`^a^hHad7O!N3ugN`&&&5&XW#w6R0%WuPRg)+x5|5?6)oS zP$)@VT@||avf(xn6CHe%a!+TeX&dAo%DJ%-n;@$OI9ln6Rb7%is$Wx$BRhWA3m>M4sb`5 zvh%f_08$2c48BeIW>7gI$;Ek^colYavAvap>GY&&i@qIVz0^{VR|F^kliRE#lONM( zJ~}2Fr_Ge>tW0YJ+-hF+H|>XoI&Z5D6FS7p95~9B++G7?olV4p%+gD|gj1m2uk-4# zuIq(S@o8&WPe0C0a_Ly#)v17B1C8YO@Xj^EDqJXeiBR@E(o(oioG`5C&zl@m(&FjP zYwmli3^w&b)uvRxd(9{QRRK30hryE^MRM*M-B)7tIcI}gKy#dmPN|>hs(V%o!UopL z*42NM-t)`K?b#MMSg@_`47?U-4}Dc- zl(Qu&x%!pF2dCbiqcw8`?|3g^KtDgA(EQ9kaIvJcf28OWOlRdNlqc4$(}uuMrN&&> zLv|j2I>|>(3q#fO;u%!I79~B~vGAuvllMgeb;3`VUsb=erNpX6mz2TqT_C7D>jP=~ zt12yYH@lSw@5#Q3ktyf~he~ZG;$4OjByt%#_)47bwQLGrfSsYmd5#T$Eqy zHWCkUJ67G|)~|LVa!lnCUefCbqtv`2W^5#uEjjySD7!wh1s$K7lS9#CWc*cqm!z~8 zFx=tPCZoba-~0d4-9k2aS+_rm7&ca`1lwXr=CcM1y#=S5&?1vls$a>CdwNSu*^z^G 
zyEo@lZ2NhVp|G|p_(1=wL{r@yVBwmpdwNA+;@6<<*R5AtWx8f9%GEW3UrN-4`5HM$ z;W~59ILD&{U|D9z*bpAaYFuvpVcpA%6Nc?Xw{o>&(@hVp?WB|n*|_}1kvEbn$@O|( zx-O?d;(PO~`_Y1x&n+-x*Ey~)=$xep*_%i4*YV3mr=BzO8((XElPQ}SY{IltVMd@U zRrpF7x;f^%-D?T*nDz|(#K**(zg#c$)6vypguFw7WHW8 zAiC`|DHx13-%nK6U?Z3dq6$#$th3OGng_ugW0zw&P^uw@A8`*QDzEV`jxdhFI@?b_ zX@xkp6QHltr?H&KRrfdLcewEi{ad7aBTLN2rfg?#FI&sR#DtD!OGhWAY#=E;as3Gd z%2gYPqo<&u=`ZQ`g?^QtpZ}At|7fznYdPm|rBx>+G<3fjrB;%y-QV->YWJ?|)9Lkf z{O{i`wYY>_nQ1MMN?GlN8cPb!IxnojM#p}CM1+`_7>Q4xqPT#87&H|XAEG#0qU{Gs zNJ!AJv7Hx@aavV>F9{=}pu|s?Xb{yJY_>_Pijt0k{RoR&pp@bLiCF<3gh4x;P$571 z;~I)0N(Z*~3%a?Ru`y11wM5==wN#QvO(ltx3Uv{SIDfMtll8*_BMsjW9f_cB^jXr` zt}%b}0WUmnMm4by@OnUx9J4NevFMcV7$zUC)j079~(X6hlf z`ICMc?uo_MuXR5sz6t1JnJedcY4xure%aa+pTg*1tmy{%xG^Too||!1ABP#+s#-2w zSAzp>EylLzQR0S1w6=3*BXCMkkn6~qhv9cg5VM z7A%yHyK*j7{ekvw?Sf6E?xI~fjjnoJBg4u|o?fZA7cxlfWA>)^0#=?&mJ$Fy?KjFwXGLmWl$-4I%>&O=Uw)|HjNve!#7J`&n3@e$ zlQE&-r&1@cdp4v1J$-GU6V9VuD!LJt#o)IbA@KOVcEzg^Ev@BdE4e3G6l0f*h2=A$ z*Ka-7({5~|c0X8VVAN)9B0+?owz5GkV5dYFdP%2W;j`XycN8t~uq(fbB@$EC{tlh9 z0)e#-Y@&H*I>=r(n>f0k#NGNk|Fyt+$0nu`4tA3HR4nD9obgs(A@Qa)-4JoVP_f8s zuDWC2pjR9bHu4U1J>sD6TwQ7Yj_N-ez!&~M^Pbjnml zx=pe4z!bGL-vUnY)Vbb!s7b$a8N=618eOzPyiI_5Sk%UEO`DQsjmNUu0WX9Ft0yi-tIfO+n%?a0@7kDvnR+B|Mu7VVb& zf&=7jF7y*QpuwGB)pfd=Jd6T+tW%vwWn2n zw}iP$H?|+)V)d}*P5uEu@0^FXMgPkX7mfk-_98qQX)LHSzXa@jvMOXS6_}Ckx|Jv_ zRCa`Y3PfD2sWnqD`XTVFZ$x6arSM4+hHec`Y# z=rps~mb*pCb{s1qdqzm1O)y%0p`6YIbHS9k+S+>GYzq+3P;7UJ?@4Oj6~FRVu41vG zcz}}C4o1{Mtv~TKeKl4jYOtR{xW3a)VDcjv35TJ=jO5vVBt(~>qIXC!-)pP+VYqY7 z{WAJ}@E3ew1ZX_|4c-&-`B$63Q(>DfDp6RTAScwj`>7#n2hN!GrnNW>N8QW78^OHu z&ynax?Jwe74;`y{w10Hh;(@WkpuhiCWyIb!&650*Y}9`B6o=p@e*QZ%mp{1jFjoSQ zAY^yFLuGcIr9Y=oZi@l}uOGU^SY6ALwoPRZy}0zIwaSfp>uMNBV5%$Z@!jP|8RJ3AQal%5PX$taB>Q(Rb9?T`wM zWM)-Pm6o#FG(T-M1!-5)y(TESo+1mT%~rdP<*muuX-XlJ}Qb77Z*1dqf6+k`Da>^X|+IPfxv=LujvE4k5yMVrM~B6AvLl(YKsA2<=GNVDDIU*2QU&y`#wNH2jVq;6eB3^I3Xf?(ZBwtCjo*ek1SbHkJ;)RL3*qevGfmzHt#SJ+ zd^jVl;G3;?#b>0|bed5;Von@b-ww0fl4o5b@dQvv^*kwotSK39$$(5 z+E_(*^~|^d42YD$F8!FX(n8BV-m3DaW2l8Pf(4T`YrqX zo)|=ef!8xC7_6qyw9~JSN&t8Q zsc(H_@wMTMHi_Z@LD`;A#T96on;Ff>F8Mf3xY$PaG*fm}UNjkE=rA;{mZQB?d_`2J z7J?WSixZ~t>hyK`^j9xGvli$C;AZA(-ShECl0zpSxTTD-VMVYO6YrE1EjuGFU+ zr7y-|(@x(4S+NYvOcdEo=1|tCkG^fM229A(b4g7IJV!Y`1U;1p-ljExt}w06|KpJ^uOw@-gpJ z9a67-DkDZ3u1>mlG)TgzIy6AxbKt#Vk5lVpf8luzKP=yCtFi`*^yV$ha09Q^rIwhhY!vJGDBvqCwd^Z-{)Yk~I#% zq8pYsDCY_6{$qG86k2sbzd%*pJ6Y>Q$H&(Qi;TxCkQp5q8k)Mc*?ZHkr>t`LtqAmi zkx@P*B!rxXrZ9IU^BREXK0bx%8b#>6 zh3FUK6B9$tL3b@R)b#YSq-118Y}yx!QwioQ$oE+9xE&<)_4Ua=d?;s^RRXd+`Hhkh5z-bcjR3=Zu~~|8A|&EC!BK zKT2Wxebj4|^Opv7ZvuYD1>R&KF9u_6C)bn8>Eh0qB23f=z>4j9WFB1c_UaOJzWFk* z7%E`!rT{B!2bysSbzp>rgor^pZ2!~eT;N?d()@MnewW#??{`1|hVd=@<9J{Y-RJ#R zc?>6mvbk&qNbuR_yvfix(zkE0;#%{sW6uMJe_SaxU$}PP2_#hbm^Svv`zHV=nELjM zQAb>W79#7_vFPrk8JNFl>cI##O~bgvO_RQh-+td7IM!5eF&W*XRlg9-{cf@rteQEZ z6YA_i{DG$tr}&eeL$KhZko3`J@5cbHx{ZS*3W0@bQ-SBVQWvQ%` z8)Ndo@xZji2;Q?GTh%(-jgJ%8<$9fli*>ek8(|z$Pmd4H;aQZ4K?H$J57(_vv0YuF z=SOC%ZNAKU9WeFBEqC9g+d~-|B#6JuBr=N~wLW>(ZZYK*7t@icsGZx<+Fa-YyuKbwceKn34k48DhlQ0#%7J*s zc&$dKL_34taJK7vE$1bQv9Ynut+hcPm#cAU#zdST^O;h%nHL+~)jc>iPfokz-JtFR zRp-Hif&#lfiueR>vX9??n528wXlnfAwEGp*bUCfDb=`JL-)1ciP^Zm<%8|HNgf4Bs z+XG^Y5`)bzJCGQE@*FoAcqN}KyN-f|n7@pZyPmYak|o^w?y%%IlX&W4-cYkr50E;g z6TS}458~wa-}J&Io)>&bZLobzp!#rN4(?hK3}E#n@HUrikV4Qo7I<10HBM%bBSQG2 z&A9iy2x}<(7o+07)xuX+al{u>tU^IF3TVYjI+HN33UksCb(ATZWc(7~2H2(5zB+0c zP<<~_(*eiq^-7^~ofhnLf8?Od-&>Ow?i#Gk=$?$egS*ZJh-9n9L#cKc)yd zPkq6&L1<+^HOD;1k_K8|m2E{3xQgEvx`BQ3*UCMWFkMGK;;ymc z#@h>kd*PE63r?QL>E67`$YIDN?20bNa8zPK{;Wvnp+CrFxFPKhRfxo?J*l5XnVm35^Xc)rRW|nDY~N_9!I9bU 
zC+djt!N!7h>rxOFRk{a6&`_kKS}3JCB&t%j3+ry>=}}ctvFzr0*}aVS;bKC21e6H; zKNeE_(y-XJjJo)dQX#L`&rWA&FUF# za~-N}E^{JbQWydF(qW|~rsI{?mGIxfK+IQpQ zXs17{Hsvbx%l>4EZ@|$;U%X>vEW>jC&d?;G)4Z8Iu{SEg2AE8udCX`P`m_T3`ncG@ zO?_zc5N_^=VJXt~v=FP@n{rM zdl8OLMZn5nt7z*BVy&Xz#Kz#k2@Og?Kt;Xm4jx&SeKZ0LC%Y0Fm@Q)>B*OJ3Dn zKzXUkyJ=l$&fX<8j?q20{d{GT#5)Q&B)i*DP^k5wRFst!fCqGmRD;^@$0Jnh>*)i! z7acdW6<#`s%6_xx?}&XE{~BSm@#xw!M9ItMB8_^`|DMpDzsnDzvUW(L6 zzJcf6n?do--E8{EZpKA;#QfKRkn?RyYb_H?#J}5lX2ReKz^N7HznNl`10_upBwL>C z7cdU!4zxtx_Z5kViq3Pz5*!9c70zEr6i;qY3Y*gE(z&Q>F#b2Bt=R( zl-i}cOG+A~J9lZM8~*Qp=X~dX;y1%M>*x&cKF^)kecjhP&Uw6Z?vKUv`}XqCwuef4 zq0@91FpQdT`19uq+;+C<9tU$KVN8v#lKg;q+Jif|Y-a`-D_SX|@tD8X+bnIx~;)K}oD<%d=h!H`p&)Fp*O%yy?1ldudV!eV-iL zpOK5G6Edy`Vn4b`r1kL^66#bm zRO4?g0FEV|%J*Ttbmd%M)y9d1d}^zbB!m$dkZha?kxgE121zkY5?W@O967t@1`6LD za8dramTgo4h=yhP4(okK~Mm(;~(i`>ek$(syMKwVNT#ZVbUz)+r=I z=?C2Nmjpu!+NE6~uVzv)U76X*OmX^53ijff0ABNE?{yb0atA7=JJ5C7^j!jB2_=k$ zI?|`{SSF6dIu3k?m;$auuWfH>RN8sofs2_)(QFod{CdfW(i{7H?>0-X&L+8u?qvDX zq9V06dAAGpZU`z%W%x?-r8&_SITo)H5(!U;;e~gxtx_FrX!LO88o?Qdx!Y`X40vF# zq3dfqbxuVIbSZZ9OnD_utE}| zrHob`A5(QB+rWk|1|P*@D1rZw1Z(ywlW-9izMGPw6U&eP#0*?GC3iQ{t><4Qa@mjc zYBD?RrCZp63u5;npuSV^3wZ|Jg|96P$(}|G zn?)xb%w{$5M`RSV3;~PktJzUeQpS9F={&@>T-@;1aN z!pj>olETV|7D*ZyV{pzpC8MO~YKCO|lp*u(Au&Y-t2iP%n3UZ?rnQXfmhluS0Li5ov!!|D`* zU`Y{KnnFW;8*Yq`gR{j%SveTCudbaD!YXMxMv$rz+XPT0X36rQQMionJ21bP#P0K< z-?w*WF=O>?MCo6r!y7(|zNjvQ_b7@hNrEJ6ei#tYvLlpiXNpG|v~^Ru7(O{WS` z9DNa`!m?r$JAN?dJU5pQQZ9^|o(q2LlAa1BE;zE6zI_+)S^4FB3{h}o>X&A5Ec7xq zma0mImVA6iuHO77;hux+QmH;zYajxu5O%4fy3h>;2q!FjOrbDiI5 zlHq;74Ag`~S7Db=!m76O;}^L?kNX`7#n7bHUgEVZS8c;nw?*3?3y6hf#xeN6afogS-c8 z4B<5*>rTQO$XX*UvUtKa2pi9<^4_)Yv}PfM?b&QxrIR>_RSW~qT*&wG9o}b+rYOlJ zmP)Zx3wkZ15VG4)fpMNyKkOFiopV}#ajW395QNz@%^3c~5545AL!r~8OuQz+=ExG+ z(*ch+mafZyxeoCue3Rg-3`-_k2+2j87}SHe)E--JmKd%z{p}rP=hWiePwMGq+r1Pz z(}uXuGX$Do&`;*oOl;3Qcsk2@`eti#@qAyPQCTX4hNMTSsM9?8=Vd5{aHSclxKk(r z};@lIRBi`*!UU(v#)hi%)gm zEkiEI^da4C^Z@xS!l(UqRD)*crkZuk`}7+EQ7DY!9m~#lDF>SnDrUjv(YLsedtk2{ zTRZhSi)s0y>Ogsw(pyT0EB(&^3;sauI>tgd{%;qIf#x2^kI{WliBcgZ{?n9vjaMWD zB2qxtsI#GWnFvfD8cZ=uVZhXzfb8TqDzQhx2BTzNmSv_qdWY)u_OfCaGj7QALyCfJ z!>u^J+L-dJWG9v94tkfW-TZJA7*$^>+{#z(k`F50q);qa9SJ zwbgdD0>;D$ODs2NSvb08z&>Bgci8CpY`EysXBDb9SkZnvXxkLU0f5$oi|`W}$r&{( zHDkhzf%lu_ikkk&K2E4eADn@vr!Sm#NnbN5vs{4UK<5ax>=Xb{nYAlLX%*8yHiv{$ zEh+iXHVW4+VRYM+Ddz}hOECz2ik5D)@8Kj^;xq)>s^pX_4=hb1%+xQ&-YC(+mLUU{ zu|sFs?ooD_#T0v*)bd@o!&rBZr>PxtSD&Y~G&a7~up&iUTU%q1kl1N>F7j{Id0)Ek zmo*UKR~0`~&>p+?8vXRfty>X{+dqcYTFUVcTPbfj!a~ZGb;9-m8cLm!l*Q3(p+)Wa z4gR(4YJ_Q*7eXAEdauRVv3g8xSHFMbbJIolMe%9B|EG~So_!=ke)kSooIz8}e1HuA zgD?QH`d9?pJPyF!{*D5dA)mnouUL8U!8h!vYL}ubA^+dyG1jhol*=P1&xFoj{p9 z*ABr_l&UU$9{%zvf-2Vdm*)*TfUEjF$d z3q5F;X?0B>Zu?d663y_gec34p&C ztPgEEDjM$sS_v9TV{J_HuGT^^7OG~m10TJcfP$ZKuY2pK4~95~s(A<*(_?=A624HH z{4Er~<|Ydj(|Yg2kg*ZO)fBPxX}X!;VoB0Rs^QW!3WQB#s{)3GdvQ1%F<0YtVw%Qf zo6`DtyAQ!Jb<;Vk9Gn6CSlDVF93$IG<~J(Z)t9xW)^8om*w{L^P)u-_z17u zkfKo+`7qOS#T!B-c5r4k+4f5eHDVc2DCC${(G|fcW`kMPW%KpVS8~?71P~TkraxC$ZUpZ~Oa{$zeQTy~#)_XX zHnP17fk^m9Ukp>?6vt^j4!fKp!O(jf7h7VKWNHmVQcQbW=L9WFW8L)i1|uZ)u9(u6 zyHQWFRbR8I$S)78GDw_)M8$*K**U`JqMaLpn2X_Dy!h>N#z+~8#>GS}Y zbOs^p$pT|fcZ7jGz;@_Jg3p&e-q>9v?x~*5i5yT|#_W5CXHGnM#4h4#J17;{z-gD_ z`D9=Wr;_t@&?44g4JM0yTAd&mF$tddT0Ubs`cA%=q46^8S_@b{1WsADeBrLZuK@3J z6GNdcy?XCEYHj+I=BP5e2Ij$_kvXzdc{eM8#fuU>3L)9|i*Jva+SQ~cs|kpB$?>LC z3&FNya5iWIe`g}aa8WZP;N?w`jSjNOO}}ahHhY`Rk+lNt%;(Z25`)1)(u>e5&`|7dvP9LtvB`}aTCcT@(YKcNL2qkTYY(@qf z>Yc8vbU3f(OcqHCE`irZmREAl9N>&sF~`h3xRDO+EC5+Xh-Sb1@^!|Y@Elvb;?9HvI zUP3N3`Z)7UxcL3}nUqo7u5&5^QS7r9ZPF5BaWIxp2jjd;r8tvWjNpl12K500xq403$xqLRN1 
zWyp|bUQ5>bi5PIy-^3G|$M21A+N#-0m}cb6f1heiynDx{dRv!4op`IyT9shH$mp;T z5V$K;z1u0+;IiqXUp{O1NskQUpT*{yB;7bc2${YeWjp>PBDvoi0txfoVa~Cn<95bq z?1LNecHgTb+7x{Np;Kb)Qq15+UJBAo0$dCyXh1zJ7jeI8>*JM_&L}s9pH9_MYu$4& zV-G?NmMN&qe0{Ae)O_Fzg+=tOUV}rOK@j(++=vrC18!aC(>LPccEObh!AO)c2H2(V zOv;dKh5Ve#1hK=Ik@T~7Q;h;%Ydcc%ZEii>&UU2mta$z0a$ITEfngDsWg>Lx0O3b3 zgcE@)GLuUVvxj0_w}d4mTg@)?P5g0H1@_WvHyR68SR0E$v-;eNzGu#_e`VxC!N>aU z2tq~|8tWguLlB6*20e~*{)6z7X^e1m^Dz=NrzzvnJtQ{s=RXufUaMTimb17g| zygd`JdLV0WxpvT!Hk7X;QXb-4OK;dln;jA{fckh{dNp{IUX~v2qXuj)BvcP4>wWo9 zR==;h1GRT-iQQhN33S@V`7vAYi(wxR588tM!_n2+PoSZ_>_%_#PK0#V=V;Nod`wel zU0$%dYS5z$9<|}ALcb;O;qu-mCv{o`rb7k$8UKR7(sr(TJ;6!tUuE>Y+_CLKv!z@& zEeQ-RKOFXU--UW8l_?$g5H*Y)W;TQh``^xOYmB=x=7$9F6^RGwwL^pZ9;z2r#JbPi zlKVZEoFfd3mIF0)4Vfnosu_Bc8Nn&0kk;mB%$}oZd6X96hkn3(;j*c-Q#2Lj2MaD$ z=JcU|T}bVajUccR&wuWLI1ylbYkX@I#}%A4uy&Z@qb;~cs&G&g6R33?s3T^&^0 zq**9|&GWJ%Zl+{Zt40bOObi&D7W}O4O?oCxNw_aDJRuj$B+ul*plMX9@bCW zjM@@1{w7A;C#eS-l$zy~W-s?_jytYhg)-yAdi72>TLmeWyQQvB#O2ZH^IQ-+9SzOA zzhW9kZ<#lVUWW+L5b*t>V!+goQ9xjkvt`h1DMORo#$LzT8@J=Inqj9Y`C{91)SKMv z?(p2)$RTkJ4*4AxT$yv+Q-SK5+?dbjp`3)9XErwsWYPc+2)v?RD-FrV<#7ORHzw%VWSiuHf zIGn9D1`508h2`X=`reD5g<1xz3`2M7m9C@nO;6{u&_+np5S6kcD{5|fjzt?)5#9v#z5<3QlQw>JxIY0@Nd^-6}lLbk02KA;7>-_O% zEO5c>&n5f{-{&S*=z@70vX8T~GsI7Wk~-|I?Mn7xaq@TnBLHdg@Gx>PT25Xt9|OQG z7btTV2$C46tH$c{8)(j= zxzGzCqX1~;RdB;9w$Y`=yq|X*0#OtYX+Kep1i*q?Cn7?oDOuT>-?Jo`?pS6e!VGo5 zCM^!(y8UQcd2QwcS9)S|h8y$TW88k;SYNX37`@H1fa|yYT|UbeVQr5bTfI|O29lX0 zDLL~{+qYuJDi8;%@rTX3A$^QdBE8+#uC?SGM<;V=kp}m<3-lDV#=w!KwM~D~sPq90 zZtS^ZIC6<)3;ieHs>B9AihqY@o`-nSR~^ibOf#f*baa#ijtO0PF&dV&Vu;+2ll`=BSk|=8 zf(;y4qJ37ddgyHR_iTQfBQs(eaiM>2U?@+Iv{Rx0(ANmQwf4^b-;`Kcvp*^Z5!zsl zp@#@I?R%&70Px#9Wd(Ia#8`@5%~}vUK(%NCCS{krN+J(#4IV-mQ_NrkkL|(a>>@@% zX`SejW)ycZgz~pp9t+kUI?7^WsjE#eAm|8s(ofFHLjqi2xw8d;0s-O+vqi6xkY~=__kms$4Mdm~WA}`L{gglovrVw?j}BZIv|a$saawW*2%K!JIEz zhFJwxSfQim1>_aZG?wZ|kNploFx4N}j}tr=;9(fwGR0&1zH@47W?vin?9%8-)))@( zq~u0a@hv+%u0B2FXubY;c`$z&?GrPfkz2(6{E!1z=j|gf@o5R%N$J_(yaZ%q>v%-*8p~62qilO%J%}hYh@5|# z74IJ_cb(2kYj-0R16LV_?B6pdpYVWpe`3^?>K9>JPf0WhWcMMFiFJo7rMv=pV`ZjyTt*nopA zowd9#uJ<0zT_rBx`IjzZ0G zp3k@Yw-lRaSTS)lxO&wURh3B^+@#${Mj|xXI$VdiVQML~#%X2$fYY>>_oBvts z%GcD%XT39r45wMNGPFiUeb|^E-LL3yFk-~5m6Sr^d%8a>2z+|{v*riwJh@{|KbJbYiuy+`ZwB;? zeMD*FO=dI#Hc|D?8s%6*PU#dT9aHjI3z=RG@5dXcqK5k_J&YWY>3h^P+3xS#^^lLJ z`zhXiq)WHA8l?rEbGSF{okMXcax=ts__B8vhXlB_sz|rV<4mbVm)qMF4rbdhFl^5LXNR= zq8-}*6dj>^2B!y}slVed+-9E|PLr$EF5V#eS$|>+x^S6b~jNzmq48#wNP@LuGT59hylTcJR+Siqz@@IyjV{F8c1|YIeq$cS{)qVDqx^Fb z8foUg!pv|54H(#(n4)~HJp#wDm`4SnsK*o)^jX4=o5&4i7zcif*2jsC7% z==a}pOSYxa*9Q%DtQ=mup&^md$zxVrHG8f-}Mj zQXTzAgYA@w4X6ahWO-&9F`)w!-CQx`kN7{DgjHuVlumYq-RbyTx4)M!g~Xure>Frshp+!>Bbv2&ilLt+njez{6RK2(t~dd}N10(Qa&`I)bYZz! 
z02`ul%5*aBqA11KLnND0*8o`CGQy;zxYl;P#ETw#Fa1>fBKJ_5iNTp6dq z+I*9%r7y{h*PE$MnjKWuR+Yo@%#+J0l$uE&ps)pag5$5l=z6}nMcH7sezBR<0y<0Z z2tX9^X^Vfuvf@5#0BKHxQ~y&`&8{UgE>YtYiejS4JTqt@QTNJyn%cqe#ZNbY?Mi zQp5j(Y2!TMl{_<)m5p3mF;X|FV1D+JYHhL?YC}lJ@Iy{sGi2G(%_=jK^V)$BC$PEp zkF5z*PkS&^q2=Y}1^1^I-4rPC2BH-d34$_{BIpHNUpH)UTQw7+H&A z%L=GP5PBv%`RgPM$=6odjNuoyB@H70;HJpT7qbd?FB!s_8$p<~P;KlHeHWZbT#b3l zi(bCd!#;fTU!(C$5I${hCGs1~3U@$abt{XegJ<^V{2P7AoSEZ7l>S4ep?`@B2H(<* zH@n+zLaG5&wy)0o;ZpNqW?FZ=u_A0pxDloI`peW;xlb764wo9^j8K=~nNIg|@87?3 zU!%lwfLqtBq(x@o^90BY`iP(%dZNVP#<5a4Vs~aR>*o;y^)L<+_0qEi>ZN4UY@0l4 zr>k3_GV)^9T70jc2rP-E-652dt&?F1s(%B0NQ70&Qk#Qm{24rsDeeAuGsW$!C_j^T zkCt1|SnFEPr4j)-QQKn5#Nys8XMbeR`ZXpv;oik($;kG zW^p&CUm(;gjPG5p4;_=VqcT`TMQ!B6w--M<)X>0yc1TfL0O1LD?4o?WMottmhvkTMXa915iIe3xuKQ zyh)6|@0Fm3>XnDh1AviGMbuyy0KM;s4&0$si7a3JU4e^g{Lm6Y-8rxVIkgb z`{+RIZ{jna&6P|)ySeDmqM`ny?MDU{{2LPi-Wxy{q^&(B2+ZO7$imoq+`(_f#)FE3 z-g%#Xym`DkW@PYL+L~1#7jSsF^6a_yZb2%-$=Vl8)RCyHnP&!CEvS(pkwySj)6VoI zrN^6p6ttp;z{!Gz1ZH3fEchh_Oar`yTo?Mc*0KY_=@kFLh*ZPS3f2Dd=@`+qK-%2C z?up}Tt5cJ@OPD${AXDltWMp${tie+bO?L5l(zB%&OiscBba{*jO^z#dkGrBO@)3sW zf8Q=MRV6Hc-0uoMUHK{^Uo}&qm|lAL{^*MM7wcdOZi~gQZqld3;d=?gj8nD!o;SEh zpWKDD0Bz;aDzbCk%8+5^U#Q!MMn{!8dD>2*jZ3bVB)Cl94|Rc!z=Xh&mKEVXbjbmc zteb+`SB}RiWB*^=XB8`m3#HV|*gNsdmp4THL$-)}0h34~yvVOrmdSmXJq68G$Ik+| z-Xx(Ba`b3(b>E_DhHH;|v=#s@rOMPr9wD&r(nhoAbK*rz+5eXYH60ExNu!(E-Y|yA zSrG%}3s(c<=nYI)IJ^_`{A~L>?iF}{kf|lDcwGBtIxMxoK%F(%eR)u*CYJG6W=QRK z^Ly%c4Us0Nxd(XL>ILe?QGI~Du9I;c`@`Z`gRzUGVnp=WeqlK;1%LO~(KO>5yb?1s zv>2QN2@**G%srH-!3LhfJGae8YzOW(np(K%cCxDPA6ui60Tzlkqh?vFvZiqUlCUTC z#Vt&LlK3L53MULa;+eW+Uy4`uk|H;I;YvC!znkh`S|XwkSiCn|iRO>@H-*@-Idnv* zqa%vcJvp$#^QZ|*rbbIiDLzwS#0nr`WvjtAx$Y=F=9mf!0B>4ByG5+k7odf1!?AFL>MaYHz+?%i$75i=-G`r} z1}MVvJ_0kzHIYE!zZb*?RA93E?q(>X=;-9J6J@%0*L6_MYrhWf)|r&Xf}= zWPG~J){g!DtHtewfpy=Rmg`nsiO4hipD%aTCVoz9HH^HMkMD0!w5@s=IA%at(oD}g z7eKgsr+Fytn{N=><#W^))vlHA6IU@S_!+z8Zp6~b{j!sbG+Vmp&$@Qg6C;z>p-hb{ z=kVr;c(d92S-C%F#kNPAC@}hl$(~jdwS4k-jeJCa{(dX(Y)_f=>uhIK5=8R?f7-)my z04}$o7V049(8kVABrs-JZ{9C&QIGGs1AZ4yaTWZlbDM*r?dT6d@Y4oC)85k$sHZpd zqQOHHZ;~C2D*UTupK<@V_&G2*)M8PgBdx=RYai={JZR!13wI$SC27xr@Fni+bq&C3|gkeZTH@Cfp zkRxkZj!{CBEF)_1mI)Y>W641`&(+Q`Ckx-{kD$&`+}o7Kr^SZVutUbNHqaces266) zi*>X3Gwzzi|2i$4T+wZ0S*|Hyvo)&C*M97n(GwjFEj-No=O;99D`M5zz^ikXAZba- zH2}PapqErGRhm2x2@M_bPxXbhjE$Rd0;Ob|BRs1;zm-Mo>*9%Va-Y3;t0D~d(r4Rs zQv>!l27}JxQR1mvu?vw)VeQ?kO*Gh5P2s?m*xWh%v%#6q#cK)anC($E$nHqYE$Y|y)B4&@G)^+n` zzuvyGKPz@1x5*yHh1t$Oo>p{$1jgOfuCYKfH8Z0)#4tB(yS>=o1nSKhvFzxf8}D$& z2a^N0(BDPMW;3NaKDBtvy2dc@gsABjjnjVN9g|_JwpJ?|DL)RCOq}F$vj&(rq;@*# znd54^|ITDV^yAeL2wV;dnDaA*{aOw6pkPDX2R1(@6qQ^L)JpyekHEloA>bIN`>G|f>qNAJmB~|M@AfgQ?%Ho#Li36sZ$Z^?Vk1ZMPJSU$VJXU6&#?D% z5~}OGT=d9L^FPWpDZS$xg*Y^Yq|7N3)1&V z!qu(Q$~zu;wmc{TT(Mn_HS2m>&F1@iXwGKhl7pj`RoXKExt)xH#?ZDRlpd;N47m-Y z61J8}pfEpb54;S(7>z5JigQN%?{6(QzB|- z`VL~39;p@%$+G>>fbXz($=|vJ&yp5XCh9o2v%|3(c-eZZqK(Lh`P~ONO{j+YmsvHyn0Z;>;QD|u z{Jujb?+|pFb~Tx@%LDkwqhB4G)pc<2*uC&!HLTGOk9sUAxhnYSx9{i;m=7 znSDN96>(wuRPT)whaN|ht(6e!eZ$9_niZnxEV6%df65$E$uAuHXD7&U9dh74p*P?T zM)w1+?EA}J^pSf^TiRG#?@Q zR&~2&kRewmr|$XT-?X1UCLXrhR0Ghi^_y<8X_JNzY@!+ge!*5!jqM^OO2N7#3jWfUh9N= z3&b&VMz}b;MAWh>j>(Icqp+AeySPageWGG3J>2}`f%=wM(70M|aXbDk{1cfkzMUO$>I4yP7gN zYq%Ef0p2_)C@4`?O*~T|JkYOJ|LBJ(}I%U?V zfG^G^Zd{+v<7jrEta&_H`1ovT$)1DyAcI^;N@JvqGhU%(2&}sj9l-opqU*??b~jYx za%-1a-nUrMmwIu~8l#G~d5HG|gdmWo@I3(u!wQj=C-5WG;?4R2#erZwjzIil$xO5+ zWS&TWA>y{gSQ)-;6s_U|Ghd6=b(>N_uSwsYGiZZtrL`c#=9mw37Hy>$n47M4jCfzQ zZ}xFJn=_v0l})@}O9x!wm=8p_9OkJ1WrHdKu%z9vnGE}f1Io@l!4@_bOjv@N^Z5AL zM&YI!{pTz6z%doU@h4OSuTOcoZ^-pF@J%KzJrh#WtTIEI+eg%SFG9+BIXzQMXt=20 
z_R&YXA=lwHOTR7desxQDmDJ~`zV_NVm;O;~%Y=`J+tVcAE3x1tZ#xdZwLjR-G|l~v zM`ZC-y=|qUH*3z*6iw;J5v$944_V$_$VND=SQdNb^g}WE_yWYcV4lx)xkID4QC&za z*xTpsk4a<6KD9w`YGUf=iW%YH2fEqx{_RKqjMcdH!)&%I=kWjezM{dWz*RDKkX8T@ z`mHfO5IX0>6GDOSTc`hqYZJ$!)ElQ?JzBxcS^ca@E_VvNtLWpvGtas{*E&*RI zz&gOGBy~mmT_aJ&BzIg1`Tk`?fVKwCN=fJtb3|a*-t$!o`iIDoHD#96`c#ho z72J`h&zL!VP99%xZb?vL_1)CqTkT~6etPadnYpV#`h0!n#=h)HoALQr@CZNdv(#jj zR6QAmT2M`~cF%3G{vU~I8wW^KO1y^ButY@$OH>h*{lK-`v;@@_-MfxK8Rl;L>|E)~ z==7#rux8!7bTyzl4gN983voIhSU1#lOXZ;Dpm}LeZ^FYp4DPFW-CrNher3stfF$_R zw8|SbzX3_|wp}r_CC{>k9SASoHjO{`2m==RG)Af#h+&Dj-_Bn4E&j%D1fldtiH6Cf zFXP%j#8u!2NkKx;lA0=rpl6@+rd9|q^5s@ebHGS+R?Qf%QQxiUg~XnJJaLP9B=ocG zv|pG3v61BwFpWF7!%~1Csd+hJI8o#z_*v}jLe`SXx3832I8}~wn9g8n=CI6V= zSCR;zmM?=>mGW5I;=jjvXr3ly1vI+basIWA@ zFLYhPofVE>0ue~4Cihh??JGA6+Tiba|k?l*q1XYoV% zFqP@Co6^8J`!G%7Th@L1-FyevieQcm2-gsHGbwRtYsF@Ihj!4%VIGg~;B@G5bKIu; zbY9chF>Z@U*)8*;g`G+Z3UDgr znItj5sT9ClY9Q4c?-S2CpJNEgWV6Y->&F-Jn963zlrD(tV*P4$oV;|It=dj$(b%59 zX#AkDr^BLJQqOl}d7!Kv9X3Uv@#34c_nbb)sZEd)TE&Ekp~19;YIkEaGZsqW$IPi@ zcVe@^NoJN~JTJR;v2~7eQ|@l$n_32D83Z*(sI3=NSM1z|5D#Z) zVx?ci$^VW31po6Wuwg?m@RR=CQ2i5Lx-A?0cY%(`Ke?BCd-==)u|V?(`7B#yCAL?= zKwWWs-im^GN7MlQzkI?R<`dFv*-9{r!0BPwg04}qfd7vknU_KcJ~(Kpg`Y0OPU#OFBA9^$j@IqGV1Y+u8iGm1yM+e z(IYxQN3PB;4ekq4S8_x0B~hGsB?H9MyWy&1`+5acugUszP%wys;x^}S>-HfP7bjTzy*rQG#T^J4)CH- z+)=EfI7;Uc)eEBcAp!uGZ4C$08Y%E_fF0(mLd> z{8|#xK@(Aa?hAMNzG*-wd5egcrjk#UrM&H+aZaBV*(A0qA3oU}di7Ygf;pm3Urn~0 z$frY@!2T6%u=6`_d=A&i7zyscjF+BW%*(4`@xEV1HtRhspxJ7{`;xegELHqVZNJb! z7myl|e8qKrmL|Sa=>J%aC>(RAX#5i;tdM97gHJIb1D~-_2s%muPy9A`(?+ zKrSG@-i^0w&FGsG?DsZ8mKol;VabUl>>yqHDwKN7^^bdSTcDYw`V@Y{bt`h=SIm2t z(m0xH$MuvWS^&3fI@3i$S0qHV;Xw_wA0_sF{M zh_WLNee{n9hUE87^yX=+P5!K?Of%bl0N`P_-Hg_Z@LSqnI}xjqzbGqZ*?Rdu51oq# z*bBIqJf{K(Rr4UDg@5Tir z>4cjl>H$61KfXWAX+V5uD0W#Ebz`&N(fiP@$=VshKnL3{2tKDu^9~laU$`XS zf_kB^&l)4ZtkS@1cu9%S6C=l$#{LBd{hPtq0jFMb*xmM5m}D>}gj4}Zvi7eeNdZ53 z%MO%`UpbZDa6(HTWei7_dl9i7uf3ck-uz$qrG)~AtF&$j(r??)-SR(Dvi$IDLM>PgJmuWloq z13l0T#cl$pz{u4fr;DFJ=cz@zTq!Y9(I7Lz{`L&d0`embs$G8F9Fc1fDrTioA+mSZ zFu6K(+F1K886<1HYaJGfwZVnD8wXx|P{`T~a@G08;3Z__zI(9s5(D38dV>=wI6HZD zZ|YR136>21Gi4Nm0G9#nrLMaIm?xL~UoK-9dt2*A2&9Y<_{py)Q}fyfy!ANFpY=3w zwFF1a>A-=i=L6Xc`d__I;M;z>7iif?hj`220ePFPd5EX@yf z1^HxSgH;PFwZLcTQ@i@RxL}X72{OkvC8JoBu^Kboot?6e5H?%mp zLH-Du)&d+^m+zA!;ct@~k>EuI@~Ivv5BHWH;1;<6$m8b7 zp{sTCi#@HvsReX-XTF~SiiOf|o<~2*)8uq$5u`_)OFrRwQpozVviz^RFT4fZojbt2 zo%6+8C%G4WW4l%>KxCqQp#}4zH5iJs+@;g2Ii4&@-SxX8f+c9!pKjV&>_JoW9(55jN|DAn=4;c5!#PHm?52;wisUAsL%;LKGMB`2r7t=}`@I zu;A6X%W3tW-6-E^1Xqa#!F&i+Da7f##d#B}Dx2cD`%3Eep#{>Qk4Mxn6cSnW)lQ5>Vfm(^{ z^7m;kfFglls3YhP?)57e>WKY?&3gApxzcnsm4B`xs4QUoM;Qp^=qjhvwz4-bv?8r>C|-K?;x zZ?A>RcSL?6Yi$30=Cjh)J*)0^r~1OjwCN*M{lXzfPsrsVD8(!H$*qw;%TdV2eZ22e zE%h?i6)JZzNVcRhsp-^?nZxkzPac3NL~jqC$AGe#T4k^4%D722Xg;=Ax!Sq1Y5&wV;mOqa7tlSkh;P z@ND--KI2>qJJHKJ8MWyncFB<}+OuX-<@xAuzcZZJOxiSTWt&vW*&$|}h_}AIIxoC~ z4c9>+Fxa6&MyEfli>5~!#e#{L0COU`cfPR!%&F;x z64?7FVBwSqy*Ox8J2OjvBHzIg_v>;T0A{7RZV>htsM;zliQDnFiq>_rU^-r(E3T1*3faaSLu8GUOs(^(4*~vrua03(Ju)SSked5WTGDxtcfIRz>hB zAw>3BY5k@~JR+0w#l;wm$d+r~`zW9WI=?K|%sf@rpRA7_{ruMT(EY<1mWQoND2ma~ zsWaYhW7LeQ?u58^iRm@R{gft0S#2va;7Xv_wR3;X^KWVMpC7r@b9G<(IJV!=0*W_J zNjHgM;obiI#-`$^oc<1(we?Sk zcPS;~{QSHZumJj}JL-@r;9oo8L8~ zC*=9N+bZ%{vVu3a=f6aChc7-xPb0S1H4ElDVBD*P%a?-`f3&3xqMt|EQZ1;DC|+T3 z@fZXZh1uZh{V*t0=O$BKm1STz!)}-(88@gCOTM|~=V_I$8#}M}jyp)YEVVTY==@4b zPRT0LFQ}~RxH56p;pAF!&BKlOe_*P)D5&apqRJtaz?cb#BIRjUoBil>PKqdqv01(Li&lAv5v9Yns46}l>aS?gms;~g3lpGz4 z4@=$pZ-f|iBD^y&o=n?I8hkMwc7+MC7;y0+gmh5+EpSv>lTD0gDX2rx-~m>C80>Bz zzXs{DHxs4KZS&}b$3N)_%c`DuK(jnI0IiQ{ 
[GIT binary patch payload for the new PNG asset omitted; not human-readable]
z2z+BS$maIgMB9_Gkrz1?tDV;52CPq-JE<+#Tm#G>?WZj50|7`9CPip0O(g2)Y%Z+c z+o!7T?&>yBqVdXX=x$u4zZx|WWZb9q@j3k^#}kr*3%H7lY?ZLjdP&CsjUq3h4n^Rx z-=%#7pH0P_>Wr+pNL4EMfw|6IaC^EU8zfK-LUX5;s5BqFTM<0VhCUl^Wz(Y#Slno^ zdhm$x{^d!)huR+KLHYbQz}WA{gyz}J+}Kl!dF=D?qqj}02-`A3EX&`inKpafTV&53 z)VenjGllKLmGz!sin#5zbE93c`J-&C>AJ)70w1ebX2zQQOS5JMzm&LGNaycFs|8Zz zOvdiuvFvuTu^4cYLgCtvcan@~E!pJ?F-raCKM|~ZFdp`)4k?7vlKf2RCEwtb!aNtB z4L$Wv(-%D_`v2&ioO1SaWBmo~Od4kcDc2Lzu9dA_-8=j*n-WNi77x?&08K*vHPIWS zNo_Ox^PEyUvIW(BPO^$&H^}oZfBDW$JN?q_pRSN?AUy1+t(B}4b_!DqM>Co&#|TrF zDBP-~Y8vfEtjk4J|1F0DVl98;aJn-=w%111wc`Av3vtW3`@9(nPx$o@$6~t{?>o6q z3z^A_;5IMk1e>E!QO5ehn_+_9PYq(aM<)*o_}Tv)Y+ti1NOK*mcH`41T^scM`FS@} z$(YV;WNy|pI7nnTDBYN$7Q5O70RVlP_kM_uVIY>enLz#{F&8B#U|LT0Mc%UMnj)H^ zUhf!0sXG6BBqXz(r~X)GMX9mPGtqYy)plzz@3ec?B;k97_Xs!Mb@2 z>q=7g?;}!d3Bn%ZB1O_ciwl7C47p8^w?O1)#d778y>(MyZ<_K_(~Bp=;`mI@{Ji>0>^51@4`RO)ZA z{*7&7n{r4>rGUXz_P2Q+c0HomZFRywryj<-Oc{~}wF)Szi?U;`^~`vu9xH<~f7Fie zdN0?tT)v6x+a$`qyuzokVR2SK;}3NgAazlq4Dw}`IaMeseQNJg*FQK0kFU8)y4{UJ_szbsiZ|9!4nwaFOGKSF;B+1AlmEOoTtuUv($QQOVi8<6OqO*R+ASKE_}P( zHHo`i+T^o7`)YeV$h=0|EhqhH-m%ObS7+;^Z?9D3WlIaVT{aT1FweYpIaD!LA0| z2?ftYD~081xrkU*OwxQ=;#%B%k788qYYn%)NoG$21>%1K*SKCcI=r4`kO*YJI z9zZ=Q#}}o_tExNQQy8(2EiB({F6*y8>iVrU)oDUxwNNmJee0wO?jZhVc>bK;W)W=` zab0a-yrlWQM>zu{R>Gpc~{P)$!Fu@w8BV9{jO7`ZeEc3 zi*meG&YWamgt^XVR?w_6 zotE4*?(qPe9SWW1jcHFZDpuA{IS$!GM>YRn`&oz=O3l9bg{VhW?h%;)S;wvdsnyFc;Lv4wHVeVzhD!5^y&_`4<{Qa%=N0=B z;z16mvbSJc5;)Lz?&&0SK>~92`#>T+;YP0Ya5|p?u_T_$wkeX&XxhZV=|J|gJgM77 zubQPqh~2N7b(oy>OfAbn3_F5kKR?LM_COtz-0dY2J7I}YOASX|X)ZTmDgiZwqh4GG zl4)B@YlR8pyJARam|7iqm3s>n_6#KQt|R=u-+`~HB?jLJLHDrx;oSVUzst>!#La!M zHXS+GZI^bV;+XB#udhaO3Y?VibCf#{Z*$WNNujw{os@B-8V#~2rGj4bf*})Rq`JQ0 z>NM7*3h6a56$37S)S#0XR^1!q4=usvB}5|vGT9kWVK43Gr^X3NywELFCIq1Ls#U}@ z^Xtv}0pf+7;ggs{#yALBpDPT`U{Nm={zl2s$kyiSRg6UGO=GvljnNuI)~xqolkICm zE$;WaQk&G+P)E%9N)4D<$J0v2fDD#(`}~8shIs-BupFtm_RBpvGC-lI5!_8c;25Bh zi*zhbPWP_);!<>wu1>mJTXW-N*1bibOUthR^+}(ehHuKLr3nRr#Eq)0?R#h9&(CN8 zM3RdffP``<27^UN^DkQmPDR0}za$1G%Z&jYeE=T#T_WNZ!)tdLfT$hPwu2LZ0h+C# zhcDQgZ=JIF;4yP`u;|gbWHDV1=z<~$Trq<&>$Je`D4S-MYv|g&3$`G+9gItf5O-yw z@tr~tA3&Ca`=L+`c>KzTLR8bieNw6Kc35T^fN?na7nq&W@4~={9+YMk-3l35_<=yb zcvbm9rins@($Alsf(i$8oBmz~5a8g&;AU|6ry?lAT>M&G2r}H`Knb>dzW!*2_=S+& zGeCA4h|;Uk04Btts&stm`n3H8IxL9)O@I#nKFh~^co?ZCe7voDOMO*S^aVh$D{q@J zet&?@6=A){T157vl;X$KDaBPARND2Bo2jyXM{oTASK*!z2q47l=*C?@R&l5T95k}^ z6T@-k-8ASznS}-1hXRHYK%iGzv57z;*9a$P0tSTyG7G_~J79?KdSqqvQQV(55#;{- zF**kX^n{5OT%s#fnhi9euR!Ag^-Oq@52)ttV>CYg9lBwj6DCWD(f#Bp!a=3IPQ+iZ z%KO}mFOUB7fh>t8z_t@Ivu`uB$b)NbBA6cXiezjU0v(bIEJw7VO93j%R!tlfKk!xc_~%N%TWXnEF`#egmlv46_?(!%m0z&U>Wpq z9ZmKg4cc62hRDXR3m;F`?OXef-^s+HXI(#SOJt@Z7AU zP)hP~(TB2+LmQ#K><|Y+et^rE283@_ja1~TVVDqq-ePeZ!pZB!ulSICaJAB_+LeS& zsjrTB6$rALFp-^PLU(%xgc3_QC|NDlH1GH8eK0KMzw(U{zQYdJt`shWWbSVWfkPt? 
z<%8Tg^4vQ;2*8fs%V-~z%27H3uKcOLtI^c^W^ac|m5dzt;K6ZN9m=jnM+X#b>OJdA z_k-PjZqL~{*c?H)y{T^tbltr)fNb?sQg0me@lFW~c7kPjDN#2weZce<(gBB(ZKaMx znWvzXL}5zCRn38pG(G&*aj87!@{~bQh2IQ%V$5m7EgW2F?7cF7H!Tnwrv!*ZpAKn# z(}ubk#5Fs;2{DA84XEAwQyx;YhTNhuA8}dm?uA7&rSxHl`XaX*Emh-)XQ}dz{yb__ zgfX_=xAH^m{C?=$8ytsilaPvMXJ-y&TPFkFjr!%q&J@bScABj2EDR<(4DKs)x}q*l z2~!m%Z)F&14(>M9tN+Hqb+*(8mZ|P${q3z_~YIvrN@ix;1 zf+ee?u^B|iQdBio4Tu^GLgKfe1%3aZ<-AjjMG`v{N_elWk6s^V%A&Zj)w!-RqWQ{e z@D#>-n`8G=2gN~un3-;UuTcV&7{FdR1O-_-^9D@c0agGqNMr6n>6(7PDv>5lhov9s zff0GmJzH@s@y9L`4ja*)Wn z#gM0SpayIWE$;v^r<2wY7C-4wIQBA4X>uXaudvMj(JH(1 z#eM25>x`jM(c+ptOR-U3cvmx(4*?prczf=tE&;wF`Ml3TDDZ*7JElFbbcH-a%ZVZl znqt-5W;e4-zpn0QlA0TcxuPL_^1=881WbnmkLP0fL^y^Za~*g@FEaz`SRRsNt|Fej z`vt3B@F?cSZOBKHM40jaFJ8F)b~^9b1Fq4q7Fvj_>AI`)yEH!Bat@Ji0G&soz2i5m_@ zPyvoPFw=b3K5L{gPpLG|CRie;zI+jbJC@0c4nKJmv96i1@hS|`aQSC?$k^N_jx2yD zZ#s4DFfJ)Fg-11k4De zAq>jOF7FZcnNJTjWGB(4SD9b$2Lk}CER*#UA$?nynvu{-M^Hg_|K1;PlQ)G{83|L>SGGfw z-x#Br{_2i9#pV6RS}}8aV#QL|4e>6Mm_FZMZO5GGe8qsuaD+jT2*47Lm}>WIy@QcO zF2jN0&#jWiB$g9JdKm{i7q%6W9QW;pXGAj;5fj;We%6f5{qk(&NU;oesUQzs?pjj{ zVNeVnXj*UjL}crM!mOYC(&{qvh0j>-ga_#sx6#r_Xv7a(aamyZJlR^N+c_^b9nqRikdXcnMlhj17a98ejJ_yfg##gW4OtmBvyKc$-gC_Os7syss329l$F+{0VI z*1b9b##Y+F%y2o9Olfq=vMo>;>|k$sz%AF{joLh_>kLibTaJ4%_!^hdjYkp^o*9Tp zMRKr$0W|2ENowfu0W}I}D4=nm1-C}vCu?SGlCF>DeL8lZ7tv{@;=0R2&%5!MlXwk1 zc-D%B?H@K+Wy#w(hqh{>4nF09E$3I1W;=WTD2W{?%u)QR=o}Z#Bz`8Hg8EU?GpQXX zLSWn1R6=)XQq!LUGT$!20$hQU2p4<{Rmp{j*-<>Eo?*{=BIvG8(dIFGbrzSG@Xs{2 zq4k|&IdB4w3^H4|%F)Y7wgx@v(gLboCkLTp0@_WiHam1rV&QS!ur&u*@-fb?xhfFIkArHgVe?X%Y5uJm<;OwkeHFkpTZx%C%7{CJ# zJ@cyg!oE#t78`>rPq7y&<9+pOXsf;Po)A*qqKGxKTGYdq)8STUoG~d`d$&I{wwX%k z6q`2v6*1mGid}zo2UJ`O#D+M{_WaS^xjCzQ&ccjVwv_9K zh)Jk3OqKkC0m2>|GZ0puGt%2(v z7qW?VYW-K^#HpHc+ek|K)7^Tx$yZ^6xF^}NIE2-=1Ih}qWadBM$pHo zjG4LMb+a){^KEz5k*s!2@AlA^uR%)2bb7R=g)x)P*^H^2Hc?7G?n!;(LUoY7r_+S) zdM=&Q*~fYU6yL>N_5~-j8OlzTEnh1T3ll#nSE(L*|0h(I6K%|qH2o|%%~nNa6Ku-oETg#vz;1Nt>@&!^3|?#gs6UWV7+ze%0l4&ZA~P^W zMsytG^6%|c*^U!`(#e;*&je6zj@@XoeQU+qsv3GI4K_Rl#-`zib3oWcqoUA>V05bV zShoT3=7Pe*R?=7uitWw1ZtZXqajhvZSIFAZjq(fL5pzok^Kt>~T z4He+xDW%xx*YHvtwukPkq8>eejNHOA&NQ8?r-c{nWOF{E`ot4f54g#sa))vxM>250 zbRzzl`Bpe$1idBg&SvLlxC*(w(HSNc)Qj|{ zKa;lIzY5KV(QT$esRm|c8%$Kh7p}4MfzT3+UXl#xR_k9Gt7U8TE|;%`B_}L72Z|&s zBwh_{AbACU?*N`_3ZMh4mWoOg|AZ_J?Z8xg#ic^st(wCNv{p~sfj=`6Y> zu`pGw?x&(L`=8}67k>b^t7Vf`HEd4MY=oap(n+)sTH6L_yuI&Mph`_pKLQxe)*xD> zU>d`7j-)-~8eypD^aqc~G|K>ZtDnquPa&vbB9|?QXwaoHLwrj5>Jq|P3sbGDIg|GE z)bmz(ALahBWqX~^HDjyXtjDtOS!b*u7dpp!?{OF`AnQ@FLjAQgdk}Tu;=zq#ol=KL zj`CLsZ`e&PB)w>n-r2Y_p{^{9`a>VTqt#njz6P#eq{W>y{gfLBoVpIueNEFMi=|M= zT$zI1M-QK=0wBXf-LT!_Xr4!uJnv2a3FUT{{U@sI z>XOxnr&$W$C8zEe^Jx~(_JqLnppphEQqHhvveOLgGJWfAZxK!8^q5R{ba9s3?04ve zqFKuu%_yaxASb8w0QJ4+P}kKpXs7C*z9g?uffN^qNHOS3EcY0Y#1|m1w6^LLQhM@0 z9u?@dmfn?U{)!+xCp}-nN6~~nFAnXXZ%2$!Os0M;SjRiq86Zi3r}=A6LsJHZG>hc{ z=;?~%<9A;n1GClwa#B4I>^lz4uF0x%VEgZXSw4OMtAY#JMl_)4_h5O-vPfS^bmz{!g(zX^JCK_V ztp{dV)I`G8ggz8&al3CCG`|00#lo6+*$NuypMW9fj|%Wx`IOb9T2dBl#|)|T(Dv4; zar`34P;^g7!mS6J%wxCBJOgf(bh#Jbax(jZRkHT=J|YPF4?Eh*(8rjf&Irl~&A6T3 z+C>9lkkBeOlK3gRZ-?Q)b%=?pSmM4t71WRQJtR>7)lMY7)yih-BkG6D%SW)w>7GEl ziFC-EI;nP&vgFEstO%#}(}Vp?CBy^-Z9R6>CHBPxUnbRFhXXh5Xz^D(2et3$wy{5Y+&jEwmZlm&(2w zA&!@P^~$s^aVi`y>&Hxgp1{_*1FQ@5mEBqve;USoLzk+&R8R>->2nPz&yB ziz?&Vj@Z`+Je@|XwpJ4c1Q&v%ocsP~C{1GVisj-j_4|E^24HJx{_$3nseVx_0Vu?G z-f0%0`@06?j{Gr^>T`$TiE@pne;HPjY{0GCcD^3J-GhZ5^CiThujTh^0;q3 zO|AZE@6cBZp3ARiv!**TM5YYiU+aJ>lQAHEA~;HhOuiJbkiEL2EinOmQWk_; zJAZwH{3q!mFdq+-rC@wWsBffI!;6=?71z>)Ao4+qL~hoaAs#bfeti;=%zmCh%v-LI zzx*7-Tr3focR-~bXRb(H-p?zgp8+}bucR)NP|OXt+i{Ryp6q3Sw4n^vQ`zHoRh1At 
zz@TNKVP^)Gu(XngASyI9l6M;%grc)97vr;D%=|8m@JZf%2(n}Ex9zZpgVkA*F;;Oj z3bMqjo^V>fMVt{CVH$ zGtEERuPDizy+7a+nT11CGk}4S2?75{Qxb5-!=L=?hkyO__kRYYL_IZ%d^!L5H-G=^ zf1mY_5%@hO|2tm)YhS-d;D2pmE7bfR2oN*>lQBn*V}t)z?S&~J2Jq*S_O)}_XDtH% E4=aZg=Kufz literal 0 HcmV?d00001 diff --git a/fern/assets/recipes/mcp_and_tooluse/basic_mcp.py b/fern/assets/recipes/mcp_and_tooluse/basic_mcp.py new file mode 100644 index 00000000..150e26d9 --- /dev/null +++ b/fern/assets/recipes/mcp_and_tooluse/basic_mcp.py @@ -0,0 +1,240 @@ +# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +# /// script +# requires-python = ">=3.10" +# dependencies = [ +# "data-designer", +# "mcp", +# ] +# /// +"""Basic MCP Recipe: Simple Tool Use Example + +This recipe demonstrates the minimal MCP tool-calling workflow with Data Designer: + +1) Define a simple MCP server with basic tools (get_fact, add_numbers) +2) Configure Data Designer to use the MCP tools +3) Generate data that requires tool calls to complete + +Prerequisites: + - OPENAI_API_KEY environment variable for OpenAI provider model aliases. + - NVIDIA_API_KEY environment variable for NVIDIA provider model aliases (default model alias is "nvidia-text"). + +Run: + # Basic usage (generates 2 records by default) + uv run basic_mcp.py + + # For help message and available options + uv run basic_mcp.py --help +""" + +from __future__ import annotations + +import argparse +import json +import os +import sys +from pathlib import Path + +from mcp.server.fastmcp import FastMCP + +import data_designer.config as dd +from data_designer.interface import DataDesigner + +MCP_SERVER_NAME = "basic-tools" + + +# ============================================================================= +# MCP Server Definition +# ============================================================================= + +mcp_server = FastMCP(MCP_SERVER_NAME) + +# Simple knowledge base for the get_fact tool +FACTS = { + "python": "Python was created by Guido van Rossum and first released in 1991.", + "earth": "Earth is the third planet from the Sun and has one natural satellite, the Moon.", + "water": "Water (H2O) freezes at 0°C (32°F) and boils at 100°C (212°F) at sea level.", + "light": "The speed of light in a vacuum is approximately 299,792 kilometers per second.", +} + + +@mcp_server.tool() +def get_fact(topic: str) -> str: + """Get a fact about a topic from the knowledge base. + + Args: + topic: The topic to look up (e.g., "python", "earth", "water", "light") + + Returns: + A fact about the topic, or an error message if not found. + """ + topic_lower = topic.lower() + if topic_lower in FACTS: + return json.dumps({"topic": topic, "fact": FACTS[topic_lower]}) + return json.dumps({"error": f"No fact found for topic: {topic}", "available_topics": list(FACTS.keys())}) + + +@mcp_server.tool() +def add_numbers(a: float, b: float) -> str: + """Add two numbers together. + + Args: + a: First number + b: Second number + + Returns: + The sum of the two numbers. + """ + result = a + b + return json.dumps({"a": a, "b": b, "sum": result}) + + +@mcp_server.tool() +def list_topics() -> str: + """List all available topics in the knowledge base. + + Returns: + List of available topics. 
+ """ + return json.dumps({"topics": list(FACTS.keys())}) + + +# ============================================================================= +# Data Designer Configuration +# ============================================================================= + + +def build_config(model_alias: str, provider_name: str) -> dd.DataDesignerConfigBuilder: + """Build the Data Designer configuration for basic tool use.""" + tool_config = dd.ToolConfig( + tool_alias="basic-tools", + providers=[provider_name], + allow_tools=["get_fact", "add_numbers", "list_topics"], + max_tool_call_turns=5, + timeout_sec=30.0, + ) + + config_builder = dd.DataDesignerConfigBuilder(tool_configs=[tool_config]) + + # Add a seed column with topics to look up + config_builder.add_column( + dd.SamplerColumnConfig( + name="topic", + sampler_type=dd.SamplerType.CATEGORY, + params=dd.CategorySamplerParams(values=["python", "earth", "water", "light"]), + ) + ) + + # Add a column that uses the get_fact tool + config_builder.add_column( + dd.LLMTextColumnConfig( + name="fact_response", + model_alias=model_alias, + prompt=( + "Use the get_fact tool to look up information about '{{ topic }}', " + "then provide a one-sentence summary of what you learned." + ), + system_prompt="You must call the get_fact tool before answering. Only use information from tool results.", + tool_alias="basic-tools", + with_trace=dd.TraceType.ALL_MESSAGES, + ) + ) + + # Add a column that uses the add_numbers tool + config_builder.add_column( + dd.SamplerColumnConfig( + name="num_a", + sampler_type=dd.SamplerType.UNIFORM, + params=dd.UniformSamplerParams(low=1, high=100), + ) + ) + + config_builder.add_column( + dd.SamplerColumnConfig( + name="num_b", + sampler_type=dd.SamplerType.UNIFORM, + params=dd.UniformSamplerParams(low=1, high=100), + ) + ) + + config_builder.add_column( + dd.LLMTextColumnConfig( + name="math_response", + model_alias=model_alias, + prompt=( + "Use the add_numbers tool to calculate {{ num_a }} + {{ num_b }}, " + "then report the result in a complete sentence." + ), + system_prompt="You must call the add_numbers tool to perform the calculation. 
Report the exact result.", + tool_alias="basic-tools", + with_trace=dd.TraceType.ALL_MESSAGES, + ) + ) + + return config_builder + + +# ============================================================================= +# Main Entry Points +# ============================================================================= + + +def serve() -> None: + """Run the MCP server (called when launched as subprocess by Data Designer).""" + mcp_server.run() + + +def parse_args() -> argparse.Namespace: + """Parse command line arguments.""" + parser = argparse.ArgumentParser(description="Basic MCP tool use example with Data Designer.") + subparsers = parser.add_subparsers(dest="command") + + # 'serve' subcommand for running the MCP server + subparsers.add_parser("serve", help="Run the MCP server (used by Data Designer)") + + # Default command arguments (demo mode) + parser.add_argument("--model-alias", type=str, default="nvidia-text", help="Model alias to use for generation") + parser.add_argument("--num-records", type=int, default=2, help="Number of records to generate") + # For compatibility with Makefile test-run-recipes target (ignored in demo mode) + parser.add_argument("--artifact-path", type=str, default=None, help=argparse.SUPPRESS) + + return parser.parse_args() + + +def main() -> None: + """Main entry point for the demo.""" + args = parse_args() + + # Handle 'serve' subcommand + if args.command == "serve": + serve() + return + + # Demo mode: run Data Designer with the MCP server + if os.environ.get("NVIDIA_API_KEY") is None and args.model_alias.startswith("nvidia"): + raise RuntimeError("NVIDIA_API_KEY must be set when using NVIDIA model aliases.") + + # Configure MCP provider to run via stdio transport (local subprocess) + mcp_provider = dd.LocalStdioMCPProvider( + name=MCP_SERVER_NAME, + command=sys.executable, + args=[str(Path(__file__).resolve()), "serve"], + ) + + config_builder = build_config( + model_alias=args.model_alias, + provider_name=MCP_SERVER_NAME, + ) + + data_designer = DataDesigner(mcp_providers=[mcp_provider]) + preview_results = data_designer.preview(config_builder, num_records=args.num_records) + + # Display results + print("\n" + "=" * 60) + print("GENERATED DATA") + print("=" * 60) + preview_results.display_sample_record() + + +if __name__ == "__main__": + main() diff --git a/fern/assets/recipes/mcp_and_tooluse/pdf_qa.py b/fern/assets/recipes/mcp_and_tooluse/pdf_qa.py new file mode 100644 index 00000000..024ed4f3 --- /dev/null +++ b/fern/assets/recipes/mcp_and_tooluse/pdf_qa.py @@ -0,0 +1,572 @@ +# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +# /// script +# requires-python = ">=3.10" +# dependencies = [ +# "data-designer", +# "mcp", +# "bm25s", +# "pymupdf", +# "rich", +# ] +# /// +"""MCP + Tool Use Recipe: Document Q&A with BM25S Lexical Search + +This recipe demonstrates an end-to-end MCP tool-calling workflow: + +1) Load one or more PDF documents from URLs or local paths. +2) Index them with BM25S for fast lexical search. +3) Use Data Designer tool calls (`search_docs`) to generate grounded Q&A pairs. + +Prerequisites: + - OPENAI_API_KEY environment variable for OpenAI provider model aliases. + - NVIDIA_API_KEY environment variable for NVIDIA provider model aliases (default model alias is "nvidia-reasoning"). 
+ +Run: + # Basic usage with default sample PDF (generates 4 Q&A pairs) + uv run pdf_qa.py + + # For help message and available options + uv run pdf_qa.py --help +""" + +from __future__ import annotations + +import argparse +import io +import json +import os +import sys +from pathlib import Path +from urllib.parse import urlparse +from urllib.request import urlopen + +import bm25s +import fitz +from mcp.server.fastmcp import FastMCP +from pydantic import BaseModel, Field + +import data_designer.config as dd +from data_designer.config.preview_results import PreviewResults +from data_designer.interface import DataDesigner + +DEFAULT_PDF_URL = "https://research.nvidia.com/labs/nemotron/files/NVIDIA-Nemotron-3-Nano-Technical-Report.pdf" +MCP_SERVER_NAME = "doc-bm25-search" + +# Global state for the BM25 index (populated at server startup) +_bm25_retriever: bm25s.BM25 | None = None +_corpus: list[dict[str, str]] = [] + + +class QAPair(BaseModel): + question: str = Field(..., description="A question grounded in the document text.") + answer: str = Field(..., description="A concise answer grounded in the supporting passage.") + supporting_passage: str = Field( + ..., description="A short excerpt (2-4 sentences) copied from the search result that supports the answer." + ) + citation: str = Field( + ..., description="The citation (e.g. source url, page number, etc) of the supporting passage." + ) + + +class TopicList(BaseModel): + topics: list[str] = Field( + ..., + description="High-level topics covered by the document.", + ) + + +def _is_url(path_or_url: str) -> bool: + """Check if the given string is a URL.""" + parsed = urlparse(path_or_url) + return parsed.scheme in ("http", "https") + + +def _get_source_name(path_or_url: str) -> str: + """Extract a human-readable source name from a path or URL.""" + if _is_url(path_or_url): + parsed = urlparse(path_or_url) + return Path(parsed.path).name or parsed.netloc + return Path(path_or_url).name + + +def extract_pdf_text(path_or_url: str) -> list[dict[str, str]]: + """Extract text from a PDF file or URL, returning a list of passages with metadata. + + Each passage corresponds to a page from the PDF. + + Args: + path_or_url: Either a local file path or a URL to a PDF document. + URLs are streamed directly into memory without saving to disk. + + Returns: + List of passage dictionaries with 'text', 'page', and 'source' keys. + """ + passages: list[dict[str, str]] = [] + source_name = _get_source_name(path_or_url) + + if _is_url(path_or_url): + with urlopen(path_or_url) as response: + pdf_bytes = response.read() + doc = fitz.open(stream=io.BytesIO(pdf_bytes), filetype="pdf") + else: + doc = fitz.open(path_or_url) + + for page_num in range(len(doc)): + page = doc[page_num] + text = page.get_text("text").strip() + if text: + passages.append( + { + "text": text, + "page": str(page_num + 1), + "source": source_name, + } + ) + + doc.close() + return passages + + +def build_bm25_index(passages: list[dict[str, str]]) -> bm25s.BM25: + """Build a BM25S index from the extracted passages.""" + corpus_texts = [p["text"] for p in passages] + corpus_tokens = bm25s.tokenize(corpus_texts, stopwords="en") + + retriever = bm25s.BM25() + retriever.index(corpus_tokens) + + return retriever + + +def initialize_search_index(pdf_sources: list[str]) -> None: + """Load PDFs from paths/URLs and build the BM25 index. + + Args: + pdf_sources: List of PDF file paths or URLs to index. 
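+
+    Example (illustrative; "local_report.pdf" is a placeholder path, not a shipped asset):
+        initialize_search_index(["local_report.pdf", DEFAULT_PDF_URL])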
+ """ + global _bm25_retriever, _corpus + + _corpus = [] + for source in pdf_sources: + passages = extract_pdf_text(source) + _corpus.extend(passages) + + if _corpus: + _bm25_retriever = build_bm25_index(_corpus) + + +# MCP Server Definition +mcp_server = FastMCP(MCP_SERVER_NAME) + + +@mcp_server.tool() +def search_docs(query: str, limit: int = 5, document: str = "", page: str = "") -> str: + """Search through documents using BM25 lexical search. + + BM25 is a keyword-based retrieval algorithm that matches exact terms. For best results: + + - Use specific keywords, not full questions (e.g., "configuration parameters timeout" not "How do I set the timeout?") + - Include domain-specific terms that would appear in the source text + - Combine multiple relevant terms to narrow results (e.g., "installation requirements dependencies") + - Try synonyms or alternative phrasings if initial searches return poor results + - Avoid filler words and focus on content-bearing terms + + Examples: + Good queries: + - "error handling retry mechanism" + - "authentication token expiration" + - "memory allocation buffer size" + + Less effective queries: + - "What are the error handling options?" + - "Tell me about authentication" + - "How does memory work?" + + Args: + query: Search query string - use specific keywords for best results + limit: Maximum number of results to return (default: 5) + document: Optional document source name to restrict search to (use list_docs to see available documents) + page: Optional page number to restrict search to (requires document to be specified) + + Returns: + JSON string with search results including text excerpts and page numbers + """ + global _bm25_retriever, _corpus + + if _bm25_retriever is None or not _corpus: + return json.dumps({"error": "Search index not initialized"}) + + # Validate that page requires document + if page and not document: + return json.dumps({"error": "The 'page' parameter requires 'document' to be specified"}) + + query_tokens = bm25s.tokenize([query], stopwords="en") + + # When filtering, retrieve more results to ensure we have enough after filtering + retrieve_limit = len(_corpus) if (document or page) else limit + results, scores = _bm25_retriever.retrieve(query_tokens, k=min(retrieve_limit, len(_corpus))) + + search_results: list[dict[str, str | float]] = [] + for i in range(results.shape[1]): + doc_idx = results[0, i] + score = float(scores[0, i]) + + if score <= 0: + continue + + passage = _corpus[doc_idx] + + # Apply document filter + if document and passage["source"] != document: + continue + + # Apply page filter + if page and passage["page"] != page: + continue + + search_results.append( + { + "text": passage["text"][:2000], + "page": passage["page"], + "source": passage["source"], + "score": round(score, 4), + "url": f"file://{passage['source']}#page={passage['page']}", + } + ) + + # Stop once we have enough results + if len(search_results) >= limit: + break + + return json.dumps({"results": search_results, "query": query, "total": len(search_results)}) + + +@mcp_server.tool() +def list_docs() -> str: + """List all documents in the search index with their page counts. + + Returns: + JSON string with a list of documents, each containing the source name and page count. 
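+
+    Example output shape (illustrative; the source name and page count are made up):
+        '{"documents": [{"source": "my_doc.pdf", "page_count": 3}], "total_documents": 1}'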
+ """ + global _corpus + + if not _corpus: + return json.dumps({"error": "Search index not initialized", "documents": []}) + + doc_pages: dict[str, set[str]] = {} + for passage in _corpus: + source = passage["source"] + page = passage["page"] + if source not in doc_pages: + doc_pages[source] = set() + doc_pages[source].add(page) + + documents = [{"source": source, "page_count": len(pages)} for source, pages in sorted(doc_pages.items())] + + return json.dumps({"documents": documents, "total_documents": len(documents)}) + + +def build_config(model_alias: str, provider_name: str) -> dd.DataDesignerConfigBuilder: + """Build the Data Designer configuration for document Q&A generation.""" + tool_config = dd.ToolConfig( + tool_alias="doc-search", + providers=[provider_name], + allow_tools=["list_docs", "search_docs"], + max_tool_call_turns=100, + timeout_sec=30.0, + ) + + config_builder = dd.DataDesignerConfigBuilder(tool_configs=[tool_config]) + config_builder.add_column( + dd.SamplerColumnConfig( + name="seed_id", + sampler_type=dd.SamplerType.UUID, + params=dd.UUIDSamplerParams(), + drop=True, + ) + ) + + config_builder.add_column( + dd.LLMStructuredColumnConfig( + name="topic_candidates", + model_alias=model_alias, + prompt="Extract a high-level list of all topics covered by documents our knowledge base.", + system_prompt=( + "You must call tools before answering. " + "Do not use outside knowledge; only use tool results. " + "You can use as many tool calls as required to answer the user query." + ), + output_format=TopicList, + tool_alias="doc-search", + with_trace=dd.TraceType.ALL_MESSAGES, # Enable trace to capture tool call history + ) + ) + + config_builder.add_column( + dd.ExpressionColumnConfig( + name="topic", + expr="{{ topic_candidates.topics | random }}", + ) + ) + + qa_prompt = """\ +Create a question-answer pair on the topic "{{topic}}", with supporting text and citation. +The supporting_passage must be a 2-4 sentence excerpt copied from the tool result that demonstrates +why the answer is correct. +""" + + config_builder.add_column( + dd.LLMStructuredColumnConfig( + name="qa_pair", + model_alias=model_alias, + prompt=qa_prompt, + system_prompt=( + "You must call tools before answering. " + "Do not use outside knowledge; only use tool results. " + "You can use as many tool calls as required to answer the user query." 
+ ), + output_format=QAPair, + tool_alias="doc-search", + with_trace=dd.TraceType.ALL_MESSAGES, # Enable trace to capture tool call history + extract_reasoning_content=True, + ) + ) + + config_builder.add_column( + dd.ExpressionColumnConfig( + name="question", + expr="{{ qa_pair.question }}", + ) + ) + config_builder.add_column( + dd.ExpressionColumnConfig( + name="answer", + expr="{{ qa_pair.answer }}", + ) + ) + config_builder.add_column( + dd.ExpressionColumnConfig( + name="supporting_passage", + expr="{{ qa_pair.supporting_passage }}", + ) + ) + config_builder.add_column( + dd.ExpressionColumnConfig( + name="citation", + expr="{{ qa_pair.citation }}", + ) + ) + return config_builder + + +def generate_preview( + config_builder: dd.DataDesignerConfigBuilder, + num_records: int, + mcp_provider: dd.LocalStdioMCPProvider, +) -> PreviewResults: + """Run Data Designer preview with the MCP provider.""" + data_designer = DataDesigner(mcp_providers=[mcp_provider]) + # Traces are enabled per-column via with_trace=True on LLM column configs + return data_designer.preview(config_builder, num_records=num_records) + + +def _truncate(text: str, max_length: int = 100) -> str: + """Truncate text to max_length, adding ellipsis if needed.""" + text = text.replace("\n", " ").strip() + if len(text) <= max_length: + return text + return text[: max_length - 3] + "..." + + +def _summarize_content(content: object) -> str: + """Summarize ChatML-style content blocks for display.""" + if isinstance(content, list): + parts: list[str] = [] + for block in content: + if isinstance(block, dict): + block_type = block.get("type", "block") + if block_type == "text": + text = str(block.get("text", "")) + if text: + parts.append(text) + elif block_type == "image_url": + parts.append("[image]") + else: + parts.append(f"[{block_type}]") + else: + parts.append(str(block)) + return " ".join(parts) + return str(content) + + +def _format_trace_step(msg: dict[str, object]) -> str: + """Format a single trace message as a concise one-liner.""" + role = msg.get("role", "unknown") + content = _summarize_content(msg.get("content", "")) + reasoning = msg.get("reasoning_content") + tool_calls = msg.get("tool_calls") + tool_call_id = msg.get("tool_call_id") + + if role == "system": + return f"[bold cyan]system[/]({_truncate(str(content))})" + + if role == "user": + return f"[bold green]user[/]({_truncate(str(content))})" + + if role == "assistant": + parts: list[str] = [] + if reasoning: + parts.append(f"[bold magenta]reasoning[/]({_truncate(str(reasoning))})") + if tool_calls and isinstance(tool_calls, list): + for tc in tool_calls: + if isinstance(tc, dict): + func = tc.get("function", {}) + if isinstance(func, dict): + name = func.get("name", "?") + args = func.get("arguments", "") + parts.append(f"[bold yellow]tool_call[/]({name}: {_truncate(str(args), 60)})") + if content: + parts.append(f"[bold blue]content[/]({_truncate(str(content))})") + return "\n".join(parts) if parts else "[bold blue]assistant[/](empty)" + + if role == "tool": + tool_id = str(tool_call_id or "?")[:8] + return f"[bold red]tool_response[/]([{tool_id}] {_truncate(str(content), 80)})" + + return f"[dim]{role}[/]({_truncate(str(content))})" + + +def _display_column_trace(column_name: str, trace: list[dict[str, object]]) -> None: + """Display a trace for a single column using Rich Panel.""" + from rich.console import Console + from rich.panel import Panel + + console = Console() + lines: list[str] = [] + + for msg in trace: + if not isinstance(msg, dict): + continue 
+ formatted = _format_trace_step(msg) + for line in formatted.split("\n"): + lines.append(f" * {line}") + + trace_content = "\n".join(lines) if lines else " (no trace messages)" + panel = Panel( + trace_content, + title=f"[bold]Column Trace: {column_name}[/]", + border_style="blue", + padding=(0, 1), + ) + console.print(panel) + + +def display_preview_record(preview_results: PreviewResults) -> None: + """Display a sample record from the preview results with trace visualization.""" + from rich.console import Console + + console = Console() + dataset = preview_results.dataset + + if dataset is None or dataset.empty: + console.print("[red]No preview records generated.[/]") + return + + record = dataset.iloc[0].to_dict() + + # Find trace columns and their base column names + trace_columns = [col for col in dataset.columns if col.endswith("__trace")] + + # Display non-trace columns as summary + non_trace_record = {k: v for k, v in record.items() if not k.endswith("__trace")} + console.print("\n[bold]Sample Record (data columns):[/]") + console.print(json.dumps(non_trace_record, indent=2, default=str)) + + # Display each trace column in its own panel + if trace_columns: + console.print("\n[bold]Generation Traces:[/]") + for trace_col in trace_columns: + base_name = trace_col.replace("__trace", "") + trace_data = record.get(trace_col) + if isinstance(trace_data, list): + _display_column_trace(base_name, trace_data) + + preview_results.display_sample_record() + + +def serve() -> None: + """Run the MCP server (called when launched as subprocess by Data Designer).""" + pdf_sources_json = os.environ.get("PDF_SOURCES", "[]") + pdf_sources = json.loads(pdf_sources_json) + if not pdf_sources: + pdf_sources = [DEFAULT_PDF_URL] + initialize_search_index(pdf_sources) + mcp_server.run() + + +def parse_args() -> argparse.Namespace: + """Parse command line arguments.""" + parser = argparse.ArgumentParser(description="Generate document Q&A pairs using MCP tool calls with BM25S search.") + subparsers = parser.add_subparsers(dest="command") + + # 'serve' subcommand for running the MCP server + subparsers.add_parser("serve", help="Run the MCP server (used by Data Designer)") + + # Default command arguments (demo mode) + parser.add_argument("--model-alias", type=str, default="nvidia-reasoning", help="Model alias to use for generation") + parser.add_argument("--num-records", type=int, default=4, help="Number of Q&A pairs to generate") + parser.add_argument( + "--pdf", + type=str, + action="append", + dest="pdfs", + metavar="PATH_OR_URL", + help="PDF file path or URL to index (can be specified multiple times). 
Defaults to a sample PDF if not provided.", + ) + # For compatibility with Makefile test-run-recipes target (ignored in demo mode) + parser.add_argument("--artifact-path", type=str, default=None, help=argparse.SUPPRESS) + + return parser.parse_args() + + +def main() -> None: + """Main entry point for the demo.""" + args = parse_args() + + # Handle 'serve' subcommand + if args.command == "serve": + serve() + return + + # Demo mode: run Data Designer with the BM25S MCP server + if os.environ.get("NVIDIA_API_KEY") is None and args.model_alias.startswith("nvidia"): + raise RuntimeError("NVIDIA_API_KEY must be set when using NVIDIA model aliases.") + + # Use provided PDFs or fall back to default + pdf_sources = args.pdfs if args.pdfs else [DEFAULT_PDF_URL] + + # Configure MCP provider to run via stdio transport (local subprocess) + mcp_provider = dd.LocalStdioMCPProvider( + name=MCP_SERVER_NAME, + command=sys.executable, + args=[str(Path(__file__).resolve()), "serve"], + env={"PDF_SOURCES": json.dumps(pdf_sources)}, + ) + + config_builder = build_config( + model_alias=args.model_alias, + provider_name=MCP_SERVER_NAME, + ) + + preview_results = generate_preview( + config_builder=config_builder, + num_records=args.num_records, + mcp_provider=mcp_provider, + ) + + display_preview_record(preview_results) + + +if __name__ == "__main__": + main() diff --git a/fern/components/CustomCard.tsx b/fern/components/CustomCard.tsx new file mode 100644 index 00000000..16fc0e2c --- /dev/null +++ b/fern/components/CustomCard.tsx @@ -0,0 +1,10 @@ +export const CustomCard = ({ title, text, link, sparkle = false }) => { + return ( + +

+ {title} {sparkle && "✨"} +

+

{text}

+
+ ); +}; diff --git a/fern/components/ExpandableCode.tsx b/fern/components/ExpandableCode.tsx new file mode 100644 index 00000000..7e2a4a94 --- /dev/null +++ b/fern/components/ExpandableCode.tsx @@ -0,0 +1,47 @@ +/** + * ExpandableCode - Collapsible code block with summary. + * + * Used for "Full source" code snippets in dev notes. + * NOTE: Fern's custom component pipeline uses the automatic JSX runtime. + * Do NOT import React -- the `react` module is not resolvable in Fern's build. + * + * Usage in MDX: + * import { ExpandableCode } from "@/components/ExpandableCode"; + * + * + */ + +export interface ExpandableCodeProps { + summary: string; + code: string; + language?: string; + defaultOpen?: boolean; +} + +export const ExpandableCode = ({ + summary, + code, + language = "python", + defaultOpen = false, +}: ExpandableCodeProps) => { + return ( +
+ + {summary} + +
+
+          {code.trim()}
+        
+
+
+ ); +}; diff --git a/fern/components/MetricsTable.tsx b/fern/components/MetricsTable.tsx new file mode 100644 index 00000000..234c8f53 --- /dev/null +++ b/fern/components/MetricsTable.tsx @@ -0,0 +1,101 @@ +/** + * MetricsTable - Styled comparison table for benchmark results. + * + * Optional: highlights best values per column (bold). + * NOTE: Fern's custom component pipeline uses the automatic JSX runtime. + * Do NOT import React -- the `react` module is not resolvable in Fern's build. + * + * Usage in MDX: + * import { MetricsTable } from "@/components/MetricsTable"; + * + * + */ + +export interface MetricsTableProps { + headers: string[]; + rows: (string | number)[][]; + /** Column indices where lower is better (for highlighting) */ + lowerIsBetter?: number[]; + /** Column indices where higher is better (default for non-lowerIsBetter) */ + higherIsBetter?: number[]; +} + +function findBestIndices( + rows: (string | number)[][], + colIndex: number, + lowerIsBetter: boolean +): Set { + const values = rows.map((r) => { + const v = r[colIndex]; + if (typeof v === "number") return v; + const parsed = parseFloat(String(v)); + return isNaN(parsed) ? (lowerIsBetter ? Infinity : -Infinity) : parsed; + }); + const best = lowerIsBetter ? Math.min(...values) : Math.max(...values); + const bestIndices = new Set(); + values.forEach((v, i) => { + if (v === best) bestIndices.add(i); + }); + return bestIndices; +} + +export const MetricsTable = ({ + headers, + rows, + lowerIsBetter = [], + higherIsBetter = [], +}: MetricsTableProps) => { + const lowerSet = new Set(lowerIsBetter); + const bestByCol: Record> = {}; + + for (let c = 0; c < headers.length; c++) { + if (lowerSet.has(c)) { + bestByCol[c] = findBestIndices(rows, c, true); + } else if (higherIsBetter.includes(c)) { + bestByCol[c] = findBestIndices(rows, c, false); + } else { + const numLike = rows.every((r) => { + const v = r[c]; + return typeof v === "number" || !isNaN(parseFloat(String(v))); + }); + if (numLike) { + bestByCol[c] = findBestIndices(rows, c, false); + } + } + } + + return ( +
+ + + + {headers.map((h, i) => ( + + ))} + + + + {rows.map((row, rowIdx) => ( + + {row.map((cell, colIdx) => { + const isBest = bestByCol[colIdx]?.has(rowIdx); + return ( + + ); + })} + + ))} + +
{h}
+ {cell} +
+
+ ); +}; diff --git a/fern/components/NotebookViewer.tsx b/fern/components/NotebookViewer.tsx new file mode 100644 index 00000000..c942c6e0 --- /dev/null +++ b/fern/components/NotebookViewer.tsx @@ -0,0 +1,220 @@ +/** + * NotebookViewer - Renders Jupyter notebook content in Fern docs. + * + * Accepts notebook cells (markdown + code) and optionally a Colab URL. + * Designed to work with Jupytext-generated notebooks from docs/notebook_source/*.py. + * + * NOTE: Fern's custom component pipeline uses the automatic JSX runtime. + * Do NOT import React -- the `react` module is not resolvable in Fern's build. + * This means class components (e.g. ErrorBoundary) are also not available. + * + * Usage in MDX: + * import { NotebookViewer } from "@/components/NotebookViewer"; + * import notebook from "@/components/notebooks/1-the-basics"; + * + * + */ + +export interface CellOutput { + type: "text" | "image"; + data: string; + format?: "plain" | "html"; +} + +export interface NotebookCell { + type: "markdown" | "code"; + source: string; + language?: string; + outputs?: CellOutput[]; +} + +export interface NotebookData { + cells: NotebookCell[]; +} + +export interface NotebookViewerProps { + /** Notebook data with cells array. If import fails, this may be undefined. */ + notebook?: NotebookData | null; + /** Optional Colab URL for "Run in Colab" badge */ + colabUrl?: string; + /** Show code cell outputs (default: true) */ + showOutputs?: boolean; +} + +function NotebookViewerError({ message, detail }: { message: string; detail?: string }) { + return ( +
+ NotebookViewer error: {message} + {detail && ( +
+          {detail}
+        
+ )} +
+ ); +} + +function escapeHtml(text: string): string { + if (typeof text !== "string") return ""; + return text + .replace(/&/g, "&") + .replace(//g, ">") + .replace(/"/g, """); +} + +function isSafeUrl(url: string): boolean { + const trimmed = url.trim(); + return ( + trimmed.startsWith("http://") || + trimmed.startsWith("https://") || + trimmed.startsWith("mailto:") || + trimmed.startsWith("#") || + trimmed.startsWith("/") + ); +} + +function renderMarkdown(markdown: string): string { + if (typeof markdown !== "string") return ""; + let html = markdown + .replace(/&/g, "&") + .replace(//g, ">") + .replace(/\[([^\]]+)\]\(([^)]+)\)/g, (_, text, url) => + isSafeUrl(url) + ? `${text}` + : escapeHtml(`[${text}](${url})`) + ) + .replace(/\*\*(.*?)\*\*/g, "$1") + .replace(/\*(.*?)\*/g, "$1") + .replace(/`([^`]+)`/g, "$1"); + html = html + .split("\n") + .map((line) => { + if (/^#### (.*)$/.test(line)) return `

${line.slice(5)}

`; + if (/^### (.*)$/.test(line)) return `

${line.slice(4)}

`; + if (/^## (.*)$/.test(line)) return `

${line.slice(3)}

`; + if (/^# (.*)$/.test(line)) return `

${line.slice(2)}

`; + if (/^- (.*)$/.test(line)) return `
  • ${line.slice(2)}
  • `; + if (/^\d+\. (.*)$/.test(line)) return `
  • ${line.replace(/^\d+\. /, "")}
  • `; + if (line.trim() === "") return ""; + return `

    ${line}

    `; + }) + .join("\n"); + return html.replace(/(
  • .*?<\/li>\n?)+/gs, (m) => `
      ${m}
    `); +} + +function renderCell(cell: NotebookCell, index: number, showOutputs: boolean) { + return ( +
    + {cell.type === "markdown" ? ( +
    + ) : ( + <> +
    +
    +              
    +            
    +
    + {showOutputs && cell.outputs && cell.outputs.length > 0 && ( +
    + {cell.outputs.map((out, i) => + out.type === "image" ? ( + Output + ) : out.format === "html" ? ( +
    + ) : ( +
    +                )
    +              )}
    +            
    + )} + + )} +
    + ); +} + +export const NotebookViewer = ({ + notebook, + colabUrl, + showOutputs = true, +}: NotebookViewerProps) => { + if (notebook == null || typeof notebook !== "object") { + return ( + + ); + } + + const cells = notebook?.cells; + if (!Array.isArray(cells)) { + return ( + + ); + } + + return ( +
    + {colabUrl && ( + + )} + +
    + {cells.map((cell, index) => renderCell(cell, index, showOutputs))} +
    +
    + ); +}; diff --git a/fern/components/PipelineDiagram.tsx b/fern/components/PipelineDiagram.tsx new file mode 100644 index 00000000..750b5c08 --- /dev/null +++ b/fern/components/PipelineDiagram.tsx @@ -0,0 +1,40 @@ +/** + * PipelineDiagram - Renders ASCII pipeline diagrams with monospace styling. + * + * Used for SDG stage diagrams in design-principles dev note. + * NOTE: Fern's custom component pipeline uses the automatic JSX runtime. + * Do NOT import React -- the `react` module is not resolvable in Fern's build. + * + * Usage in MDX: + * import { PipelineDiagram } from "@/components/PipelineDiagram"; + * + * + */ + +export interface PipelineDiagramProps { + diagram: string; + title?: string; + maxWidth?: string; +} + +export const PipelineDiagram = ({ + diagram, + title, + maxWidth = "640px", +}: PipelineDiagramProps) => { + return ( +
    + {title &&
    {title}
    } +
    +        {diagram.trim()}
    +      
    +
    + ); +}; diff --git a/fern/components/TrajectoryViewer.tsx b/fern/components/TrajectoryViewer.tsx new file mode 100644 index 00000000..4dafc9c0 --- /dev/null +++ b/fern/components/TrajectoryViewer.tsx @@ -0,0 +1,120 @@ +/** + * TrajectoryViewer - Renders multi-turn research trajectories with tool calls. + * + * Displays search, open, find, and answer steps with color-coded styling. + * Used for deep research / MCP tool-use dev notes. + * + * NOTE: Fern's custom component pipeline uses the automatic JSX runtime. + * Do NOT import React -- the `react` module is not resolvable in Fern's build. + * + * Usage in MDX: + * import { TrajectoryViewer } from "@/components/TrajectoryViewer"; + * import trajectory from "@/components/trajectories/4hop-example"; + * + * + */ + +export interface ToolCall { + fn: "search" | "open" | "find" | "answer"; + arg?: string; + body?: string; + isGolden?: boolean; +} + +export interface TrajectoryTurn { + turnIndex: number; + calls: ToolCall[]; +} + +export interface TrajectoryViewerProps { + question: string; + referenceAnswer?: string; + goldenPassageHint?: string; + turns: TrajectoryTurn[]; + summary?: string; + defaultOpen?: boolean; +} + +function ToolCallBlock({ call }: { call: ToolCall }) { + const isAnswer = call.fn === "answer"; + const argDisplay = call.arg ?? ""; + const cn = `trajectory-viewer__call trajectory-viewer__call--${call.fn}`; + + if (isAnswer && call.body) { + return ( +
    + {call.fn} +
    +
    + ); + } + + return ( +
    + {call.fn} + + {argDisplay} + {call.isGolden && " ⭐"} + +
    + ); +} + +export const TrajectoryViewer = ({ + question, + referenceAnswer, + goldenPassageHint, + turns, + summary, + defaultOpen = false, +}: TrajectoryViewerProps) => { + const content = ( +
    +
    + Q: {question} +
    + {referenceAnswer && ( +
    + Reference: {referenceAnswer} +
    + )} + {goldenPassageHint && ( +
    {goldenPassageHint}
    + )} +
    + {turns.map((turn) => ( +
    +
    T{turn.turnIndex}
    +
    +
    1 ? "trajectory-viewer__group--multi" : "" + }`} + > + {turn.calls.map((call, i) => ( + + ))} +
    +
    +
    + ))} +
    +
    + ); + + if (summary) { + return ( +
    + + {summary} + + {content} +
    + ); + } + + return content; +}; diff --git a/fern/components/diagrams/sdg-pipeline.ts b/fern/components/diagrams/sdg-pipeline.ts new file mode 100644 index 00000000..59f1758c --- /dev/null +++ b/fern/components/diagrams/sdg-pipeline.ts @@ -0,0 +1,26 @@ +/** SDG pipeline diagram from docs/devnotes/posts/design-principles.md */ + +export const sdgPipelineDiagram = ` + Seed Documents Seed dataset column ingests documents + │ from local files or HuggingFace + ▼ +┌─────────────────────────┐ +│ Artifact Extraction │ LLM extracts key concepts, entities, +│ │ relationships from each document +└───────────┬─────────────┘ + │ + ▼ +┌─────────────────────────┐ +│ QA Generation │ LLM generates questions & answers grounded +│ │ in the extracted artifacts +└───────────┬─────────────┘ + │ + ▼ +┌─────────────────────────┐ +│ Quality Evaluation │ LLM judge scores each QA pair +│ │ on relevance, accuracy, clarity +└───────────┬─────────────┘ + │ + ▼ + Final Dataset +`; diff --git a/fern/components/notebooks/1-the-basics.json b/fern/components/notebooks/1-the-basics.json new file mode 100644 index 00000000..1c8dbf7f --- /dev/null +++ b/fern/components/notebooks/1-the-basics.json @@ -0,0 +1,145 @@ +{ + "cells": [ + { + "type": "markdown", + "source": "# \ud83c\udfa8 Data Designer Tutorial: The Basics\n\n#### \ud83d\udcda What you'll learn\n\nThis notebook demonstrates the basics of Data Designer by generating a simple product review dataset." + }, + { + "type": "markdown", + "source": "### \ud83d\udce6 Import Data Designer\n\n- `data_designer.config` provides access to the configuration API.\n\n- `DataDesigner` is the main interface for data generation." + }, + { + "type": "markdown", + "source": "### \u26a1 Colab Setup\n\nRun the cells below to install the dependencies and set up the API key. If you don't have an API key, you can generate one from [build.nvidia.com](https://build.nvidia.com)." + }, + { + "type": "code", + "source": "%%capture\n!pip install -U data-designer", + "language": "python" + }, + { + "type": "code", + "source": "import getpass\nimport os\n\nfrom google.colab import userdata\n\ntry:\n os.environ[\"NVIDIA_API_KEY\"] = userdata.get(\"NVIDIA_API_KEY\")\nexcept userdata.SecretNotFoundError:\n os.environ[\"NVIDIA_API_KEY\"] = getpass.getpass(\"Enter your NVIDIA API key: \")", + "language": "python" + }, + { + "type": "code", + "source": "import data_designer.config as dd\nfrom data_designer.interface import DataDesigner", + "language": "python" + }, + { + "type": "markdown", + "source": "### \u2699\ufe0f Initialize the Data Designer interface\n\n- `DataDesigner` is the main object responsible for managing the data generation process.\n\n- When initialized without arguments, the [default model providers](https://nvidia-nemo.github.io/DataDesigner/latest/concepts/models/default-model-settings/) are used." 
+ }, + { + "type": "code", + "source": "data_designer = DataDesigner()", + "language": "python" + }, + { + "type": "markdown", + "source": "### \ud83c\udf9b\ufe0f Define model configurations\n\n- Each `ModelConfig` defines a model that can be used during the generation process.\n\n- The \"model alias\" is used to reference the model in the Data Designer config (as we will see below).\n\n- The \"model provider\" is the external service that hosts the model (see the [model config](https://nvidia-nemo.github.io/DataDesigner/latest/concepts/models/default-model-settings/) docs for more details).\n\n- By default, we use [build.nvidia.com](https://build.nvidia.com/models) as the model provider." + }, + { + "type": "code", + "source": "# This name is set in the model provider configuration.\nMODEL_PROVIDER = \"nvidia\"\n\n# The model ID is from build.nvidia.com.\nMODEL_ID = \"nvidia/nemotron-3-nano-30b-a3b\"\n\n# We choose this alias to be descriptive for our use case.\nMODEL_ALIAS = \"nemotron-nano-v3\"\n\nmodel_configs = [\n dd.ModelConfig(\n alias=MODEL_ALIAS,\n model=MODEL_ID,\n provider=MODEL_PROVIDER,\n inference_parameters=dd.ChatCompletionInferenceParams(\n temperature=1.0,\n top_p=1.0,\n max_tokens=2048,\n extra_body={\"chat_template_kwargs\": {\"enable_thinking\": False}},\n ),\n )\n]", + "language": "python" + }, + { + "type": "markdown", + "source": "### \ud83c\udfd7\ufe0f Initialize the Data Designer Config Builder\n\n- The Data Designer config defines the dataset schema and generation process.\n\n- The config builder provides an intuitive interface for building this configuration.\n\n- The list of model configs is provided to the builder at initialization." + }, + { + "type": "code", + "source": "config_builder = dd.DataDesignerConfigBuilder(model_configs=model_configs)", + "language": "python" + }, + { + "type": "markdown", + "source": "## \ud83c\udfb2 Getting started with sampler columns\n\n- Sampler columns offer non-LLM based generation of synthetic data.\n\n- They are particularly useful for **steering the diversity** of the generated data, as we demonstrate below.\n\n
    \n\nYou can view available samplers using the config builder's `info` property:" + }, + { + "type": "code", + "source": "config_builder.info.display(\"samplers\")", + "language": "python" + }, + { + "type": "markdown", + "source": "Let's start designing our product review dataset by adding product category and subcategory columns." + }, + { + "type": "code", + "source": "config_builder.add_column(\n dd.SamplerColumnConfig(\n name=\"product_category\",\n sampler_type=dd.SamplerType.CATEGORY,\n params=dd.CategorySamplerParams(\n values=[\n \"Electronics\",\n \"Clothing\",\n \"Home & Kitchen\",\n \"Books\",\n \"Home Office\",\n ],\n ),\n )\n)\n\nconfig_builder.add_column(\n dd.SamplerColumnConfig(\n name=\"product_subcategory\",\n sampler_type=dd.SamplerType.SUBCATEGORY,\n params=dd.SubcategorySamplerParams(\n category=\"product_category\",\n values={\n \"Electronics\": [\n \"Smartphones\",\n \"Laptops\",\n \"Headphones\",\n \"Cameras\",\n \"Accessories\",\n ],\n \"Clothing\": [\n \"Men's Clothing\",\n \"Women's Clothing\",\n \"Winter Coats\",\n \"Activewear\",\n \"Accessories\",\n ],\n \"Home & Kitchen\": [\n \"Appliances\",\n \"Cookware\",\n \"Furniture\",\n \"Decor\",\n \"Organization\",\n ],\n \"Books\": [\n \"Fiction\",\n \"Non-Fiction\",\n \"Self-Help\",\n \"Textbooks\",\n \"Classics\",\n ],\n \"Home Office\": [\n \"Desks\",\n \"Chairs\",\n \"Storage\",\n \"Office Supplies\",\n \"Lighting\",\n ],\n },\n ),\n )\n)\n\nconfig_builder.add_column(\n dd.SamplerColumnConfig(\n name=\"target_age_range\",\n sampler_type=dd.SamplerType.CATEGORY,\n params=dd.CategorySamplerParams(values=[\"18-25\", \"25-35\", \"35-50\", \"50-65\", \"65+\"]),\n )\n)\n\n# Optionally validate that the columns are configured correctly.\ndata_designer.validate(config_builder)", + "language": "python" + }, + { + "type": "markdown", + "source": "Next, let's add samplers to generate data related to the customer and their review." + }, + { + "type": "code", + "source": "config_builder.add_column(\n dd.SamplerColumnConfig(\n name=\"customer\",\n sampler_type=dd.SamplerType.PERSON_FROM_FAKER,\n params=dd.PersonFromFakerSamplerParams(age_range=[18, 70], locale=\"en_US\"),\n )\n)\n\nconfig_builder.add_column(\n dd.SamplerColumnConfig(\n name=\"number_of_stars\",\n sampler_type=dd.SamplerType.UNIFORM,\n params=dd.UniformSamplerParams(low=1, high=5),\n convert_to=\"int\", # Convert the sampled float to an integer.\n )\n)\n\nconfig_builder.add_column(\n dd.SamplerColumnConfig(\n name=\"review_style\",\n sampler_type=dd.SamplerType.CATEGORY,\n params=dd.CategorySamplerParams(\n values=[\"rambling\", \"brief\", \"detailed\", \"structured with bullet points\"],\n weights=[1, 2, 2, 1],\n ),\n )\n)\n\ndata_designer.validate(config_builder)", + "language": "python" + }, + { + "type": "markdown", + "source": "## \ud83e\udd9c LLM-generated columns\n\n- The real power of Data Designer comes from leveraging LLMs to generate text, code, and structured data.\n\n- When prompting the LLM, we can use Jinja templating to reference other columns in the dataset.\n\n- As we see below, nested json fields can be accessed using dot notation." + }, + { + "type": "code", + "source": "config_builder.add_column(\n dd.LLMTextColumnConfig(\n name=\"product_name\",\n prompt=(\n \"You are a helpful assistant that generates product names. DO NOT add quotes around the product name.\\n\\n\"\n \"Come up with a creative product name for a product in the '{{ product_category }}' category, focusing \"\n \"on products related to '{{ product_subcategory }}'. 
The target age range of the ideal customer is \"\n \"{{ target_age_range }} years old. Respond with only the product name, no other text.\"\n ),\n model_alias=MODEL_ALIAS,\n )\n)\n\nconfig_builder.add_column(\n dd.LLMTextColumnConfig(\n name=\"customer_review\",\n prompt=(\n \"You are a customer named {{ customer.first_name }} from {{ customer.city }}, {{ customer.state }}. \"\n \"You are {{ customer.age }} years old and recently purchased a product called {{ product_name }}. \"\n \"Write a review of this product, which you gave a rating of {{ number_of_stars }} stars. \"\n \"The style of the review should be '{{ review_style }}'. \"\n \"Respond with only the review, no other text.\"\n ),\n model_alias=MODEL_ALIAS,\n )\n)\n\ndata_designer.validate(config_builder)", + "language": "python" + }, + { + "type": "markdown", + "source": "### \ud83d\udd01 Iteration is key \u2013\u00a0preview the dataset!\n\n1. Use the `preview` method to generate a sample of records quickly.\n\n2. Inspect the results for quality and format issues.\n\n3. Adjust column configurations, prompts, or parameters as needed.\n\n4. Re-run the preview until satisfied." + }, + { + "type": "code", + "source": "preview = data_designer.preview(config_builder, num_records=2)", + "language": "python" + }, + { + "type": "code", + "source": "# Run this cell multiple times to cycle through the 2 preview records.\npreview.display_sample_record()", + "language": "python" + }, + { + "type": "code", + "source": "# The preview dataset is available as a pandas DataFrame.\npreview.dataset", + "language": "python" + }, + { + "type": "markdown", + "source": "### \ud83d\udcca Analyze the generated data\n\n- Data Designer automatically generates a basic statistical analysis of the generated data.\n\n- This analysis is available via the `analysis` property of generation result objects." + }, + { + "type": "code", + "source": "# Print the analysis as a table.\npreview.analysis.to_report()", + "language": "python" + }, + { + "type": "markdown", + "source": "### \ud83c\udd99 Scale up!\n\n- Happy with your preview data?\n\n- Use the `create` method to submit larger Data Designer generation jobs." 
+ }, + { + "type": "code", + "source": "results = data_designer.create(config_builder, num_records=10, dataset_name=\"tutorial-1\")", + "language": "python" + }, + { + "type": "code", + "source": "# Load the generated dataset as a pandas DataFrame.\ndataset = results.load_dataset()\n\ndataset.head()", + "language": "python" + }, + { + "type": "code", + "source": "# Load the analysis results into memory.\nanalysis = results.load_analysis()\n\nanalysis.to_report()", + "language": "python" + }, + { + "type": "markdown", + "source": "## \u23ed\ufe0f Next Steps\n\nNow that you've seen the basics of Data Designer, check out the following notebooks to learn more about:\n\n- [Structured outputs and jinja expressions](https://nvidia-nemo.github.io/DataDesigner/latest/notebooks/2-structured-outputs-and-jinja-expressions/)\n\n- [Seeding synthetic data generation with an external dataset](https://nvidia-nemo.github.io/DataDesigner/latest/notebooks/3-seeding-with-a-dataset/)\n\n- [Providing images as context](https://nvidia-nemo.github.io/DataDesigner/latest/notebooks/4-providing-images-as-context/)\n\n- [Generating images](https://nvidia-nemo.github.io/DataDesigner/latest/notebooks/5-generating-images/)" + } + ] +} \ No newline at end of file diff --git a/fern/components/notebooks/1-the-basics.ts b/fern/components/notebooks/1-the-basics.ts new file mode 100644 index 00000000..04654ada --- /dev/null +++ b/fern/components/notebooks/1-the-basics.ts @@ -0,0 +1,144 @@ +/** Auto-generated by ipynb-to-fern-json.py - do not edit */ +export default { cells: [ + { + "type": "markdown", + "source": "# \ud83c\udfa8 Data Designer Tutorial: The Basics\n\n#### \ud83d\udcda What you'll learn\n\nThis notebook demonstrates the basics of Data Designer by generating a simple product review dataset." + }, + { + "type": "markdown", + "source": "### \ud83d\udce6 Import Data Designer\n\n- `data_designer.config` provides access to the configuration API.\n\n- `DataDesigner` is the main interface for data generation." + }, + { + "type": "markdown", + "source": "### \u26a1 Colab Setup\n\nRun the cells below to install the dependencies and set up the API key. If you don't have an API key, you can generate one from [build.nvidia.com](https://build.nvidia.com)." + }, + { + "type": "code", + "source": "%%capture\n!pip install -U data-designer", + "language": "python" + }, + { + "type": "code", + "source": "import getpass\nimport os\n\nfrom google.colab import userdata\n\ntry:\n os.environ[\"NVIDIA_API_KEY\"] = userdata.get(\"NVIDIA_API_KEY\")\nexcept userdata.SecretNotFoundError:\n os.environ[\"NVIDIA_API_KEY\"] = getpass.getpass(\"Enter your NVIDIA API key: \")", + "language": "python" + }, + { + "type": "code", + "source": "import data_designer.config as dd\nfrom data_designer.interface import DataDesigner", + "language": "python" + }, + { + "type": "markdown", + "source": "### \u2699\ufe0f Initialize the Data Designer interface\n\n- `DataDesigner` is the main object responsible for managing the data generation process.\n\n- When initialized without arguments, the [default model providers](https://nvidia-nemo.github.io/DataDesigner/latest/concepts/models/default-model-settings/) are used." 
+ }, + { + "type": "code", + "source": "data_designer = DataDesigner()", + "language": "python" + }, + { + "type": "markdown", + "source": "### \ud83c\udf9b\ufe0f Define model configurations\n\n- Each `ModelConfig` defines a model that can be used during the generation process.\n\n- The \"model alias\" is used to reference the model in the Data Designer config (as we will see below).\n\n- The \"model provider\" is the external service that hosts the model (see the [model config](https://nvidia-nemo.github.io/DataDesigner/latest/concepts/models/default-model-settings/) docs for more details).\n\n- By default, we use [build.nvidia.com](https://build.nvidia.com/models) as the model provider." + }, + { + "type": "code", + "source": "# This name is set in the model provider configuration.\nMODEL_PROVIDER = \"nvidia\"\n\n# The model ID is from build.nvidia.com.\nMODEL_ID = \"nvidia/nemotron-3-nano-30b-a3b\"\n\n# We choose this alias to be descriptive for our use case.\nMODEL_ALIAS = \"nemotron-nano-v3\"\n\nmodel_configs = [\n dd.ModelConfig(\n alias=MODEL_ALIAS,\n model=MODEL_ID,\n provider=MODEL_PROVIDER,\n inference_parameters=dd.ChatCompletionInferenceParams(\n temperature=1.0,\n top_p=1.0,\n max_tokens=2048,\n extra_body={\"chat_template_kwargs\": {\"enable_thinking\": False}},\n ),\n )\n]", + "language": "python" + }, + { + "type": "markdown", + "source": "### \ud83c\udfd7\ufe0f Initialize the Data Designer Config Builder\n\n- The Data Designer config defines the dataset schema and generation process.\n\n- The config builder provides an intuitive interface for building this configuration.\n\n- The list of model configs is provided to the builder at initialization." + }, + { + "type": "code", + "source": "config_builder = dd.DataDesignerConfigBuilder(model_configs=model_configs)", + "language": "python" + }, + { + "type": "markdown", + "source": "## \ud83c\udfb2 Getting started with sampler columns\n\n- Sampler columns offer non-LLM based generation of synthetic data.\n\n- They are particularly useful for **steering the diversity** of the generated data, as we demonstrate below.\n\n
    \n\nYou can view available samplers using the config builder's `info` property:" + }, + { + "type": "code", + "source": "config_builder.info.display(\"samplers\")", + "language": "python" + }, + { + "type": "markdown", + "source": "Let's start designing our product review dataset by adding product category and subcategory columns." + }, + { + "type": "code", + "source": "config_builder.add_column(\n dd.SamplerColumnConfig(\n name=\"product_category\",\n sampler_type=dd.SamplerType.CATEGORY,\n params=dd.CategorySamplerParams(\n values=[\n \"Electronics\",\n \"Clothing\",\n \"Home & Kitchen\",\n \"Books\",\n \"Home Office\",\n ],\n ),\n )\n)\n\nconfig_builder.add_column(\n dd.SamplerColumnConfig(\n name=\"product_subcategory\",\n sampler_type=dd.SamplerType.SUBCATEGORY,\n params=dd.SubcategorySamplerParams(\n category=\"product_category\",\n values={\n \"Electronics\": [\n \"Smartphones\",\n \"Laptops\",\n \"Headphones\",\n \"Cameras\",\n \"Accessories\",\n ],\n \"Clothing\": [\n \"Men's Clothing\",\n \"Women's Clothing\",\n \"Winter Coats\",\n \"Activewear\",\n \"Accessories\",\n ],\n \"Home & Kitchen\": [\n \"Appliances\",\n \"Cookware\",\n \"Furniture\",\n \"Decor\",\n \"Organization\",\n ],\n \"Books\": [\n \"Fiction\",\n \"Non-Fiction\",\n \"Self-Help\",\n \"Textbooks\",\n \"Classics\",\n ],\n \"Home Office\": [\n \"Desks\",\n \"Chairs\",\n \"Storage\",\n \"Office Supplies\",\n \"Lighting\",\n ],\n },\n ),\n )\n)\n\nconfig_builder.add_column(\n dd.SamplerColumnConfig(\n name=\"target_age_range\",\n sampler_type=dd.SamplerType.CATEGORY,\n params=dd.CategorySamplerParams(values=[\"18-25\", \"25-35\", \"35-50\", \"50-65\", \"65+\"]),\n )\n)\n\n# Optionally validate that the columns are configured correctly.\ndata_designer.validate(config_builder)", + "language": "python" + }, + { + "type": "markdown", + "source": "Next, let's add samplers to generate data related to the customer and their review." + }, + { + "type": "code", + "source": "config_builder.add_column(\n dd.SamplerColumnConfig(\n name=\"customer\",\n sampler_type=dd.SamplerType.PERSON_FROM_FAKER,\n params=dd.PersonFromFakerSamplerParams(age_range=[18, 70], locale=\"en_US\"),\n )\n)\n\nconfig_builder.add_column(\n dd.SamplerColumnConfig(\n name=\"number_of_stars\",\n sampler_type=dd.SamplerType.UNIFORM,\n params=dd.UniformSamplerParams(low=1, high=5),\n convert_to=\"int\", # Convert the sampled float to an integer.\n )\n)\n\nconfig_builder.add_column(\n dd.SamplerColumnConfig(\n name=\"review_style\",\n sampler_type=dd.SamplerType.CATEGORY,\n params=dd.CategorySamplerParams(\n values=[\"rambling\", \"brief\", \"detailed\", \"structured with bullet points\"],\n weights=[1, 2, 2, 1],\n ),\n )\n)\n\ndata_designer.validate(config_builder)", + "language": "python" + }, + { + "type": "markdown", + "source": "## \ud83e\udd9c LLM-generated columns\n\n- The real power of Data Designer comes from leveraging LLMs to generate text, code, and structured data.\n\n- When prompting the LLM, we can use Jinja templating to reference other columns in the dataset.\n\n- As we see below, nested json fields can be accessed using dot notation." + }, + { + "type": "code", + "source": "config_builder.add_column(\n dd.LLMTextColumnConfig(\n name=\"product_name\",\n prompt=(\n \"You are a helpful assistant that generates product names. DO NOT add quotes around the product name.\\n\\n\"\n \"Come up with a creative product name for a product in the '{{ product_category }}' category, focusing \"\n \"on products related to '{{ product_subcategory }}'. 
The target age range of the ideal customer is \"\n \"{{ target_age_range }} years old. Respond with only the product name, no other text.\"\n ),\n model_alias=MODEL_ALIAS,\n )\n)\n\nconfig_builder.add_column(\n dd.LLMTextColumnConfig(\n name=\"customer_review\",\n prompt=(\n \"You are a customer named {{ customer.first_name }} from {{ customer.city }}, {{ customer.state }}. \"\n \"You are {{ customer.age }} years old and recently purchased a product called {{ product_name }}. \"\n \"Write a review of this product, which you gave a rating of {{ number_of_stars }} stars. \"\n \"The style of the review should be '{{ review_style }}'. \"\n \"Respond with only the review, no other text.\"\n ),\n model_alias=MODEL_ALIAS,\n )\n)\n\ndata_designer.validate(config_builder)", + "language": "python" + }, + { + "type": "markdown", + "source": "### \ud83d\udd01 Iteration is key \u2013\u00a0preview the dataset!\n\n1. Use the `preview` method to generate a sample of records quickly.\n\n2. Inspect the results for quality and format issues.\n\n3. Adjust column configurations, prompts, or parameters as needed.\n\n4. Re-run the preview until satisfied." + }, + { + "type": "code", + "source": "preview = data_designer.preview(config_builder, num_records=2)", + "language": "python" + }, + { + "type": "code", + "source": "# Run this cell multiple times to cycle through the 2 preview records.\npreview.display_sample_record()", + "language": "python" + }, + { + "type": "code", + "source": "# The preview dataset is available as a pandas DataFrame.\npreview.dataset", + "language": "python" + }, + { + "type": "markdown", + "source": "### \ud83d\udcca Analyze the generated data\n\n- Data Designer automatically generates a basic statistical analysis of the generated data.\n\n- This analysis is available via the `analysis` property of generation result objects." + }, + { + "type": "code", + "source": "# Print the analysis as a table.\npreview.analysis.to_report()", + "language": "python" + }, + { + "type": "markdown", + "source": "### \ud83c\udd99 Scale up!\n\n- Happy with your preview data?\n\n- Use the `create` method to submit larger Data Designer generation jobs." 
+ }, + { + "type": "code", + "source": "results = data_designer.create(config_builder, num_records=10, dataset_name=\"tutorial-1\")", + "language": "python" + }, + { + "type": "code", + "source": "# Load the generated dataset as a pandas DataFrame.\ndataset = results.load_dataset()\n\ndataset.head()", + "language": "python" + }, + { + "type": "code", + "source": "# Load the analysis results into memory.\nanalysis = results.load_analysis()\n\nanalysis.to_report()", + "language": "python" + }, + { + "type": "markdown", + "source": "## \u23ed\ufe0f Next Steps\n\nNow that you've seen the basics of Data Designer, check out the following notebooks to learn more about:\n\n- [Structured outputs and jinja expressions](https://nvidia-nemo.github.io/DataDesigner/latest/notebooks/2-structured-outputs-and-jinja-expressions/)\n\n- [Seeding synthetic data generation with an external dataset](https://nvidia-nemo.github.io/DataDesigner/latest/notebooks/3-seeding-with-a-dataset/)\n\n- [Providing images as context](https://nvidia-nemo.github.io/DataDesigner/latest/notebooks/4-providing-images-as-context/)\n\n- [Generating images](https://nvidia-nemo.github.io/DataDesigner/latest/notebooks/5-generating-images/)" + } +] }; diff --git a/fern/components/notebooks/2-structured-outputs-and-jinja-expressions.json b/fern/components/notebooks/2-structured-outputs-and-jinja-expressions.json new file mode 100644 index 00000000..077a3c60 --- /dev/null +++ b/fern/components/notebooks/2-structured-outputs-and-jinja-expressions.json @@ -0,0 +1,136 @@ +{ + "cells": [ + { + "type": "markdown", + "source": "# \ud83c\udfa8 Data Designer Tutorial: Structured Outputs and Jinja Expressions\n\n#### \ud83d\udcda What you'll learn\n\nIn this notebook, we will continue our exploration of Data Designer, demonstrating more advanced data generation using structured outputs and Jinja expressions.\n\nIf this is your first time using Data Designer, we recommend starting with the [first notebook](https://nvidia-nemo.github.io/DataDesigner/latest/notebooks/1-the-basics/) in this tutorial series." + }, + { + "type": "markdown", + "source": "### \ud83d\udce6 Import Data Designer\n\n- `data_designer.config` provides access to the configuration API.\n\n- `DataDesigner` is the main interface for data generation." + }, + { + "type": "markdown", + "source": "### \u26a1 Colab Setup\n\nRun the cells below to install the dependencies and set up the API key. If you don't have an API key, you can generate one from [build.nvidia.com](https://build.nvidia.com)." + }, + { + "type": "code", + "source": "%%capture\n!pip install -U data-designer", + "language": "python" + }, + { + "type": "code", + "source": "import getpass\nimport os\n\nfrom google.colab import userdata\n\ntry:\n os.environ[\"NVIDIA_API_KEY\"] = userdata.get(\"NVIDIA_API_KEY\")\nexcept userdata.SecretNotFoundError:\n os.environ[\"NVIDIA_API_KEY\"] = getpass.getpass(\"Enter your NVIDIA API key: \")", + "language": "python" + }, + { + "type": "code", + "source": "import data_designer.config as dd\nfrom data_designer.interface import DataDesigner", + "language": "python" + }, + { + "type": "markdown", + "source": "### \u2699\ufe0f Initialize the Data Designer interface\n\n- `DataDesigner` is the main object that is used to interface with the library.\n\n- When initialized without arguments, the [default model providers](https://nvidia-nemo.github.io/DataDesigner/latest/concepts/models/default-model-settings/) are used." 
+ }, + { + "type": "code", + "source": "data_designer = DataDesigner()", + "language": "python" + }, + { + "type": "markdown", + "source": "### \ud83c\udf9b\ufe0f Define model configurations\n\n- Each `ModelConfig` defines a model that can be used during the generation process.\n\n- The \"model alias\" is used to reference the model in the Data Designer config (as we will see below).\n\n- The \"model provider\" is the external service that hosts the model (see the [model config](https://nvidia-nemo.github.io/DataDesigner/latest/concepts/models/default-model-settings/) docs for more details).\n\n- By default, we use [build.nvidia.com](https://build.nvidia.com/models) as the model provider." + }, + { + "type": "code", + "source": "# This name is set in the model provider configuration.\nMODEL_PROVIDER = \"nvidia\"\n\n# The model ID is from build.nvidia.com.\nMODEL_ID = \"nvidia/nemotron-3-nano-30b-a3b\"\n\n# We choose this alias to be descriptive for our use case.\nMODEL_ALIAS = \"nemotron-nano-v3\"\n\nmodel_configs = [\n dd.ModelConfig(\n alias=MODEL_ALIAS,\n model=MODEL_ID,\n provider=MODEL_PROVIDER,\n inference_parameters=dd.ChatCompletionInferenceParams(\n temperature=1.0,\n top_p=1.0,\n max_tokens=2048,\n extra_body={\"chat_template_kwargs\": {\"enable_thinking\": False}},\n ),\n )\n]", + "language": "python" + }, + { + "type": "markdown", + "source": "### \ud83c\udfd7\ufe0f Initialize the Data Designer Config Builder\n\n- The Data Designer config defines the dataset schema and generation process.\n\n- The config builder provides an intuitive interface for building this configuration.\n\n- The list of model configs is provided to the builder at initialization." + }, + { + "type": "code", + "source": "config_builder = dd.DataDesignerConfigBuilder(model_configs=model_configs)", + "language": "python" + }, + { + "type": "markdown", + "source": "### \ud83e\uddd1\u200d\ud83c\udfa8 Designing our data\n\n- We will again create a product review dataset, but this time we will use structured outputs and Jinja expressions.\n\n- Structured outputs let you specify the exact schema of the data you want to generate.\n\n- Data Designer supports schemas specified using either json schema or Pydantic data models (recommended).\n\n
    \n\nWe'll define our structured outputs using [Pydantic](https://docs.pydantic.dev/latest/) data models\n\n> \ud83d\udca1 **Why Pydantic?**\n>\n> - Pydantic models provide better IDE support and type validation.\n>\n> - They are more Pythonic than raw JSON schemas.\n>\n> - They integrate seamlessly with Data Designer's structured output system." + }, + { + "type": "code", + "source": "from decimal import Decimal\nfrom typing import Literal\n\nfrom pydantic import BaseModel, Field\n\n\n# We define a Product schema so that the name, description, and price are generated\n# in one go, with the types and constraints specified.\nclass Product(BaseModel):\n name: str = Field(description=\"The name of the product\")\n description: str = Field(description=\"A description of the product\")\n price: Decimal = Field(description=\"The price of the product\", ge=10, le=1000, decimal_places=2)\n\n\nclass ProductReview(BaseModel):\n rating: int = Field(description=\"The rating of the product\", ge=1, le=5)\n customer_mood: Literal[\"irritated\", \"mad\", \"happy\", \"neutral\", \"excited\"] = Field(\n description=\"The mood of the customer\"\n )\n review: str = Field(description=\"A review of the product\")", + "language": "python" + }, + { + "type": "markdown", + "source": "Next, let's design our product review dataset using a few more tricks compared to the previous notebook." + }, + { + "type": "code", + "source": "# Since we often only want a few attributes from Person objects, we can\n# set drop=True in the column config to drop the column from the final dataset.\nconfig_builder.add_column(\n dd.SamplerColumnConfig(\n name=\"customer\",\n sampler_type=dd.SamplerType.PERSON_FROM_FAKER,\n params=dd.PersonFromFakerSamplerParams(),\n drop=True,\n )\n)\n\nconfig_builder.add_column(\n dd.SamplerColumnConfig(\n name=\"product_category\",\n sampler_type=dd.SamplerType.CATEGORY,\n params=dd.CategorySamplerParams(\n values=[\n \"Electronics\",\n \"Clothing\",\n \"Home & Kitchen\",\n \"Books\",\n \"Home Office\",\n ],\n ),\n )\n)\n\nconfig_builder.add_column(\n dd.SamplerColumnConfig(\n name=\"product_subcategory\",\n sampler_type=dd.SamplerType.SUBCATEGORY,\n params=dd.SubcategorySamplerParams(\n category=\"product_category\",\n values={\n \"Electronics\": [\n \"Smartphones\",\n \"Laptops\",\n \"Headphones\",\n \"Cameras\",\n \"Accessories\",\n ],\n \"Clothing\": [\n \"Men's Clothing\",\n \"Women's Clothing\",\n \"Winter Coats\",\n \"Activewear\",\n \"Accessories\",\n ],\n \"Home & Kitchen\": [\n \"Appliances\",\n \"Cookware\",\n \"Furniture\",\n \"Decor\",\n \"Organization\",\n ],\n \"Books\": [\n \"Fiction\",\n \"Non-Fiction\",\n \"Self-Help\",\n \"Textbooks\",\n \"Classics\",\n ],\n \"Home Office\": [\n \"Desks\",\n \"Chairs\",\n \"Storage\",\n \"Office Supplies\",\n \"Lighting\",\n ],\n },\n ),\n )\n)\n\nconfig_builder.add_column(\n dd.SamplerColumnConfig(\n name=\"target_age_range\",\n sampler_type=dd.SamplerType.CATEGORY,\n params=dd.CategorySamplerParams(values=[\"18-25\", \"25-35\", \"35-50\", \"50-65\", \"65+\"]),\n )\n)\n\n# Sampler columns support conditional params, which are used if the condition is met.\n# In this example, we set the review style to rambling if the target age range is 18-25.\n# Note conditional parameters are only supported for Sampler column types.\nconfig_builder.add_column(\n dd.SamplerColumnConfig(\n name=\"review_style\",\n sampler_type=dd.SamplerType.CATEGORY,\n params=dd.CategorySamplerParams(\n values=[\"rambling\", \"brief\", \"detailed\", \"structured with bullet 
points\"],\n weights=[1, 2, 2, 1],\n ),\n conditional_params={\n \"target_age_range == '18-25'\": dd.CategorySamplerParams(values=[\"rambling\"]),\n },\n )\n)\n\n# Optionally validate that the columns are configured correctly.\ndata_designer.validate(config_builder)", + "language": "python" + }, + { + "type": "markdown", + "source": "Next, we will use more advanced Jinja expressions to create new columns.\n\nJinja expressions let you:\n\n- Access nested attributes: `{{ customer.first_name }}`\n\n- Combine values: `{{ customer.first_name }} {{ customer.last_name }}`\n\n- Use conditional logic: `{% if condition %}...{% endif %}`" + }, + { + "type": "code", + "source": "# We can create new columns using Jinja expressions that reference\n# existing columns, including attributes of nested objects.\nconfig_builder.add_column(\n dd.ExpressionColumnConfig(name=\"customer_name\", expr=\"{{ customer.first_name }} {{ customer.last_name }}\")\n)\n\nconfig_builder.add_column(dd.ExpressionColumnConfig(name=\"customer_age\", expr=\"{{ customer.age }}\"))\n\nconfig_builder.add_column(\n dd.LLMStructuredColumnConfig(\n name=\"product\",\n prompt=(\n \"Create a product in the '{{ product_category }}' category, focusing on products \"\n \"related to '{{ product_subcategory }}'. The target age range of the ideal customer is \"\n \"{{ target_age_range }} years old. The product should be priced between $10 and $1000.\"\n ),\n output_format=Product,\n model_alias=MODEL_ALIAS,\n )\n)\n\n# We can even use if/else logic in our Jinja expressions to create more complex prompt patterns.\nconfig_builder.add_column(\n dd.LLMStructuredColumnConfig(\n name=\"customer_review\",\n prompt=(\n \"Your task is to write a review for the following product:\\n\\n\"\n \"Product Name: {{ product.name }}\\n\"\n \"Product Description: {{ product.description }}\\n\"\n \"Price: {{ product.price }}\\n\\n\"\n \"Imagine your name is {{ customer_name }} and you are from {{ customer.city }}, {{ customer.state }}. \"\n \"Write the review in a style that is '{{ review_style }}'.\"\n \"{% if target_age_range == '18-25' %}\"\n \"Make sure the review is more informal and conversational.\\n\"\n \"{% else %}\"\n \"Make sure the review is more formal and structured.\\n\"\n \"{% endif %}\"\n \"The review field should contain only the review, no other text.\"\n ),\n output_format=ProductReview,\n model_alias=MODEL_ALIAS,\n )\n)\n\ndata_designer.validate(config_builder)", + "language": "python" + }, + { + "type": "markdown", + "source": "### \ud83d\udd01 Iteration is key \u2013\u00a0preview the dataset!\n\n1. Use the `preview` method to generate a sample of records quickly.\n\n2. Inspect the results for quality and format issues.\n\n3. Adjust column configurations, prompts, or parameters as needed.\n\n4. Re-run the preview until satisfied." + }, + { + "type": "code", + "source": "preview = data_designer.preview(config_builder, num_records=2)", + "language": "python" + }, + { + "type": "code", + "source": "# Run this cell multiple times to cycle through the 2 preview records.\npreview.display_sample_record()", + "language": "python" + }, + { + "type": "code", + "source": "# The preview dataset is available as a pandas DataFrame.\npreview.dataset", + "language": "python" + }, + { + "type": "markdown", + "source": "### \ud83d\udcca Analyze the generated data\n\n- Data Designer automatically generates a basic statistical analysis of the generated data.\n\n- This analysis is available via the `analysis` property of generation result objects." 
+ }, + { + "type": "code", + "source": "# Print the analysis as a table.\npreview.analysis.to_report()", + "language": "python" + }, + { + "type": "markdown", + "source": "### \ud83c\udd99 Scale up!\n\n- Happy with your preview data?\n\n- Use the `create` method to submit larger Data Designer generation jobs." + }, + { + "type": "code", + "source": "results = data_designer.create(config_builder, num_records=10, dataset_name=\"tutorial-2\")", + "language": "python" + }, + { + "type": "code", + "source": "# Load the generated dataset as a pandas DataFrame.\ndataset = results.load_dataset()\n\ndataset.head()", + "language": "python" + }, + { + "type": "code", + "source": "# Load the analysis results into memory.\nanalysis = results.load_analysis()\n\nanalysis.to_report()", + "language": "python" + }, + { + "type": "markdown", + "source": "## \u23ed\ufe0f Next Steps\n\nCheck out the following notebook to learn more about:\n\n- [Seeding synthetic data generation with an external dataset](https://nvidia-nemo.github.io/DataDesigner/latest/notebooks/3-seeding-with-a-dataset/)\n\n- [Providing images as context](https://nvidia-nemo.github.io/DataDesigner/latest/notebooks/4-providing-images-as-context/)\n\n- [Generating images](https://nvidia-nemo.github.io/DataDesigner/latest/notebooks/5-generating-images/)" + } + ] +} \ No newline at end of file diff --git a/fern/components/notebooks/2-structured-outputs-and-jinja-expressions.ts b/fern/components/notebooks/2-structured-outputs-and-jinja-expressions.ts new file mode 100644 index 00000000..1ecf5d02 --- /dev/null +++ b/fern/components/notebooks/2-structured-outputs-and-jinja-expressions.ts @@ -0,0 +1,135 @@ +/** Auto-generated by ipynb-to-fern-json.py - do not edit */ +export default { cells: [ + { + "type": "markdown", + "source": "# \ud83c\udfa8 Data Designer Tutorial: Structured Outputs and Jinja Expressions\n\n#### \ud83d\udcda What you'll learn\n\nIn this notebook, we will continue our exploration of Data Designer, demonstrating more advanced data generation using structured outputs and Jinja expressions.\n\nIf this is your first time using Data Designer, we recommend starting with the [first notebook](https://nvidia-nemo.github.io/DataDesigner/latest/notebooks/1-the-basics/) in this tutorial series." + }, + { + "type": "markdown", + "source": "### \ud83d\udce6 Import Data Designer\n\n- `data_designer.config` provides access to the configuration API.\n\n- `DataDesigner` is the main interface for data generation." + }, + { + "type": "markdown", + "source": "### \u26a1 Colab Setup\n\nRun the cells below to install the dependencies and set up the API key. If you don't have an API key, you can generate one from [build.nvidia.com](https://build.nvidia.com)." 
+ }, + { + "type": "code", + "source": "%%capture\n!pip install -U data-designer", + "language": "python" + }, + { + "type": "code", + "source": "import getpass\nimport os\n\nfrom google.colab import userdata\n\ntry:\n os.environ[\"NVIDIA_API_KEY\"] = userdata.get(\"NVIDIA_API_KEY\")\nexcept userdata.SecretNotFoundError:\n os.environ[\"NVIDIA_API_KEY\"] = getpass.getpass(\"Enter your NVIDIA API key: \")", + "language": "python" + }, + { + "type": "code", + "source": "import data_designer.config as dd\nfrom data_designer.interface import DataDesigner", + "language": "python" + }, + { + "type": "markdown", + "source": "### \u2699\ufe0f Initialize the Data Designer interface\n\n- `DataDesigner` is the main object that is used to interface with the library.\n\n- When initialized without arguments, the [default model providers](https://nvidia-nemo.github.io/DataDesigner/latest/concepts/models/default-model-settings/) are used." + }, + { + "type": "code", + "source": "data_designer = DataDesigner()", + "language": "python" + }, + { + "type": "markdown", + "source": "### \ud83c\udf9b\ufe0f Define model configurations\n\n- Each `ModelConfig` defines a model that can be used during the generation process.\n\n- The \"model alias\" is used to reference the model in the Data Designer config (as we will see below).\n\n- The \"model provider\" is the external service that hosts the model (see the [model config](https://nvidia-nemo.github.io/DataDesigner/latest/concepts/models/default-model-settings/) docs for more details).\n\n- By default, we use [build.nvidia.com](https://build.nvidia.com/models) as the model provider." + }, + { + "type": "code", + "source": "# This name is set in the model provider configuration.\nMODEL_PROVIDER = \"nvidia\"\n\n# The model ID is from build.nvidia.com.\nMODEL_ID = \"nvidia/nemotron-3-nano-30b-a3b\"\n\n# We choose this alias to be descriptive for our use case.\nMODEL_ALIAS = \"nemotron-nano-v3\"\n\nmodel_configs = [\n dd.ModelConfig(\n alias=MODEL_ALIAS,\n model=MODEL_ID,\n provider=MODEL_PROVIDER,\n inference_parameters=dd.ChatCompletionInferenceParams(\n temperature=1.0,\n top_p=1.0,\n max_tokens=2048,\n extra_body={\"chat_template_kwargs\": {\"enable_thinking\": False}},\n ),\n )\n]", + "language": "python" + }, + { + "type": "markdown", + "source": "### \ud83c\udfd7\ufe0f Initialize the Data Designer Config Builder\n\n- The Data Designer config defines the dataset schema and generation process.\n\n- The config builder provides an intuitive interface for building this configuration.\n\n- The list of model configs is provided to the builder at initialization." + }, + { + "type": "code", + "source": "config_builder = dd.DataDesignerConfigBuilder(model_configs=model_configs)", + "language": "python" + }, + { + "type": "markdown", + "source": "### \ud83e\uddd1\u200d\ud83c\udfa8 Designing our data\n\n- We will again create a product review dataset, but this time we will use structured outputs and Jinja expressions.\n\n- Structured outputs let you specify the exact schema of the data you want to generate.\n\n- Data Designer supports schemas specified using either json schema or Pydantic data models (recommended).\n\n
    \n\nWe'll define our structured outputs using [Pydantic](https://docs.pydantic.dev/latest/) data models\n\n> \ud83d\udca1 **Why Pydantic?**\n>\n> - Pydantic models provide better IDE support and type validation.\n>\n> - They are more Pythonic than raw JSON schemas.\n>\n> - They integrate seamlessly with Data Designer's structured output system." + }, + { + "type": "code", + "source": "from decimal import Decimal\nfrom typing import Literal\n\nfrom pydantic import BaseModel, Field\n\n\n# We define a Product schema so that the name, description, and price are generated\n# in one go, with the types and constraints specified.\nclass Product(BaseModel):\n name: str = Field(description=\"The name of the product\")\n description: str = Field(description=\"A description of the product\")\n price: Decimal = Field(description=\"The price of the product\", ge=10, le=1000, decimal_places=2)\n\n\nclass ProductReview(BaseModel):\n rating: int = Field(description=\"The rating of the product\", ge=1, le=5)\n customer_mood: Literal[\"irritated\", \"mad\", \"happy\", \"neutral\", \"excited\"] = Field(\n description=\"The mood of the customer\"\n )\n review: str = Field(description=\"A review of the product\")", + "language": "python" + }, + { + "type": "markdown", + "source": "Next, let's design our product review dataset using a few more tricks compared to the previous notebook." + }, + { + "type": "code", + "source": "# Since we often only want a few attributes from Person objects, we can\n# set drop=True in the column config to drop the column from the final dataset.\nconfig_builder.add_column(\n dd.SamplerColumnConfig(\n name=\"customer\",\n sampler_type=dd.SamplerType.PERSON_FROM_FAKER,\n params=dd.PersonFromFakerSamplerParams(),\n drop=True,\n )\n)\n\nconfig_builder.add_column(\n dd.SamplerColumnConfig(\n name=\"product_category\",\n sampler_type=dd.SamplerType.CATEGORY,\n params=dd.CategorySamplerParams(\n values=[\n \"Electronics\",\n \"Clothing\",\n \"Home & Kitchen\",\n \"Books\",\n \"Home Office\",\n ],\n ),\n )\n)\n\nconfig_builder.add_column(\n dd.SamplerColumnConfig(\n name=\"product_subcategory\",\n sampler_type=dd.SamplerType.SUBCATEGORY,\n params=dd.SubcategorySamplerParams(\n category=\"product_category\",\n values={\n \"Electronics\": [\n \"Smartphones\",\n \"Laptops\",\n \"Headphones\",\n \"Cameras\",\n \"Accessories\",\n ],\n \"Clothing\": [\n \"Men's Clothing\",\n \"Women's Clothing\",\n \"Winter Coats\",\n \"Activewear\",\n \"Accessories\",\n ],\n \"Home & Kitchen\": [\n \"Appliances\",\n \"Cookware\",\n \"Furniture\",\n \"Decor\",\n \"Organization\",\n ],\n \"Books\": [\n \"Fiction\",\n \"Non-Fiction\",\n \"Self-Help\",\n \"Textbooks\",\n \"Classics\",\n ],\n \"Home Office\": [\n \"Desks\",\n \"Chairs\",\n \"Storage\",\n \"Office Supplies\",\n \"Lighting\",\n ],\n },\n ),\n )\n)\n\nconfig_builder.add_column(\n dd.SamplerColumnConfig(\n name=\"target_age_range\",\n sampler_type=dd.SamplerType.CATEGORY,\n params=dd.CategorySamplerParams(values=[\"18-25\", \"25-35\", \"35-50\", \"50-65\", \"65+\"]),\n )\n)\n\n# Sampler columns support conditional params, which are used if the condition is met.\n# In this example, we set the review style to rambling if the target age range is 18-25.\n# Note conditional parameters are only supported for Sampler column types.\nconfig_builder.add_column(\n dd.SamplerColumnConfig(\n name=\"review_style\",\n sampler_type=dd.SamplerType.CATEGORY,\n params=dd.CategorySamplerParams(\n values=[\"rambling\", \"brief\", \"detailed\", \"structured with bullet 
points\"],\n weights=[1, 2, 2, 1],\n ),\n conditional_params={\n \"target_age_range == '18-25'\": dd.CategorySamplerParams(values=[\"rambling\"]),\n },\n )\n)\n\n# Optionally validate that the columns are configured correctly.\ndata_designer.validate(config_builder)", + "language": "python" + }, + { + "type": "markdown", + "source": "Next, we will use more advanced Jinja expressions to create new columns.\n\nJinja expressions let you:\n\n- Access nested attributes: `{{ customer.first_name }}`\n\n- Combine values: `{{ customer.first_name }} {{ customer.last_name }}`\n\n- Use conditional logic: `{% if condition %}...{% endif %}`" + }, + { + "type": "code", + "source": "# We can create new columns using Jinja expressions that reference\n# existing columns, including attributes of nested objects.\nconfig_builder.add_column(\n dd.ExpressionColumnConfig(name=\"customer_name\", expr=\"{{ customer.first_name }} {{ customer.last_name }}\")\n)\n\nconfig_builder.add_column(dd.ExpressionColumnConfig(name=\"customer_age\", expr=\"{{ customer.age }}\"))\n\nconfig_builder.add_column(\n dd.LLMStructuredColumnConfig(\n name=\"product\",\n prompt=(\n \"Create a product in the '{{ product_category }}' category, focusing on products \"\n \"related to '{{ product_subcategory }}'. The target age range of the ideal customer is \"\n \"{{ target_age_range }} years old. The product should be priced between $10 and $1000.\"\n ),\n output_format=Product,\n model_alias=MODEL_ALIAS,\n )\n)\n\n# We can even use if/else logic in our Jinja expressions to create more complex prompt patterns.\nconfig_builder.add_column(\n dd.LLMStructuredColumnConfig(\n name=\"customer_review\",\n prompt=(\n \"Your task is to write a review for the following product:\\n\\n\"\n \"Product Name: {{ product.name }}\\n\"\n \"Product Description: {{ product.description }}\\n\"\n \"Price: {{ product.price }}\\n\\n\"\n \"Imagine your name is {{ customer_name }} and you are from {{ customer.city }}, {{ customer.state }}. \"\n \"Write the review in a style that is '{{ review_style }}'.\"\n \"{% if target_age_range == '18-25' %}\"\n \"Make sure the review is more informal and conversational.\\n\"\n \"{% else %}\"\n \"Make sure the review is more formal and structured.\\n\"\n \"{% endif %}\"\n \"The review field should contain only the review, no other text.\"\n ),\n output_format=ProductReview,\n model_alias=MODEL_ALIAS,\n )\n)\n\ndata_designer.validate(config_builder)", + "language": "python" + }, + { + "type": "markdown", + "source": "### \ud83d\udd01 Iteration is key \u2013\u00a0preview the dataset!\n\n1. Use the `preview` method to generate a sample of records quickly.\n\n2. Inspect the results for quality and format issues.\n\n3. Adjust column configurations, prompts, or parameters as needed.\n\n4. Re-run the preview until satisfied." + }, + { + "type": "code", + "source": "preview = data_designer.preview(config_builder, num_records=2)", + "language": "python" + }, + { + "type": "code", + "source": "# Run this cell multiple times to cycle through the 2 preview records.\npreview.display_sample_record()", + "language": "python" + }, + { + "type": "code", + "source": "# The preview dataset is available as a pandas DataFrame.\npreview.dataset", + "language": "python" + }, + { + "type": "markdown", + "source": "### \ud83d\udcca Analyze the generated data\n\n- Data Designer automatically generates a basic statistical analysis of the generated data.\n\n- This analysis is available via the `analysis` property of generation result objects." 
+ }, + { + "type": "code", + "source": "# Print the analysis as a table.\npreview.analysis.to_report()", + "language": "python" + }, + { + "type": "markdown", + "source": "### \ud83c\udd99 Scale up!\n\n- Happy with your preview data?\n\n- Use the `create` method to submit larger Data Designer generation jobs." + }, + { + "type": "code", + "source": "results = data_designer.create(config_builder, num_records=10, dataset_name=\"tutorial-2\")", + "language": "python" + }, + { + "type": "code", + "source": "# Load the generated dataset as a pandas DataFrame.\ndataset = results.load_dataset()\n\ndataset.head()", + "language": "python" + }, + { + "type": "code", + "source": "# Load the analysis results into memory.\nanalysis = results.load_analysis()\n\nanalysis.to_report()", + "language": "python" + }, + { + "type": "markdown", + "source": "## \u23ed\ufe0f Next Steps\n\nCheck out the following notebook to learn more about:\n\n- [Seeding synthetic data generation with an external dataset](https://nvidia-nemo.github.io/DataDesigner/latest/notebooks/3-seeding-with-a-dataset/)\n\n- [Providing images as context](https://nvidia-nemo.github.io/DataDesigner/latest/notebooks/4-providing-images-as-context/)\n\n- [Generating images](https://nvidia-nemo.github.io/DataDesigner/latest/notebooks/5-generating-images/)" + } +] }; diff --git a/fern/components/notebooks/3-seeding-with-a-dataset.json b/fern/components/notebooks/3-seeding-with-a-dataset.json new file mode 100644 index 00000000..9be0595d --- /dev/null +++ b/fern/components/notebooks/3-seeding-with-a-dataset.json @@ -0,0 +1,127 @@ +{ + "cells": [ + { + "type": "markdown", + "source": "# \ud83c\udfa8 Data Designer Tutorial: Seeding Synthetic Data Generation with an External Dataset\n\n#### \ud83d\udcda What you'll learn\n\nIn this notebook, we will demonstrate how to seed synthetic data generation in Data Designer with an external dataset.\n\nIf this is your first time using Data Designer, we recommend starting with the [first notebook](https://nvidia-nemo.github.io/DataDesigner/latest/notebooks/1-the-basics/) in this tutorial series." + }, + { + "type": "markdown", + "source": "### \ud83d\udce6 Import Data Designer\n\n- `data_designer.config` provides access to the configuration API.\n\n- `DataDesigner` is the main interface for data generation." + }, + { + "type": "markdown", + "source": "### \u26a1 Colab Setup\n\nRun the cells below to install the dependencies and set up the API key. If you don't have an API key, you can generate one from [build.nvidia.com](https://build.nvidia.com)." + }, + { + "type": "code", + "source": "%%capture\n!pip install -U data-designer", + "language": "python" + }, + { + "type": "code", + "source": "import getpass\nimport os\n\nfrom google.colab import userdata\n\ntry:\n os.environ[\"NVIDIA_API_KEY\"] = userdata.get(\"NVIDIA_API_KEY\")\nexcept userdata.SecretNotFoundError:\n os.environ[\"NVIDIA_API_KEY\"] = getpass.getpass(\"Enter your NVIDIA API key: \")", + "language": "python" + }, + { + "type": "code", + "source": "import data_designer.config as dd\nfrom data_designer.interface import DataDesigner", + "language": "python" + }, + { + "type": "markdown", + "source": "### \u2699\ufe0f Initialize the Data Designer interface\n\n- `DataDesigner` is the main object responsible for managing the data generation process.\n\n- When initialized without arguments, the [default model providers](https://nvidia-nemo.github.io/DataDesigner/latest/concepts/models/default-model-settings/) are used." 
+ }, + { + "type": "code", + "source": "data_designer = DataDesigner()", + "language": "python" + }, + { + "type": "markdown", + "source": "### \ud83c\udf9b\ufe0f Define model configurations\n\n- Each `ModelConfig` defines a model that can be used during the generation process.\n\n- The \"model alias\" is used to reference the model in the Data Designer config (as we will see below).\n\n- The \"model provider\" is the external service that hosts the model (see the [model config](https://nvidia-nemo.github.io/DataDesigner/latest/concepts/models/default-model-settings/) docs for more details).\n\n- By default, we use [build.nvidia.com](https://build.nvidia.com/models) as the model provider." + }, + { + "type": "code", + "source": "# This name is set in the model provider configuration.\nMODEL_PROVIDER = \"nvidia\"\n\n# The model ID is from build.nvidia.com.\nMODEL_ID = \"nvidia/nemotron-3-nano-30b-a3b\"\n\n# We choose this alias to be descriptive for our use case.\nMODEL_ALIAS = \"nemotron-nano-v3\"\n\nmodel_configs = [\n dd.ModelConfig(\n alias=MODEL_ALIAS,\n model=MODEL_ID,\n provider=MODEL_PROVIDER,\n inference_parameters=dd.ChatCompletionInferenceParams(\n temperature=1.0,\n top_p=1.0,\n max_tokens=2048,\n extra_body={\"chat_template_kwargs\": {\"enable_thinking\": False}},\n ),\n )\n]", + "language": "python" + }, + { + "type": "markdown", + "source": "### \ud83c\udfd7\ufe0f Initialize the Data Designer Config Builder\n\n- The Data Designer config defines the dataset schema and generation process.\n\n- The config builder provides an intuitive interface for building this configuration.\n\n- The list of model configs is provided to the builder at initialization." + }, + { + "type": "code", + "source": "config_builder = dd.DataDesignerConfigBuilder(model_configs=model_configs)", + "language": "python" + }, + { + "type": "markdown", + "source": "## \ud83c\udfe5 Prepare a seed dataset\n\n- For this notebook, we'll create a synthetic dataset of patient notes.\n\n- We will _seed_ the generation process with a [symptom-to-diagnosis dataset](https://huggingface.co/datasets/gretelai/symptom_to_diagnosis).\n\n- We already have the dataset downloaded in the [data](../data) directory of this repository.\n\n
    \n\n> \ud83c\udf31 **Why use a seed dataset?**\n>\n> - Seed datasets let you steer the generation process by providing context that is specific to your use case.\n>\n> - Seed datasets are also an excellent way to inject real-world diversity into your synthetic data.\n>\n> - During generation, prompt templates can reference any of the seed dataset fields." + }, + { + "type": "code", + "source": "# Download sample dataset from Github\nimport urllib.request\n\nurl = \"https://raw.githubusercontent.com/NVIDIA/GenerativeAIExamples/refs/heads/main/nemo/NeMo-Data-Designer/data/gretelai_symptom_to_diagnosis.csv\"\nlocal_filename, _ = urllib.request.urlretrieve(url, \"gretelai_symptom_to_diagnosis.csv\")\n\n# Seed datasets are passed as reference objects to the config builder.\nseed_source = dd.LocalFileSeedSource(path=local_filename)\n\nconfig_builder.with_seed_dataset(seed_source)", + "language": "python" + }, + { + "type": "markdown", + "source": "## \ud83c\udfa8 Designing our synthetic patient notes dataset\n\n- The prompt template can reference fields from our seed dataset:\n - `{{ diagnosis }}` - the medical diagnosis from the seed data\n - `{{ patient_summary }}` - the symptom description from the seed data" + }, + { + "type": "code", + "source": "config_builder.add_column(\n dd.SamplerColumnConfig(\n name=\"patient_sampler\",\n sampler_type=dd.SamplerType.PERSON_FROM_FAKER,\n params=dd.PersonFromFakerSamplerParams(),\n )\n)\n\nconfig_builder.add_column(\n dd.SamplerColumnConfig(\n name=\"doctor_sampler\",\n sampler_type=dd.SamplerType.PERSON_FROM_FAKER,\n params=dd.PersonFromFakerSamplerParams(),\n )\n)\n\nconfig_builder.add_column(\n dd.SamplerColumnConfig(\n name=\"patient_id\",\n sampler_type=dd.SamplerType.UUID,\n params=dd.UUIDSamplerParams(\n prefix=\"PT-\",\n short_form=True,\n uppercase=True,\n ),\n )\n)\n\nconfig_builder.add_column(dd.ExpressionColumnConfig(name=\"first_name\", expr=\"{{ patient_sampler.first_name }}\"))\n\nconfig_builder.add_column(dd.ExpressionColumnConfig(name=\"last_name\", expr=\"{{ patient_sampler.last_name }}\"))\n\nconfig_builder.add_column(dd.ExpressionColumnConfig(name=\"dob\", expr=\"{{ patient_sampler.birth_date }}\"))\n\nconfig_builder.add_column(\n dd.SamplerColumnConfig(\n name=\"symptom_onset_date\",\n sampler_type=dd.SamplerType.DATETIME,\n params=dd.DatetimeSamplerParams(start=\"2024-01-01\", end=\"2024-12-31\"),\n )\n)\n\nconfig_builder.add_column(\n dd.SamplerColumnConfig(\n name=\"date_of_visit\",\n sampler_type=dd.SamplerType.TIMEDELTA,\n params=dd.TimeDeltaSamplerParams(dt_min=1, dt_max=30, reference_column_name=\"symptom_onset_date\"),\n )\n)\n\nconfig_builder.add_column(dd.ExpressionColumnConfig(name=\"physician\", expr=\"Dr. {{ doctor_sampler.last_name }}\"))\n\nconfig_builder.add_column(\n dd.LLMTextColumnConfig(\n name=\"physician_notes\",\n prompt=\"\"\"\\\nYou are a primary-care physician who just had an appointment with {{ first_name }} {{ last_name }},\nwho has been struggling with symptoms from {{ diagnosis }} since {{ symptom_onset_date }}.\nThe date of today's visit is {{ date_of_visit }}.\n\n{{ patient_summary }}\n\nWrite careful notes about your visit with {{ first_name }},\nas Dr. 
{{ doctor_sampler.first_name }} {{ doctor_sampler.last_name }}.\n\nFormat the notes as a busy doctor might.\nRespond with only the notes, no other text.\n\"\"\",\n model_alias=MODEL_ALIAS,\n )\n)\n\ndata_designer.validate(config_builder)", + "language": "python" + }, + { + "type": "markdown", + "source": "### \ud83d\udd01 Iteration is key \u2013\u00a0preview the dataset!\n\n1. Use the `preview` method to generate a sample of records quickly.\n\n2. Inspect the results for quality and format issues.\n\n3. Adjust column configurations, prompts, or parameters as needed.\n\n4. Re-run the preview until satisfied." + }, + { + "type": "code", + "source": "preview = data_designer.preview(config_builder, num_records=2)", + "language": "python" + }, + { + "type": "code", + "source": "# Run this cell multiple times to cycle through the 2 preview records.\npreview.display_sample_record()", + "language": "python" + }, + { + "type": "code", + "source": "# The preview dataset is available as a pandas DataFrame.\npreview.dataset", + "language": "python" + }, + { + "type": "markdown", + "source": "### \ud83d\udcca Analyze the generated data\n\n- Data Designer automatically generates a basic statistical analysis of the generated data.\n\n- This analysis is available via the `analysis` property of generation result objects." + }, + { + "type": "code", + "source": "# Print the analysis as a table.\npreview.analysis.to_report()", + "language": "python" + }, + { + "type": "markdown", + "source": "### \ud83c\udd99 Scale up!\n\n- Happy with your preview data?\n\n- Use the `create` method to submit larger Data Designer generation jobs." + }, + { + "type": "code", + "source": "results = data_designer.create(config_builder, num_records=10, dataset_name=\"tutorial-3\")", + "language": "python" + }, + { + "type": "code", + "source": "# Load the generated dataset as a pandas DataFrame.\ndataset = results.load_dataset()\n\ndataset.head()", + "language": "python" + }, + { + "type": "code", + "source": "# Load the analysis results into memory.\nanalysis = results.load_analysis()\n\nanalysis.to_report()", + "language": "python" + }, + { + "type": "markdown", + "source": "## \u23ed\ufe0f Next Steps\n\nCheck out the following notebook to learn more about:\n\n- [Providing images as context](https://nvidia-nemo.github.io/DataDesigner/latest/notebooks/4-providing-images-as-context/)\n\n- [Generating images](https://nvidia-nemo.github.io/DataDesigner/latest/notebooks/5-generating-images/)" + } + ] +} \ No newline at end of file diff --git a/fern/components/notebooks/3-seeding-with-a-dataset.ts b/fern/components/notebooks/3-seeding-with-a-dataset.ts new file mode 100644 index 00000000..a610af17 --- /dev/null +++ b/fern/components/notebooks/3-seeding-with-a-dataset.ts @@ -0,0 +1,126 @@ +/** Auto-generated by ipynb-to-fern-json.py - do not edit */ +export default { cells: [ + { + "type": "markdown", + "source": "# \ud83c\udfa8 Data Designer Tutorial: Seeding Synthetic Data Generation with an External Dataset\n\n#### \ud83d\udcda What you'll learn\n\nIn this notebook, we will demonstrate how to seed synthetic data generation in Data Designer with an external dataset.\n\nIf this is your first time using Data Designer, we recommend starting with the [first notebook](https://nvidia-nemo.github.io/DataDesigner/latest/notebooks/1-the-basics/) in this tutorial series." 
+ }, + { + "type": "markdown", + "source": "### \ud83d\udce6 Import Data Designer\n\n- `data_designer.config` provides access to the configuration API.\n\n- `DataDesigner` is the main interface for data generation." + }, + { + "type": "markdown", + "source": "### \u26a1 Colab Setup\n\nRun the cells below to install the dependencies and set up the API key. If you don't have an API key, you can generate one from [build.nvidia.com](https://build.nvidia.com)." + }, + { + "type": "code", + "source": "%%capture\n!pip install -U data-designer", + "language": "python" + }, + { + "type": "code", + "source": "import getpass\nimport os\n\nfrom google.colab import userdata\n\ntry:\n os.environ[\"NVIDIA_API_KEY\"] = userdata.get(\"NVIDIA_API_KEY\")\nexcept userdata.SecretNotFoundError:\n os.environ[\"NVIDIA_API_KEY\"] = getpass.getpass(\"Enter your NVIDIA API key: \")", + "language": "python" + }, + { + "type": "code", + "source": "import data_designer.config as dd\nfrom data_designer.interface import DataDesigner", + "language": "python" + }, + { + "type": "markdown", + "source": "### \u2699\ufe0f Initialize the Data Designer interface\n\n- `DataDesigner` is the main object responsible for managing the data generation process.\n\n- When initialized without arguments, the [default model providers](https://nvidia-nemo.github.io/DataDesigner/latest/concepts/models/default-model-settings/) are used." + }, + { + "type": "code", + "source": "data_designer = DataDesigner()", + "language": "python" + }, + { + "type": "markdown", + "source": "### \ud83c\udf9b\ufe0f Define model configurations\n\n- Each `ModelConfig` defines a model that can be used during the generation process.\n\n- The \"model alias\" is used to reference the model in the Data Designer config (as we will see below).\n\n- The \"model provider\" is the external service that hosts the model (see the [model config](https://nvidia-nemo.github.io/DataDesigner/latest/concepts/models/default-model-settings/) docs for more details).\n\n- By default, we use [build.nvidia.com](https://build.nvidia.com/models) as the model provider." + }, + { + "type": "code", + "source": "# This name is set in the model provider configuration.\nMODEL_PROVIDER = \"nvidia\"\n\n# The model ID is from build.nvidia.com.\nMODEL_ID = \"nvidia/nemotron-3-nano-30b-a3b\"\n\n# We choose this alias to be descriptive for our use case.\nMODEL_ALIAS = \"nemotron-nano-v3\"\n\nmodel_configs = [\n dd.ModelConfig(\n alias=MODEL_ALIAS,\n model=MODEL_ID,\n provider=MODEL_PROVIDER,\n inference_parameters=dd.ChatCompletionInferenceParams(\n temperature=1.0,\n top_p=1.0,\n max_tokens=2048,\n extra_body={\"chat_template_kwargs\": {\"enable_thinking\": False}},\n ),\n )\n]", + "language": "python" + }, + { + "type": "markdown", + "source": "### \ud83c\udfd7\ufe0f Initialize the Data Designer Config Builder\n\n- The Data Designer config defines the dataset schema and generation process.\n\n- The config builder provides an intuitive interface for building this configuration.\n\n- The list of model configs is provided to the builder at initialization." 
+ }, + { + "type": "code", + "source": "config_builder = dd.DataDesignerConfigBuilder(model_configs=model_configs)", + "language": "python" + }, + { + "type": "markdown", + "source": "## \ud83c\udfe5 Prepare a seed dataset\n\n- For this notebook, we'll create a synthetic dataset of patient notes.\n\n- We will _seed_ the generation process with a [symptom-to-diagnosis dataset](https://huggingface.co/datasets/gretelai/symptom_to_diagnosis).\n\n- We already have the dataset downloaded in the [data](../data) directory of this repository.\n\n
    \n\n> \ud83c\udf31 **Why use a seed dataset?**\n>\n> - Seed datasets let you steer the generation process by providing context that is specific to your use case.\n>\n> - Seed datasets are also an excellent way to inject real-world diversity into your synthetic data.\n>\n> - During generation, prompt templates can reference any of the seed dataset fields." + }, + { + "type": "code", + "source": "# Download sample dataset from Github\nimport urllib.request\n\nurl = \"https://raw.githubusercontent.com/NVIDIA/GenerativeAIExamples/refs/heads/main/nemo/NeMo-Data-Designer/data/gretelai_symptom_to_diagnosis.csv\"\nlocal_filename, _ = urllib.request.urlretrieve(url, \"gretelai_symptom_to_diagnosis.csv\")\n\n# Seed datasets are passed as reference objects to the config builder.\nseed_source = dd.LocalFileSeedSource(path=local_filename)\n\nconfig_builder.with_seed_dataset(seed_source)", + "language": "python" + }, + { + "type": "markdown", + "source": "## \ud83c\udfa8 Designing our synthetic patient notes dataset\n\n- The prompt template can reference fields from our seed dataset:\n - `{{ diagnosis }}` - the medical diagnosis from the seed data\n - `{{ patient_summary }}` - the symptom description from the seed data" + }, + { + "type": "code", + "source": "config_builder.add_column(\n dd.SamplerColumnConfig(\n name=\"patient_sampler\",\n sampler_type=dd.SamplerType.PERSON_FROM_FAKER,\n params=dd.PersonFromFakerSamplerParams(),\n )\n)\n\nconfig_builder.add_column(\n dd.SamplerColumnConfig(\n name=\"doctor_sampler\",\n sampler_type=dd.SamplerType.PERSON_FROM_FAKER,\n params=dd.PersonFromFakerSamplerParams(),\n )\n)\n\nconfig_builder.add_column(\n dd.SamplerColumnConfig(\n name=\"patient_id\",\n sampler_type=dd.SamplerType.UUID,\n params=dd.UUIDSamplerParams(\n prefix=\"PT-\",\n short_form=True,\n uppercase=True,\n ),\n )\n)\n\nconfig_builder.add_column(dd.ExpressionColumnConfig(name=\"first_name\", expr=\"{{ patient_sampler.first_name }}\"))\n\nconfig_builder.add_column(dd.ExpressionColumnConfig(name=\"last_name\", expr=\"{{ patient_sampler.last_name }}\"))\n\nconfig_builder.add_column(dd.ExpressionColumnConfig(name=\"dob\", expr=\"{{ patient_sampler.birth_date }}\"))\n\nconfig_builder.add_column(\n dd.SamplerColumnConfig(\n name=\"symptom_onset_date\",\n sampler_type=dd.SamplerType.DATETIME,\n params=dd.DatetimeSamplerParams(start=\"2024-01-01\", end=\"2024-12-31\"),\n )\n)\n\nconfig_builder.add_column(\n dd.SamplerColumnConfig(\n name=\"date_of_visit\",\n sampler_type=dd.SamplerType.TIMEDELTA,\n params=dd.TimeDeltaSamplerParams(dt_min=1, dt_max=30, reference_column_name=\"symptom_onset_date\"),\n )\n)\n\nconfig_builder.add_column(dd.ExpressionColumnConfig(name=\"physician\", expr=\"Dr. {{ doctor_sampler.last_name }}\"))\n\nconfig_builder.add_column(\n dd.LLMTextColumnConfig(\n name=\"physician_notes\",\n prompt=\"\"\"\\\nYou are a primary-care physician who just had an appointment with {{ first_name }} {{ last_name }},\nwho has been struggling with symptoms from {{ diagnosis }} since {{ symptom_onset_date }}.\nThe date of today's visit is {{ date_of_visit }}.\n\n{{ patient_summary }}\n\nWrite careful notes about your visit with {{ first_name }},\nas Dr. 
{{ doctor_sampler.first_name }} {{ doctor_sampler.last_name }}.\n\nFormat the notes as a busy doctor might.\nRespond with only the notes, no other text.\n\"\"\",\n model_alias=MODEL_ALIAS,\n )\n)\n\ndata_designer.validate(config_builder)", + "language": "python" + }, + { + "type": "markdown", + "source": "### \ud83d\udd01 Iteration is key \u2013\u00a0preview the dataset!\n\n1. Use the `preview` method to generate a sample of records quickly.\n\n2. Inspect the results for quality and format issues.\n\n3. Adjust column configurations, prompts, or parameters as needed.\n\n4. Re-run the preview until satisfied." + }, + { + "type": "code", + "source": "preview = data_designer.preview(config_builder, num_records=2)", + "language": "python" + }, + { + "type": "code", + "source": "# Run this cell multiple times to cycle through the 2 preview records.\npreview.display_sample_record()", + "language": "python" + }, + { + "type": "code", + "source": "# The preview dataset is available as a pandas DataFrame.\npreview.dataset", + "language": "python" + }, + { + "type": "markdown", + "source": "### \ud83d\udcca Analyze the generated data\n\n- Data Designer automatically generates a basic statistical analysis of the generated data.\n\n- This analysis is available via the `analysis` property of generation result objects." + }, + { + "type": "code", + "source": "# Print the analysis as a table.\npreview.analysis.to_report()", + "language": "python" + }, + { + "type": "markdown", + "source": "### \ud83c\udd99 Scale up!\n\n- Happy with your preview data?\n\n- Use the `create` method to submit larger Data Designer generation jobs." + }, + { + "type": "code", + "source": "results = data_designer.create(config_builder, num_records=10, dataset_name=\"tutorial-3\")", + "language": "python" + }, + { + "type": "code", + "source": "# Load the generated dataset as a pandas DataFrame.\ndataset = results.load_dataset()\n\ndataset.head()", + "language": "python" + }, + { + "type": "code", + "source": "# Load the analysis results into memory.\nanalysis = results.load_analysis()\n\nanalysis.to_report()", + "language": "python" + }, + { + "type": "markdown", + "source": "## \u23ed\ufe0f Next Steps\n\nCheck out the following notebook to learn more about:\n\n- [Providing images as context](https://nvidia-nemo.github.io/DataDesigner/latest/notebooks/4-providing-images-as-context/)\n\n- [Generating images](https://nvidia-nemo.github.io/DataDesigner/latest/notebooks/5-generating-images/)" + } +] }; diff --git a/fern/components/notebooks/4-providing-images-as-context.json b/fern/components/notebooks/4-providing-images-as-context.json new file mode 100644 index 00000000..5459f949 --- /dev/null +++ b/fern/components/notebooks/4-providing-images-as-context.json @@ -0,0 +1,156 @@ +{ + "cells": [ + { + "type": "markdown", + "source": "# \ud83c\udfa8 Data Designer Tutorial: Providing Images as Context for Vision-Based Data Generation" + }, + { + "type": "markdown", + "source": "#### \ud83d\udcda What you'll learn\n\nThis notebook demonstrates how to provide images as context to generate text descriptions using vision-language models.\n\n- \u2728 **Visual Document Processing**: Converting images to chat-ready format for model consumption\n- \ud83d\udd0d **Vision-Language Generation**: Using vision models to generate detailed summaries from images\n\nIf this is your first time using Data Designer, we recommend starting with the [first notebook](https://nvidia-nemo.github.io/DataDesigner/latest/notebooks/1-the-basics/) in this tutorial 
series." + }, + { + "type": "markdown", + "source": "### \ud83d\udce6 Import Data Designer\n\n- `data_designer.config` provides access to the configuration API.\n\n- `DataDesigner` is the main interface for data generation." + }, + { + "type": "markdown", + "source": "### \u26a1 Colab Setup\n\nRun the cells below to install the dependencies and set up the API key. If you don't have an API key, you can generate one from [build.nvidia.com](https://build.nvidia.com)." + }, + { + "type": "code", + "source": "%%capture\n!pip install -U data-designer \"pillow>=12.0.0,<13\" \"datasets>=4.0.0,<5\"", + "language": "python" + }, + { + "type": "code", + "source": "import getpass\nimport os\n\nfrom google.colab import userdata\n\ntry:\n os.environ[\"NVIDIA_API_KEY\"] = userdata.get(\"NVIDIA_API_KEY\")\nexcept userdata.SecretNotFoundError:\n os.environ[\"NVIDIA_API_KEY\"] = getpass.getpass(\"Enter your NVIDIA API key: \")", + "language": "python" + }, + { + "type": "code", + "source": "# Standard library imports\nimport base64\nimport io\nimport uuid\n\n# Third-party imports\nimport pandas as pd\nimport rich\nfrom datasets import load_dataset\nfrom IPython.display import display\nfrom rich.panel import Panel\n\n# Data Designer imports\nimport data_designer.config as dd\nfrom data_designer.interface import DataDesigner", + "language": "python" + }, + { + "type": "markdown", + "source": "### \u2699\ufe0f Initialize the Data Designer interface\n\n- `DataDesigner` is the main object responsible for managing the data generation process.\n\n- When initialized without arguments, the [default model providers](https://nvidia-nemo.github.io/DataDesigner/latest/concepts/models/default-model-settings/) are used." + }, + { + "type": "code", + "source": "data_designer = DataDesigner()", + "language": "python" + }, + { + "type": "markdown", + "source": "### \ud83c\udf9b\ufe0f Define model configurations\n\n- Each `ModelConfig` defines a model that can be used during the generation process.\n\n- The \"model alias\" is used to reference the model in the Data Designer config (as we will see below).\n\n- The \"model provider\" is the external service that hosts the model (see the [model config](https://nvidia-nemo.github.io/DataDesigner/latest/concepts/models/default-model-settings/) docs for more details).\n\n- By default, we use [build.nvidia.com](https://build.nvidia.com/models) as the model provider." + }, + { + "type": "code", + "source": "# This name is set in the model provider configuration.\nMODEL_PROVIDER = \"nvidia\"\n\nmodel_configs = [\n dd.ModelConfig(\n alias=\"vision\",\n model=\"meta/llama-4-scout-17b-16e-instruct\",\n provider=MODEL_PROVIDER,\n inference_parameters=dd.ChatCompletionInferenceParams(\n temperature=0.60,\n top_p=0.95,\n max_tokens=2048,\n ),\n ),\n]", + "language": "python" + }, + { + "type": "markdown", + "source": "### \ud83c\udfd7\ufe0f Initialize the Data Designer Config Builder\n\n- The Data Designer config defines the dataset schema and generation process.\n\n- The config builder provides an intuitive interface for building this configuration.\n\n- The list of model configs is provided to the builder at initialization." 
+ }, + { + "type": "code", + "source": "config_builder = dd.DataDesignerConfigBuilder(model_configs=model_configs)", + "language": "python" + }, + { + "type": "markdown", + "source": "### \ud83c\udf31 Seed Dataset Creation\n\nIn this section, we'll prepare our visual documents as a seed dataset for summarization:\n\n- **Loading Visual Documents**: We use the ColPali dataset containing document images\n- **Image Processing**: Convert images to base64 format for vision model consumption\n- **Metadata Extraction**: Preserve relevant document information (filename, page number, source, etc.)\n\nThe seed dataset will be used to generate detailed text summaries of each document image." + }, + { + "type": "code", + "source": "# Dataset processing configuration\nIMG_COUNT = 512 # Number of images to process\nBASE64_IMAGE_HEIGHT = 512 # Standardized height for model input\n\n# Load ColPali dataset for visual documents\nimg_dataset_cfg = {\"path\": \"vidore/colpali_train_set\", \"split\": \"train\", \"streaming\": True}", + "language": "python" + }, + { + "type": "code", + "source": "def resize_image(image, height: int):\n \"\"\"\n Resize image while maintaining aspect ratio.\n\n Args:\n image: PIL Image object\n height: Target height in pixels\n\n Returns:\n Resized PIL Image object\n \"\"\"\n original_width, original_height = image.size\n width = int(original_width * (height / original_height))\n return image.resize((width, height))\n\n\ndef convert_image_to_chat_format(record, height: int) -> dict:\n \"\"\"\n Convert PIL image to base64 format for chat template usage.\n\n Args:\n record: Dataset record containing image and metadata\n height: Target height for image resizing\n\n Returns:\n Updated record with base64_image and uuid fields\n \"\"\"\n # Resize image for consistent processing\n image = resize_image(record[\"image\"], height)\n\n # Convert to base64 string\n img_buffer = io.BytesIO()\n image.save(img_buffer, format=\"PNG\")\n byte_data = img_buffer.getvalue()\n base64_encoded_data = base64.b64encode(byte_data)\n base64_string = base64_encoded_data.decode(\"utf-8\")\n\n # Return updated record\n return record | {\"base64_image\": base64_string, \"uuid\": str(uuid.uuid4())}", + "language": "python" + }, + { + "type": "code", + "source": "# Load and process the visual document dataset\nprint(\"\ud83d\udce5 Loading and processing document images...\")\n\nimg_dataset_iter = iter(\n load_dataset(**img_dataset_cfg).map(convert_image_to_chat_format, fn_kwargs={\"height\": BASE64_IMAGE_HEIGHT})\n)\nimg_dataset = pd.DataFrame([next(img_dataset_iter) for _ in range(IMG_COUNT)])\n\nprint(f\"\u2705 Loaded {len(img_dataset)} images with columns: {list(img_dataset.columns)}\")", + "language": "python" + }, + { + "type": "code", + "source": "img_dataset.head()", + "language": "python" + }, + { + "type": "code", + "source": "# Add the seed dataset containing our processed images\ndf_seed = pd.DataFrame(img_dataset)[[\"uuid\", \"image_filename\", \"base64_image\", \"page\", \"options\", \"source\"]]\nconfig_builder.with_seed_dataset(dd.DataFrameSeedSource(df=df_seed))", + "language": "python" + }, + { + "type": "code", + "source": "# Add a column to generate detailed document summaries\nconfig_builder.add_column(\n dd.LLMTextColumnConfig(\n name=\"summary\",\n model_alias=\"vision\",\n prompt=(\n \"Provide a detailed summary of the content in this image in Markdown format. \"\n \"Start from the top of the image and then describe it from top to bottom. 
\"\n \"Place a summary at the bottom.\"\n ),\n multi_modal_context=[\n dd.ImageContext(\n column_name=\"base64_image\",\n data_type=dd.ModalityDataType.BASE64,\n image_format=dd.ImageFormat.PNG,\n )\n ],\n )\n)\n\ndata_designer.validate(config_builder)", + "language": "python" + }, + { + "type": "markdown", + "source": "### \ud83d\udd01 Iteration is key \u2013 preview the dataset!\n\n1. Use the `preview` method to generate a sample of records quickly.\n\n2. Inspect the results for quality and format issues.\n\n3. Adjust column configurations, prompts, or parameters as needed.\n\n4. Re-run the preview until satisfied." + }, + { + "type": "code", + "source": "preview = data_designer.preview(config_builder, num_records=2)", + "language": "python" + }, + { + "type": "code", + "source": "# Run this cell multiple times to cycle through the 2 preview records.\npreview.display_sample_record()", + "language": "python" + }, + { + "type": "code", + "source": "# The preview dataset is available as a pandas DataFrame.\npreview.dataset", + "language": "python" + }, + { + "type": "markdown", + "source": "### \ud83d\udcca Analyze the generated data\n\n- Data Designer automatically generates a basic statistical analysis of the generated data.\n\n- This analysis is available via the `analysis` property of generation result objects." + }, + { + "type": "code", + "source": "# Print the analysis as a table.\npreview.analysis.to_report()", + "language": "python" + }, + { + "type": "markdown", + "source": "### \ud83d\udd0e Visual Inspection\n\nLet's compare the original document image with the generated summary to validate quality:" + }, + { + "type": "code", + "source": "# Compare original document with generated summary\nindex = 0 # Change this to view different examples\n\n# Merge preview data with original images for comparison\ncomparison_dataset = preview.dataset.merge(pd.DataFrame(img_dataset)[[\"uuid\", \"image\"]], how=\"left\", on=\"uuid\")\n\n# Extract the record for display\nrecord = comparison_dataset.iloc[index]\n\nprint(\"\ud83d\udcc4 Original Document Image:\")\ndisplay(resize_image(record.image, BASE64_IMAGE_HEIGHT))\n\nprint(\"\\n\ud83d\udcdd Generated Summary:\")\nrich.print(Panel(record.summary, title=\"Document Summary\", title_align=\"left\"))", + "language": "python" + }, + { + "type": "markdown", + "source": "### \ud83c\udd99 Scale up!\n\n- Happy with your preview data?\n\n- Use the `create` method to submit larger Data Designer generation jobs." 
+ }, + { + "type": "code", + "source": "results = data_designer.create(config_builder, num_records=10, dataset_name=\"tutorial-4\")", + "language": "python" + }, + { + "type": "code", + "source": "# Load the generated dataset as a pandas DataFrame.\ndataset = results.load_dataset()\n\ndataset.head()", + "language": "python" + }, + { + "type": "code", + "source": "# Load the analysis results into memory.\nanalysis = results.load_analysis()\n\nanalysis.to_report()", + "language": "python" + }, + { + "type": "markdown", + "source": "## \u23ed\ufe0f Next Steps\n\nNow that you've learned how to use visual context for image summarization in Data Designer, explore more:\n\n- Experiment with different vision models for specific document types\n- Try different prompt variations to generate specialized descriptions (e.g., technical details, key findings)\n- Combine vision-based summaries with other column types for multi-modal workflows\n- Apply this pattern to other vision tasks like image captioning, OCR validation, or visual question answering\n\n- [Generating images](https://nvidia-nemo.github.io/DataDesigner/latest/notebooks/5-generating-images/) with Data Designer" + } + ] +} \ No newline at end of file diff --git a/fern/components/notebooks/4-providing-images-as-context.ts b/fern/components/notebooks/4-providing-images-as-context.ts new file mode 100644 index 00000000..1cfbaea1 --- /dev/null +++ b/fern/components/notebooks/4-providing-images-as-context.ts @@ -0,0 +1,155 @@ +/** Auto-generated by ipynb-to-fern-json.py - do not edit */ +export default { cells: [ + { + "type": "markdown", + "source": "# \ud83c\udfa8 Data Designer Tutorial: Providing Images as Context for Vision-Based Data Generation" + }, + { + "type": "markdown", + "source": "#### \ud83d\udcda What you'll learn\n\nThis notebook demonstrates how to provide images as context to generate text descriptions using vision-language models.\n\n- \u2728 **Visual Document Processing**: Converting images to chat-ready format for model consumption\n- \ud83d\udd0d **Vision-Language Generation**: Using vision models to generate detailed summaries from images\n\nIf this is your first time using Data Designer, we recommend starting with the [first notebook](https://nvidia-nemo.github.io/DataDesigner/latest/notebooks/1-the-basics/) in this tutorial series." + }, + { + "type": "markdown", + "source": "### \ud83d\udce6 Import Data Designer\n\n- `data_designer.config` provides access to the configuration API.\n\n- `DataDesigner` is the main interface for data generation." + }, + { + "type": "markdown", + "source": "### \u26a1 Colab Setup\n\nRun the cells below to install the dependencies and set up the API key. If you don't have an API key, you can generate one from [build.nvidia.com](https://build.nvidia.com)." 
+ }, + { + "type": "code", + "source": "%%capture\n!pip install -U data-designer \"pillow>=12.0.0,<13\" \"datasets>=4.0.0,<5\"", + "language": "python" + }, + { + "type": "code", + "source": "import getpass\nimport os\n\nfrom google.colab import userdata\n\ntry:\n os.environ[\"NVIDIA_API_KEY\"] = userdata.get(\"NVIDIA_API_KEY\")\nexcept userdata.SecretNotFoundError:\n os.environ[\"NVIDIA_API_KEY\"] = getpass.getpass(\"Enter your NVIDIA API key: \")", + "language": "python" + }, + { + "type": "code", + "source": "# Standard library imports\nimport base64\nimport io\nimport uuid\n\n# Third-party imports\nimport pandas as pd\nimport rich\nfrom datasets import load_dataset\nfrom IPython.display import display\nfrom rich.panel import Panel\n\n# Data Designer imports\nimport data_designer.config as dd\nfrom data_designer.interface import DataDesigner", + "language": "python" + }, + { + "type": "markdown", + "source": "### \u2699\ufe0f Initialize the Data Designer interface\n\n- `DataDesigner` is the main object responsible for managing the data generation process.\n\n- When initialized without arguments, the [default model providers](https://nvidia-nemo.github.io/DataDesigner/latest/concepts/models/default-model-settings/) are used." + }, + { + "type": "code", + "source": "data_designer = DataDesigner()", + "language": "python" + }, + { + "type": "markdown", + "source": "### \ud83c\udf9b\ufe0f Define model configurations\n\n- Each `ModelConfig` defines a model that can be used during the generation process.\n\n- The \"model alias\" is used to reference the model in the Data Designer config (as we will see below).\n\n- The \"model provider\" is the external service that hosts the model (see the [model config](https://nvidia-nemo.github.io/DataDesigner/latest/concepts/models/default-model-settings/) docs for more details).\n\n- By default, we use [build.nvidia.com](https://build.nvidia.com/models) as the model provider." + }, + { + "type": "code", + "source": "# This name is set in the model provider configuration.\nMODEL_PROVIDER = \"nvidia\"\n\nmodel_configs = [\n dd.ModelConfig(\n alias=\"vision\",\n model=\"meta/llama-4-scout-17b-16e-instruct\",\n provider=MODEL_PROVIDER,\n inference_parameters=dd.ChatCompletionInferenceParams(\n temperature=0.60,\n top_p=0.95,\n max_tokens=2048,\n ),\n ),\n]", + "language": "python" + }, + { + "type": "markdown", + "source": "### \ud83c\udfd7\ufe0f Initialize the Data Designer Config Builder\n\n- The Data Designer config defines the dataset schema and generation process.\n\n- The config builder provides an intuitive interface for building this configuration.\n\n- The list of model configs is provided to the builder at initialization." + }, + { + "type": "code", + "source": "config_builder = dd.DataDesignerConfigBuilder(model_configs=model_configs)", + "language": "python" + }, + { + "type": "markdown", + "source": "### \ud83c\udf31 Seed Dataset Creation\n\nIn this section, we'll prepare our visual documents as a seed dataset for summarization:\n\n- **Loading Visual Documents**: We use the ColPali dataset containing document images\n- **Image Processing**: Convert images to base64 format for vision model consumption\n- **Metadata Extraction**: Preserve relevant document information (filename, page number, source, etc.)\n\nThe seed dataset will be used to generate detailed text summaries of each document image." 
+ }, + { + "type": "code", + "source": "# Dataset processing configuration\nIMG_COUNT = 512 # Number of images to process\nBASE64_IMAGE_HEIGHT = 512 # Standardized height for model input\n\n# Load ColPali dataset for visual documents\nimg_dataset_cfg = {\"path\": \"vidore/colpali_train_set\", \"split\": \"train\", \"streaming\": True}", + "language": "python" + }, + { + "type": "code", + "source": "def resize_image(image, height: int):\n \"\"\"\n Resize image while maintaining aspect ratio.\n\n Args:\n image: PIL Image object\n height: Target height in pixels\n\n Returns:\n Resized PIL Image object\n \"\"\"\n original_width, original_height = image.size\n width = int(original_width * (height / original_height))\n return image.resize((width, height))\n\n\ndef convert_image_to_chat_format(record, height: int) -> dict:\n \"\"\"\n Convert PIL image to base64 format for chat template usage.\n\n Args:\n record: Dataset record containing image and metadata\n height: Target height for image resizing\n\n Returns:\n Updated record with base64_image and uuid fields\n \"\"\"\n # Resize image for consistent processing\n image = resize_image(record[\"image\"], height)\n\n # Convert to base64 string\n img_buffer = io.BytesIO()\n image.save(img_buffer, format=\"PNG\")\n byte_data = img_buffer.getvalue()\n base64_encoded_data = base64.b64encode(byte_data)\n base64_string = base64_encoded_data.decode(\"utf-8\")\n\n # Return updated record\n return record | {\"base64_image\": base64_string, \"uuid\": str(uuid.uuid4())}", + "language": "python" + }, + { + "type": "code", + "source": "# Load and process the visual document dataset\nprint(\"\ud83d\udce5 Loading and processing document images...\")\n\nimg_dataset_iter = iter(\n load_dataset(**img_dataset_cfg).map(convert_image_to_chat_format, fn_kwargs={\"height\": BASE64_IMAGE_HEIGHT})\n)\nimg_dataset = pd.DataFrame([next(img_dataset_iter) for _ in range(IMG_COUNT)])\n\nprint(f\"\u2705 Loaded {len(img_dataset)} images with columns: {list(img_dataset.columns)}\")", + "language": "python" + }, + { + "type": "code", + "source": "img_dataset.head()", + "language": "python" + }, + { + "type": "code", + "source": "# Add the seed dataset containing our processed images\ndf_seed = pd.DataFrame(img_dataset)[[\"uuid\", \"image_filename\", \"base64_image\", \"page\", \"options\", \"source\"]]\nconfig_builder.with_seed_dataset(dd.DataFrameSeedSource(df=df_seed))", + "language": "python" + }, + { + "type": "code", + "source": "# Add a column to generate detailed document summaries\nconfig_builder.add_column(\n dd.LLMTextColumnConfig(\n name=\"summary\",\n model_alias=\"vision\",\n prompt=(\n \"Provide a detailed summary of the content in this image in Markdown format. \"\n \"Start from the top of the image and then describe it from top to bottom. \"\n \"Place a summary at the bottom.\"\n ),\n multi_modal_context=[\n dd.ImageContext(\n column_name=\"base64_image\",\n data_type=dd.ModalityDataType.BASE64,\n image_format=dd.ImageFormat.PNG,\n )\n ],\n )\n)\n\ndata_designer.validate(config_builder)", + "language": "python" + }, + { + "type": "markdown", + "source": "### \ud83d\udd01 Iteration is key \u2013 preview the dataset!\n\n1. Use the `preview` method to generate a sample of records quickly.\n\n2. Inspect the results for quality and format issues.\n\n3. Adjust column configurations, prompts, or parameters as needed.\n\n4. Re-run the preview until satisfied." 
+ }, + { + "type": "code", + "source": "preview = data_designer.preview(config_builder, num_records=2)", + "language": "python" + }, + { + "type": "code", + "source": "# Run this cell multiple times to cycle through the 2 preview records.\npreview.display_sample_record()", + "language": "python" + }, + { + "type": "code", + "source": "# The preview dataset is available as a pandas DataFrame.\npreview.dataset", + "language": "python" + }, + { + "type": "markdown", + "source": "### \ud83d\udcca Analyze the generated data\n\n- Data Designer automatically generates a basic statistical analysis of the generated data.\n\n- This analysis is available via the `analysis` property of generation result objects." + }, + { + "type": "code", + "source": "# Print the analysis as a table.\npreview.analysis.to_report()", + "language": "python" + }, + { + "type": "markdown", + "source": "### \ud83d\udd0e Visual Inspection\n\nLet's compare the original document image with the generated summary to validate quality:" + }, + { + "type": "code", + "source": "# Compare original document with generated summary\nindex = 0 # Change this to view different examples\n\n# Merge preview data with original images for comparison\ncomparison_dataset = preview.dataset.merge(pd.DataFrame(img_dataset)[[\"uuid\", \"image\"]], how=\"left\", on=\"uuid\")\n\n# Extract the record for display\nrecord = comparison_dataset.iloc[index]\n\nprint(\"\ud83d\udcc4 Original Document Image:\")\ndisplay(resize_image(record.image, BASE64_IMAGE_HEIGHT))\n\nprint(\"\\n\ud83d\udcdd Generated Summary:\")\nrich.print(Panel(record.summary, title=\"Document Summary\", title_align=\"left\"))", + "language": "python" + }, + { + "type": "markdown", + "source": "### \ud83c\udd99 Scale up!\n\n- Happy with your preview data?\n\n- Use the `create` method to submit larger Data Designer generation jobs." 
+ }, + { + "type": "code", + "source": "results = data_designer.create(config_builder, num_records=10, dataset_name=\"tutorial-4\")", + "language": "python" + }, + { + "type": "code", + "source": "# Load the generated dataset as a pandas DataFrame.\ndataset = results.load_dataset()\n\ndataset.head()", + "language": "python" + }, + { + "type": "code", + "source": "# Load the analysis results into memory.\nanalysis = results.load_analysis()\n\nanalysis.to_report()", + "language": "python" + }, + { + "type": "markdown", + "source": "## \u23ed\ufe0f Next Steps\n\nNow that you've learned how to use visual context for image summarization in Data Designer, explore more:\n\n- Experiment with different vision models for specific document types\n- Try different prompt variations to generate specialized descriptions (e.g., technical details, key findings)\n- Combine vision-based summaries with other column types for multi-modal workflows\n- Apply this pattern to other vision tasks like image captioning, OCR validation, or visual question answering\n\n- [Generating images](https://nvidia-nemo.github.io/DataDesigner/latest/notebooks/5-generating-images/) with Data Designer" + } +] }; diff --git a/fern/components/notebooks/5-generating-images.json b/fern/components/notebooks/5-generating-images.json new file mode 100644 index 00000000..9a574e7d --- /dev/null +++ b/fern/components/notebooks/5-generating-images.json @@ -0,0 +1,100 @@ +{ + "cells": [ + { + "type": "markdown", + "source": "# \ud83c\udfa8 Data Designer Tutorial: Generating Images\n\n#### \ud83d\udcda What you'll learn\n\nThis notebook shows how to generate synthetic image data with Data Designer using image-generation models.\n\n- \ud83d\uddbc\ufe0f **Image generation columns**: Add columns that produce images from text prompts\n- \ud83d\udcdd **Jinja2 prompts**: Drive diversity by referencing other columns in your prompt template\n- \ud83d\udcbe **Preview vs create**: Preview stores base64 in the dataframe; create saves images to disk and stores paths\n\nData Designer supports both **diffusion** (e.g. DALL\u00b7E, Stable Diffusion, Imagen) and **autoregressive** (e.g. Gemini image, GPT image) models.\n\n> **Prerequisites**: This tutorial uses [OpenRouter](https://openrouter.ai) with the Flux 2 Pro image model. Set `OPENROUTER_API_KEY` in your environment before running.\n\nIf this is your first time using Data Designer, we recommend starting with the [first notebook](https://nvidia-nemo.github.io/DataDesigner/latest/notebooks/1-the-basics/) in this tutorial series." + }, + { + "type": "markdown", + "source": "### \ud83d\udce6 Import Data Designer\n\n- `data_designer.config` provides the configuration API.\n- `DataDesigner` is the main interface for generation." + }, + { + "type": "markdown", + "source": "### \u26a1 Colab Setup\n\nRun the cells below to install the dependencies and set up the API key. If you don't have an API key, you can generate one from [build.nvidia.com](https://build.nvidia.com)." 
+ }, + { + "type": "code", + "source": "%%capture\n!pip install -U data-designer", + "language": "python" + }, + { + "type": "code", + "source": "import getpass\nimport os\n\nfrom google.colab import userdata\n\ntry:\n os.environ[\"NVIDIA_API_KEY\"] = userdata.get(\"NVIDIA_API_KEY\")\nexcept userdata.SecretNotFoundError:\n os.environ[\"NVIDIA_API_KEY\"] = getpass.getpass(\"Enter your NVIDIA API key: \")", + "language": "python" + }, + { + "type": "code", + "source": "from IPython.display import Image as IPImage\nfrom IPython.display import display\n\nimport data_designer.config as dd\nfrom data_designer.interface import DataDesigner", + "language": "python" + }, + { + "type": "markdown", + "source": "### \u2699\ufe0f Initialize the Data Designer interface\n\nWe initialize Data Designer without arguments here\u2014the image model is configured explicitly in the next cell. No default text model is needed for this tutorial." + }, + { + "type": "code", + "source": "data_designer = DataDesigner()", + "language": "python" + }, + { + "type": "markdown", + "source": "### \ud83c\udf9b\ufe0f Define an image-generation model\n\n- Use `ImageInferenceParams` so Data Designer treats this model as an image generator.\n- Image options (size, quality, aspect ratio, etc.) are model-specific; pass them via `extra_body`." + }, + { + "type": "code", + "source": "MODEL_PROVIDER = \"openrouter\"\nMODEL_ID = \"black-forest-labs/flux.2-pro\"\nMODEL_ALIAS = \"image-model\"\n\nmodel_configs = [\n dd.ModelConfig(\n alias=MODEL_ALIAS,\n model=MODEL_ID,\n provider=MODEL_PROVIDER,\n inference_parameters=dd.ImageInferenceParams(\n extra_body={\"height\": 512, \"width\": 512},\n ),\n )\n]", + "language": "python" + }, + { + "type": "markdown", + "source": "### \ud83c\udfd7\ufe0f Build the config: samplers + image column\n\nWe'll generate diverse **dog portrait** images: sampler columns drive subject (breed), age, style, look direction, and emotion. The image-generation column uses a Jinja2 prompt that references all of them." 
+ }, + { + "type": "code", + "source": "config_builder = dd.DataDesignerConfigBuilder(model_configs=model_configs)\n\nconfig_builder.add_column(\n dd.SamplerColumnConfig(\n name=\"style\",\n sampler_type=dd.SamplerType.CATEGORY,\n params=dd.CategorySamplerParams(\n values=[\n \"photorealistic\",\n \"oil painting\",\n \"watercolor\",\n \"digital art\",\n \"sketch\",\n \"anime\",\n ],\n ),\n )\n)\n\nconfig_builder.add_column(\n dd.SamplerColumnConfig(\n name=\"dog_breed\",\n sampler_type=dd.SamplerType.CATEGORY,\n params=dd.CategorySamplerParams(\n values=[\n \"a Golden Retriever\",\n \"a German Shepherd\",\n \"a Labrador Retriever\",\n \"a Bulldog\",\n \"a Beagle\",\n \"a Poodle\",\n \"a Corgi\",\n \"a Siberian Husky\",\n \"a Dalmatian\",\n \"a Yorkshire Terrier\",\n \"a Boxer\",\n \"a Dachshund\",\n \"a Doberman Pinscher\",\n \"a Shih Tzu\",\n \"a Chihuahua\",\n \"a Border Collie\",\n \"an Australian Shepherd\",\n \"a Cocker Spaniel\",\n \"a Maltese\",\n \"a Pomeranian\",\n \"a Saint Bernard\",\n \"a Great Dane\",\n \"an Akita\",\n \"a Samoyed\",\n \"a Boston Terrier\",\n ],\n ),\n )\n)\n\nconfig_builder.add_column(\n dd.SamplerColumnConfig(\n name=\"cat_breed\",\n sampler_type=dd.SamplerType.CATEGORY,\n params=dd.CategorySamplerParams(\n values=[\n \"a Persian\",\n \"a Maine Coon\",\n \"a Siamese\",\n \"a Ragdoll\",\n \"a Bengal\",\n \"an Abyssinian\",\n \"a British Shorthair\",\n \"a Sphynx\",\n \"a Scottish Fold\",\n \"a Russian Blue\",\n \"a Birman\",\n \"an Oriental Shorthair\",\n \"a Norwegian Forest Cat\",\n \"a Devon Rex\",\n \"a Burmese\",\n \"an Egyptian Mau\",\n \"a Tonkinese\",\n \"a Himalayan\",\n \"a Savannah\",\n \"a Chartreux\",\n \"a Somali\",\n \"a Manx\",\n \"a Turkish Angora\",\n \"a Balinese\",\n \"an American Shorthair\",\n ],\n ),\n )\n)\n\nconfig_builder.add_column(\n dd.SamplerColumnConfig(\n name=\"dog_age\",\n sampler_type=dd.SamplerType.CATEGORY,\n params=dd.CategorySamplerParams(\n values=[\"1-3\", \"3-6\", \"6-9\", \"9-12\", \"12-15\"],\n ),\n )\n)\n\nconfig_builder.add_column(\n dd.SamplerColumnConfig(\n name=\"cat_age\",\n sampler_type=dd.SamplerType.CATEGORY,\n params=dd.CategorySamplerParams(\n values=[\"1-3\", \"3-6\", \"6-9\", \"9-12\", \"12-18\"],\n ),\n )\n)\n\nconfig_builder.add_column(\n dd.SamplerColumnConfig(\n name=\"dog_look_direction\",\n sampler_type=dd.SamplerType.CATEGORY,\n params=dd.CategorySamplerParams(\n values=[\"left\", \"right\", \"front\", \"up\", \"down\"],\n ),\n )\n)\n\nconfig_builder.add_column(\n dd.SamplerColumnConfig(\n name=\"cat_look_direction\",\n sampler_type=dd.SamplerType.CATEGORY,\n params=dd.CategorySamplerParams(\n values=[\"left\", \"right\", \"front\", \"up\", \"down\"],\n ),\n )\n)\n\nconfig_builder.add_column(\n dd.SamplerColumnConfig(\n name=\"dog_emotion\",\n sampler_type=dd.SamplerType.CATEGORY,\n params=dd.CategorySamplerParams(\n values=[\"happy\", \"curious\", \"serious\", \"sleepy\", \"excited\"],\n ),\n )\n)\n\nconfig_builder.add_column(\n dd.SamplerColumnConfig(\n name=\"cat_emotion\",\n sampler_type=dd.SamplerType.CATEGORY,\n params=dd.CategorySamplerParams(\n values=[\"aloof\", \"curious\", \"content\", \"sleepy\", \"playful\"],\n ),\n )\n)\n\nconfig_builder.add_column(\n dd.ImageColumnConfig(\n name=\"generated_image\",\n prompt=(\n \"\"\"\nA {{ style }} family pet portrait of a {{ dog_breed }} dog of {{ dog_age }} years old looking {{dog_look_direction}} with an {{ dog_emotion }} expression and\n{{ cat_breed }} cat of {{ cat_age }} years old looking {{ cat_look_direction }} with an {{ cat_emotion }} 
expression in the background. Both subjects should be in focus.\n \"\"\"\n ),\n model_alias=MODEL_ALIAS,\n )\n)\n\ndata_designer.validate(config_builder)", + "language": "python" + }, + { + "type": "markdown", + "source": "### \ud83d\udd01 Preview: images as base64\n\nIn **preview** mode, generated images are stored as base64 strings in the dataframe. Run the next cell to step through each record (images are shown in the sample record display, but only in a notebook environment)." + }, + { + "type": "code", + "source": "preview = data_designer.preview(config_builder, num_records=2)", + "language": "python" + }, + { + "type": "code", + "source": "for i in range(len(preview.dataset)):\n preview.display_sample_record()", + "language": "python" + }, + { + "type": "code", + "source": "preview.dataset", + "language": "python" + }, + { + "type": "markdown", + "source": "### \ud83c\udd99 Create: images saved to disk\n\nIn **create** mode, images are written to an `images/` folder with UUID filenames; the dataframe stores relative paths (e.g. `images/1d16b6e2-562f-4f51-91e5-baaa999ea916.png`)." + }, + { + "type": "code", + "source": "results = data_designer.create(config_builder, num_records=2, dataset_name=\"tutorial-5-images\")", + "language": "python" + }, + { + "type": "code", + "source": "dataset = results.load_dataset()\ndataset.head()", + "language": "python" + }, + { + "type": "code", + "source": "# Display all images from the created dataset. Paths are relative to the artifact output directory.\nfor index, row in dataset.iterrows():\n path_or_list = row.get(\"generated_image\")\n if path_or_list is not None:\n paths = path_or_list if not isinstance(path_or_list, str) else [path_or_list]\n for path in paths:\n full_path = results.artifact_storage.base_dataset_path / path\n display(IPImage(filename=str(full_path)))", + "language": "python" + }, + { + "type": "markdown", + "source": "## \u23ed\ufe0f Next steps\n\n- [The basics](https://nvidia-nemo.github.io/DataDesigner/latest/notebooks/1-the-basics/): samplers and LLM text columns\n- [Structured outputs and Jinja](https://nvidia-nemo.github.io/DataDesigner/latest/notebooks/2-structured-outputs-and-jinja-expressions/)\n- [Seeding with a dataset](https://nvidia-nemo.github.io/DataDesigner/latest/notebooks/3-seeding-with-a-dataset/)\n- [Providing images as context](https://nvidia-nemo.github.io/DataDesigner/latest/notebooks/4-providing-images-as-context/)\n- [Image-to-image editing](https://nvidia-nemo.github.io/DataDesigner/latest/notebooks/6-editing-images-with-image-context/): edit existing images with seed datasets" + } + ] +} \ No newline at end of file diff --git a/fern/components/notebooks/5-generating-images.ts b/fern/components/notebooks/5-generating-images.ts new file mode 100644 index 00000000..0f295503 --- /dev/null +++ b/fern/components/notebooks/5-generating-images.ts @@ -0,0 +1,99 @@ +/** Auto-generated by ipynb-to-fern-json.py - do not edit */ +export default { cells: [ + { + "type": "markdown", + "source": "# \ud83c\udfa8 Data Designer Tutorial: Generating Images\n\n#### \ud83d\udcda What you'll learn\n\nThis notebook shows how to generate synthetic image data with Data Designer using image-generation models.\n\n- \ud83d\uddbc\ufe0f **Image generation columns**: Add columns that produce images from text prompts\n- \ud83d\udcdd **Jinja2 prompts**: Drive diversity by referencing other columns in your prompt template\n- \ud83d\udcbe **Preview vs create**: Preview stores base64 in the dataframe; create saves images to disk and 
stores paths\n\nData Designer supports both **diffusion** (e.g. DALL\u00b7E, Stable Diffusion, Imagen) and **autoregressive** (e.g. Gemini image, GPT image) models.\n\n> **Prerequisites**: This tutorial uses [OpenRouter](https://openrouter.ai) with the Flux 2 Pro image model. Set `OPENROUTER_API_KEY` in your environment before running.\n\nIf this is your first time using Data Designer, we recommend starting with the [first notebook](https://nvidia-nemo.github.io/DataDesigner/latest/notebooks/1-the-basics/) in this tutorial series." + }, + { + "type": "markdown", + "source": "### \ud83d\udce6 Import Data Designer\n\n- `data_designer.config` provides the configuration API.\n- `DataDesigner` is the main interface for generation." + }, + { + "type": "markdown", + "source": "### \u26a1 Colab Setup\n\nRun the cells below to install the dependencies and set up the API key. If you don't have an API key, you can generate one from [build.nvidia.com](https://build.nvidia.com)." + }, + { + "type": "code", + "source": "%%capture\n!pip install -U data-designer", + "language": "python" + }, + { + "type": "code", + "source": "import getpass\nimport os\n\nfrom google.colab import userdata\n\ntry:\n os.environ[\"NVIDIA_API_KEY\"] = userdata.get(\"NVIDIA_API_KEY\")\nexcept userdata.SecretNotFoundError:\n os.environ[\"NVIDIA_API_KEY\"] = getpass.getpass(\"Enter your NVIDIA API key: \")", + "language": "python" + }, + { + "type": "code", + "source": "from IPython.display import Image as IPImage\nfrom IPython.display import display\n\nimport data_designer.config as dd\nfrom data_designer.interface import DataDesigner", + "language": "python" + }, + { + "type": "markdown", + "source": "### \u2699\ufe0f Initialize the Data Designer interface\n\nWe initialize Data Designer without arguments here\u2014the image model is configured explicitly in the next cell. No default text model is needed for this tutorial." + }, + { + "type": "code", + "source": "data_designer = DataDesigner()", + "language": "python" + }, + { + "type": "markdown", + "source": "### \ud83c\udf9b\ufe0f Define an image-generation model\n\n- Use `ImageInferenceParams` so Data Designer treats this model as an image generator.\n- Image options (size, quality, aspect ratio, etc.) are model-specific; pass them via `extra_body`." + }, + { + "type": "code", + "source": "MODEL_PROVIDER = \"openrouter\"\nMODEL_ID = \"black-forest-labs/flux.2-pro\"\nMODEL_ALIAS = \"image-model\"\n\nmodel_configs = [\n dd.ModelConfig(\n alias=MODEL_ALIAS,\n model=MODEL_ID,\n provider=MODEL_PROVIDER,\n inference_parameters=dd.ImageInferenceParams(\n extra_body={\"height\": 512, \"width\": 512},\n ),\n )\n]", + "language": "python" + }, + { + "type": "markdown", + "source": "### \ud83c\udfd7\ufe0f Build the config: samplers + image column\n\nWe'll generate diverse **dog portrait** images: sampler columns drive subject (breed), age, style, look direction, and emotion. The image-generation column uses a Jinja2 prompt that references all of them." 
+ }, + { + "type": "code", + "source": "config_builder = dd.DataDesignerConfigBuilder(model_configs=model_configs)\n\nconfig_builder.add_column(\n dd.SamplerColumnConfig(\n name=\"style\",\n sampler_type=dd.SamplerType.CATEGORY,\n params=dd.CategorySamplerParams(\n values=[\n \"photorealistic\",\n \"oil painting\",\n \"watercolor\",\n \"digital art\",\n \"sketch\",\n \"anime\",\n ],\n ),\n )\n)\n\nconfig_builder.add_column(\n dd.SamplerColumnConfig(\n name=\"dog_breed\",\n sampler_type=dd.SamplerType.CATEGORY,\n params=dd.CategorySamplerParams(\n values=[\n \"a Golden Retriever\",\n \"a German Shepherd\",\n \"a Labrador Retriever\",\n \"a Bulldog\",\n \"a Beagle\",\n \"a Poodle\",\n \"a Corgi\",\n \"a Siberian Husky\",\n \"a Dalmatian\",\n \"a Yorkshire Terrier\",\n \"a Boxer\",\n \"a Dachshund\",\n \"a Doberman Pinscher\",\n \"a Shih Tzu\",\n \"a Chihuahua\",\n \"a Border Collie\",\n \"an Australian Shepherd\",\n \"a Cocker Spaniel\",\n \"a Maltese\",\n \"a Pomeranian\",\n \"a Saint Bernard\",\n \"a Great Dane\",\n \"an Akita\",\n \"a Samoyed\",\n \"a Boston Terrier\",\n ],\n ),\n )\n)\n\nconfig_builder.add_column(\n dd.SamplerColumnConfig(\n name=\"cat_breed\",\n sampler_type=dd.SamplerType.CATEGORY,\n params=dd.CategorySamplerParams(\n values=[\n \"a Persian\",\n \"a Maine Coon\",\n \"a Siamese\",\n \"a Ragdoll\",\n \"a Bengal\",\n \"an Abyssinian\",\n \"a British Shorthair\",\n \"a Sphynx\",\n \"a Scottish Fold\",\n \"a Russian Blue\",\n \"a Birman\",\n \"an Oriental Shorthair\",\n \"a Norwegian Forest Cat\",\n \"a Devon Rex\",\n \"a Burmese\",\n \"an Egyptian Mau\",\n \"a Tonkinese\",\n \"a Himalayan\",\n \"a Savannah\",\n \"a Chartreux\",\n \"a Somali\",\n \"a Manx\",\n \"a Turkish Angora\",\n \"a Balinese\",\n \"an American Shorthair\",\n ],\n ),\n )\n)\n\nconfig_builder.add_column(\n dd.SamplerColumnConfig(\n name=\"dog_age\",\n sampler_type=dd.SamplerType.CATEGORY,\n params=dd.CategorySamplerParams(\n values=[\"1-3\", \"3-6\", \"6-9\", \"9-12\", \"12-15\"],\n ),\n )\n)\n\nconfig_builder.add_column(\n dd.SamplerColumnConfig(\n name=\"cat_age\",\n sampler_type=dd.SamplerType.CATEGORY,\n params=dd.CategorySamplerParams(\n values=[\"1-3\", \"3-6\", \"6-9\", \"9-12\", \"12-18\"],\n ),\n )\n)\n\nconfig_builder.add_column(\n dd.SamplerColumnConfig(\n name=\"dog_look_direction\",\n sampler_type=dd.SamplerType.CATEGORY,\n params=dd.CategorySamplerParams(\n values=[\"left\", \"right\", \"front\", \"up\", \"down\"],\n ),\n )\n)\n\nconfig_builder.add_column(\n dd.SamplerColumnConfig(\n name=\"cat_look_direction\",\n sampler_type=dd.SamplerType.CATEGORY,\n params=dd.CategorySamplerParams(\n values=[\"left\", \"right\", \"front\", \"up\", \"down\"],\n ),\n )\n)\n\nconfig_builder.add_column(\n dd.SamplerColumnConfig(\n name=\"dog_emotion\",\n sampler_type=dd.SamplerType.CATEGORY,\n params=dd.CategorySamplerParams(\n values=[\"happy\", \"curious\", \"serious\", \"sleepy\", \"excited\"],\n ),\n )\n)\n\nconfig_builder.add_column(\n dd.SamplerColumnConfig(\n name=\"cat_emotion\",\n sampler_type=dd.SamplerType.CATEGORY,\n params=dd.CategorySamplerParams(\n values=[\"aloof\", \"curious\", \"content\", \"sleepy\", \"playful\"],\n ),\n )\n)\n\nconfig_builder.add_column(\n dd.ImageColumnConfig(\n name=\"generated_image\",\n prompt=(\n \"\"\"\nA {{ style }} family pet portrait of a {{ dog_breed }} dog of {{ dog_age }} years old looking {{dog_look_direction}} with an {{ dog_emotion }} expression and\n{{ cat_breed }} cat of {{ cat_age }} years old looking {{ cat_look_direction }} with an {{ cat_emotion }} 
expression in the background. Both subjects should be in focus.\n \"\"\"\n ),\n model_alias=MODEL_ALIAS,\n )\n)\n\ndata_designer.validate(config_builder)", + "language": "python" + }, + { + "type": "markdown", + "source": "### \ud83d\udd01 Preview: images as base64\n\nIn **preview** mode, generated images are stored as base64 strings in the dataframe. Run the next cell to step through each record (images are shown in the sample record display, but only in a notebook environment)." + }, + { + "type": "code", + "source": "preview = data_designer.preview(config_builder, num_records=2)", + "language": "python" + }, + { + "type": "code", + "source": "for i in range(len(preview.dataset)):\n preview.display_sample_record()", + "language": "python" + }, + { + "type": "code", + "source": "preview.dataset", + "language": "python" + }, + { + "type": "markdown", + "source": "### \ud83c\udd99 Create: images saved to disk\n\nIn **create** mode, images are written to an `images/` folder with UUID filenames; the dataframe stores relative paths (e.g. `images/1d16b6e2-562f-4f51-91e5-baaa999ea916.png`)." + }, + { + "type": "code", + "source": "results = data_designer.create(config_builder, num_records=2, dataset_name=\"tutorial-5-images\")", + "language": "python" + }, + { + "type": "code", + "source": "dataset = results.load_dataset()\ndataset.head()", + "language": "python" + }, + { + "type": "code", + "source": "# Display all images from the created dataset. Paths are relative to the artifact output directory.\nfor index, row in dataset.iterrows():\n path_or_list = row.get(\"generated_image\")\n if path_or_list is not None:\n paths = path_or_list if not isinstance(path_or_list, str) else [path_or_list]\n for path in paths:\n full_path = results.artifact_storage.base_dataset_path / path\n display(IPImage(filename=str(full_path)))", + "language": "python" + }, + { + "type": "markdown", + "source": "## \u23ed\ufe0f Next steps\n\n- [The basics](https://nvidia-nemo.github.io/DataDesigner/latest/notebooks/1-the-basics/): samplers and LLM text columns\n- [Structured outputs and Jinja](https://nvidia-nemo.github.io/DataDesigner/latest/notebooks/2-structured-outputs-and-jinja-expressions/)\n- [Seeding with a dataset](https://nvidia-nemo.github.io/DataDesigner/latest/notebooks/3-seeding-with-a-dataset/)\n- [Providing images as context](https://nvidia-nemo.github.io/DataDesigner/latest/notebooks/4-providing-images-as-context/)\n- [Image-to-image editing](https://nvidia-nemo.github.io/DataDesigner/latest/notebooks/6-editing-images-with-image-context/): edit existing images with seed datasets" + } +] }; diff --git a/fern/components/notebooks/6-editing-images-with-image-context.json b/fern/components/notebooks/6-editing-images-with-image-context.json new file mode 100644 index 00000000..0c695e5b --- /dev/null +++ b/fern/components/notebooks/6-editing-images-with-image-context.json @@ -0,0 +1,128 @@ +{ + "cells": [ + { + "type": "markdown", + "source": "# \ud83c\udfa8 Data Designer Tutorial: Image-to-Image Editing\n\n#### \ud83d\udcda What you'll learn\n\nThis notebook shows how to edit existing images by combining a seed dataset with image generation. 
You'll load animal portrait photographs from HuggingFace, feed them as context to an autoregressive model, and generate fun edited versions with accessories like sunglasses, top hats, and bow ties.\n\n- \ud83c\udf31 **Seed datasets with images**: Load a HuggingFace image dataset and use it as a seed\n- \ud83d\uddbc\ufe0f **Image context for editing**: Pass existing images to an image-generation model via `multi_modal_context`\n- \ud83c\udfb2 **Sampler-driven diversity**: Combine sampled accessories and settings with seed images for varied results\n- \ud83d\udcbe **Preview vs create**: Preview stores base64 in the dataframe; create saves images to disk\n\nThis tutorial uses an **autoregressive** model (one that supports both image input *and* image output via the chat completions API). Diffusion models (DALL\u00b7E, Stable Diffusion, etc.) do not support image context\u2014see [Tutorial 5](https://nvidia-nemo.github.io/DataDesigner/latest/notebooks/5-generating-images/) for text-to-image generation with diffusion models.\n\n> **Prerequisites**: This tutorial uses [OpenRouter](https://openrouter.ai) with the Flux 2 Pro model. Set `OPENROUTER_API_KEY` in your environment before running.\n\nIf this is your first time using Data Designer, we recommend starting with the [first notebook](https://nvidia-nemo.github.io/DataDesigner/latest/notebooks/1-the-basics/) in this tutorial series." + }, + { + "type": "markdown", + "source": "### \ud83d\udce6 Import Data Designer\n\n- `data_designer.config` provides the configuration API.\n- `DataDesigner` is the main interface for generation." + }, + { + "type": "markdown", + "source": "### \u26a1 Colab Setup\n\nRun the cells below to install the dependencies and set up the API key. If you don't have an API key, you can generate one from [build.nvidia.com](https://build.nvidia.com)." + }, + { + "type": "code", + "source": "%%capture\n!pip install -U data-designer", + "language": "python" + }, + { + "type": "code", + "source": "import getpass\nimport os\n\nfrom google.colab import userdata\n\ntry:\n os.environ[\"NVIDIA_API_KEY\"] = userdata.get(\"NVIDIA_API_KEY\")\nexcept userdata.SecretNotFoundError:\n os.environ[\"NVIDIA_API_KEY\"] = getpass.getpass(\"Enter your NVIDIA API key: \")", + "language": "python" + }, + { + "type": "code", + "source": "import base64\nimport io\nimport uuid\n\nimport pandas as pd\nfrom datasets import load_dataset\nfrom IPython.display import Image as IPImage\nfrom IPython.display import display\n\nimport data_designer.config as dd\nfrom data_designer.interface import DataDesigner", + "language": "python" + }, + { + "type": "markdown", + "source": "### \u2699\ufe0f Initialize the Data Designer interface\n\nWe initialize Data Designer without arguments here\u2014the image-editing model is configured explicitly in the next cell. No default text model is needed for this tutorial." + }, + { + "type": "code", + "source": "data_designer = DataDesigner()", + "language": "python" + }, + { + "type": "markdown", + "source": "### \ud83c\udf9b\ufe0f Define an image-editing model\n\nWe need an **autoregressive** model that supports both image input and image output via the chat completions API. This lets us pass existing images as context and receive edited images back.\n\n- Use `ImageInferenceParams` so Data Designer treats this model as an image generator.\n- Image-specific options are model-dependent; pass them via `extra_body`.\n\n> **Note**: This tutorial uses the Flux 2 Pro model via [OpenRouter](https://openrouter.ai). 
Set `OPENROUTER_API_KEY` in your environment." + }, + { + "type": "code", + "source": "MODEL_PROVIDER = \"openrouter\"\nMODEL_ID = \"black-forest-labs/flux.2-pro\"\nMODEL_ALIAS = \"image-editor\"\n\nmodel_configs = [\n dd.ModelConfig(\n alias=MODEL_ALIAS,\n model=MODEL_ID,\n provider=MODEL_PROVIDER,\n inference_parameters=dd.ImageInferenceParams(\n extra_body={\"height\": 512, \"width\": 512},\n ),\n )\n]", + "language": "python" + }, + { + "type": "markdown", + "source": "### \ud83c\udf31 Load animal portraits from HuggingFace\n\nWe'll load animal face photographs from the [AFHQ](https://huggingface.co/datasets/huggan/AFHQv2) (Animal Faces-HQ) dataset, convert them to base64, and use them as a seed dataset.\n\nAFHQ contains high-quality 512\u00d7512 close-up portraits of cats, dogs, and wildlife\u2014perfect subjects for adding fun accessories." + }, + { + "type": "code", + "source": "SEED_COUNT = 10\nBASE64_IMAGE_HEIGHT = 512\n\nANIMAL_LABELS = {0: \"cat\", 1: \"dog\", 2: \"wild\"}\n\n\ndef resize_image(image, height: int):\n \"\"\"Resize image maintaining aspect ratio.\"\"\"\n original_width, original_height = image.size\n width = int(original_width * (height / original_height))\n return image.resize((width, height))\n\n\ndef prepare_record(record: dict, height: int) -> dict:\n \"\"\"Convert a HuggingFace record to base64 with metadata.\"\"\"\n image = resize_image(record[\"image\"], height)\n img_buffer = io.BytesIO()\n image.save(img_buffer, format=\"PNG\")\n base64_string = base64.b64encode(img_buffer.getvalue()).decode(\"utf-8\")\n return {\n \"uuid\": str(uuid.uuid4()),\n \"base64_image\": base64_string,\n \"animal\": ANIMAL_LABELS[record[\"label\"]],\n }", + "language": "python" + }, + { + "type": "code", + "source": "print(\"\ud83d\udce5 Streaming animal portraits from HuggingFace...\")\nhf_dataset = load_dataset(\"huggan/AFHQv2\", split=\"train\", streaming=True)\n\nhf_iter = iter(hf_dataset)\nrecords = [prepare_record(next(hf_iter), BASE64_IMAGE_HEIGHT) for _ in range(SEED_COUNT)]\ndf_seed = pd.DataFrame(records)\n\nprint(f\"\u2705 Prepared {len(df_seed)} animal portraits with columns: {list(df_seed.columns)}\")\ndf_seed.head()", + "language": "python" + }, + { + "type": "markdown", + "source": "### \ud83c\udfd7\ufe0f Build the configuration\n\nWe combine three ingredients:\n\n1. **Seed dataset** \u2014 original animal portraits as base64 and their species labels\n2. **Sampler columns** \u2014 randomly sample accessories and settings for each image\n3. **Image column with context** \u2014 generate an edited image using the original as reference\n\nThe `multi_modal_context` parameter on `ImageColumnConfig` tells Data Designer to pass the seed image to the model alongside the text prompt. The model receives both the image and the editing instructions, and generates a new image." + }, + { + "type": "code", + "source": "config_builder = dd.DataDesignerConfigBuilder(model_configs=model_configs)\n\n# 1. Seed the original animal portraits\nconfig_builder.with_seed_dataset(dd.DataFrameSeedSource(df=df_seed))\n\n# 2. 
Add sampler columns for accessory diversity\nconfig_builder.add_column(\n dd.SamplerColumnConfig(\n name=\"accessory\",\n sampler_type=dd.SamplerType.CATEGORY,\n params=dd.CategorySamplerParams(\n values=[\n \"a tiny top hat\",\n \"oversized sunglasses\",\n \"a red bow tie\",\n \"a knitted beanie\",\n \"a flower crown\",\n \"a monocle and mustache\",\n \"a pirate hat and eye patch\",\n \"a chef hat\",\n ],\n ),\n )\n)\n\nconfig_builder.add_column(\n dd.SamplerColumnConfig(\n name=\"setting\",\n sampler_type=dd.SamplerType.CATEGORY,\n params=dd.CategorySamplerParams(\n values=[\n \"a cozy living room\",\n \"a sunny park\",\n \"a photo studio with soft lighting\",\n \"a red carpet event\",\n \"a holiday card backdrop with snowflakes\",\n \"a tropical beach at sunset\",\n ],\n ),\n )\n)\n\nconfig_builder.add_column(\n dd.SamplerColumnConfig(\n name=\"art_style\",\n sampler_type=dd.SamplerType.CATEGORY,\n params=dd.CategorySamplerParams(\n values=[\n \"a photorealistic style\",\n \"a Disney Pixar 3D render\",\n \"a watercolor painting\",\n \"a pop art poster\",\n ],\n ),\n )\n)\n\n# 3. Image column that reads the seed image as context and generates an edited version\nconfig_builder.add_column(\n dd.ImageColumnConfig(\n name=\"edited_image\",\n prompt=(\n \"Edit this {{ animal }} portrait photo. \"\n \"Add {{ accessory }} on the animal. \"\n \"Place the {{ animal }} in {{ setting }}. \"\n \"Render the result in {{ art_style }}. \"\n \"Keep the animal's face, expression, and features faithful to the original photo.\"\n ),\n model_alias=MODEL_ALIAS,\n multi_modal_context=[\n dd.ImageContext(\n column_name=\"base64_image\",\n data_type=dd.ModalityDataType.BASE64,\n image_format=dd.ImageFormat.PNG,\n )\n ],\n )\n)\n\ndata_designer.validate(config_builder)", + "language": "python" + }, + { + "type": "markdown", + "source": "### \ud83d\udd01 Preview: quick iteration\n\nIn **preview** mode, generated images are stored as base64 strings in the dataframe. Use this to iterate on your prompts, accessories, and sampler values before scaling up." + }, + { + "type": "code", + "source": "preview = data_designer.preview(config_builder, num_records=2)", + "language": "python" + }, + { + "type": "code", + "source": "for i in range(len(preview.dataset)):\n preview.display_sample_record()", + "language": "python" + }, + { + "type": "code", + "source": "preview.dataset", + "language": "python" + }, + { + "type": "markdown", + "source": "### \ud83d\udd0e Compare original vs edited\n\nLet's display the original animal portraits next to their accessorized versions." 
+ }, + { + "type": "code", + "source": "def display_before_after(row: pd.Series, index: int, base_path=None) -> None:\n \"\"\"Display original vs edited image for a single record.\n\n When base_path is None (preview mode), edited_image is decoded from base64.\n When base_path is provided (create mode), edited_image is loaded from disk.\n \"\"\"\n print(f\"\\n{'=' * 60}\")\n print(f\"Record {index}: {row['animal']} wearing {row['accessory']}\")\n print(f\"Setting: {row['setting']}\")\n print(f\"Style: {row['art_style']}\")\n print(f\"{'=' * 60}\")\n\n print(\"\\n\ud83d\udcf7 Original portrait:\")\n display(IPImage(data=base64.b64decode(row[\"base64_image\"])))\n\n print(\"\\n\ud83c\udfa8 Edited version:\")\n edited = row.get(\"edited_image\")\n if edited is None:\n return\n if base_path is None:\n images = edited if isinstance(edited, list) else [edited]\n for img_b64 in images:\n display(IPImage(data=base64.b64decode(img_b64)))\n else:\n paths = edited if not isinstance(edited, str) else [edited]\n for path in paths:\n display(IPImage(filename=str(base_path / path)))", + "language": "python" + }, + { + "type": "code", + "source": "for index, row in preview.dataset.iterrows():\n display_before_after(row, index)", + "language": "python" + }, + { + "type": "markdown", + "source": "### \ud83c\udd99 Create at scale\n\nIn **create** mode, images are saved to disk in an `images//` folder with UUID filenames. The dataframe stores relative paths." + }, + { + "type": "code", + "source": "results = data_designer.create(config_builder, num_records=5, dataset_name=\"tutorial-6-edited-images\")", + "language": "python" + }, + { + "type": "code", + "source": "dataset = results.load_dataset()\ndataset.head()", + "language": "python" + }, + { + "type": "code", + "source": "for index, row in dataset.head(10).iterrows():\n display_before_after(row, index, base_path=results.artifact_storage.base_dataset_path)", + "language": "python" + }, + { + "type": "markdown", + "source": "## \u23ed\ufe0f Next steps\n\n- Experiment with different autoregressive models for image editing\n- Try more creative editing prompts (style transfer, background replacement, artistic filters)\n- Combine image editing with text generation (e.g., generate captions for edited images using an LLM-Text column)\n\nRelated tutorials:\n\n- [The basics](https://nvidia-nemo.github.io/DataDesigner/latest/notebooks/1-the-basics/): samplers and LLM text columns\n- [Providing images as context](https://nvidia-nemo.github.io/DataDesigner/latest/notebooks/4-providing-images-as-context/): image-to-text with VLMs\n- [Generating images](https://nvidia-nemo.github.io/DataDesigner/latest/notebooks/5-generating-images/): text-to-image generation with diffusion models" + } + ] +} \ No newline at end of file diff --git a/fern/components/notebooks/6-editing-images-with-image-context.ts b/fern/components/notebooks/6-editing-images-with-image-context.ts new file mode 100644 index 00000000..feeca24c --- /dev/null +++ b/fern/components/notebooks/6-editing-images-with-image-context.ts @@ -0,0 +1,127 @@ +/** Auto-generated by ipynb-to-fern-json.py - do not edit */ +export default { cells: [ + { + "type": "markdown", + "source": "# \ud83c\udfa8 Data Designer Tutorial: Image-to-Image Editing\n\n#### \ud83d\udcda What you'll learn\n\nThis notebook shows how to edit existing images by combining a seed dataset with image generation. 
You'll load animal portrait photographs from HuggingFace, feed them as context to an autoregressive model, and generate fun edited versions with accessories like sunglasses, top hats, and bow ties.\n\n- \ud83c\udf31 **Seed datasets with images**: Load a HuggingFace image dataset and use it as a seed\n- \ud83d\uddbc\ufe0f **Image context for editing**: Pass existing images to an image-generation model via `multi_modal_context`\n- \ud83c\udfb2 **Sampler-driven diversity**: Combine sampled accessories and settings with seed images for varied results\n- \ud83d\udcbe **Preview vs create**: Preview stores base64 in the dataframe; create saves images to disk\n\nThis tutorial uses an **autoregressive** model (one that supports both image input *and* image output via the chat completions API). Diffusion models (DALL\u00b7E, Stable Diffusion, etc.) do not support image context\u2014see [Tutorial 5](https://nvidia-nemo.github.io/DataDesigner/latest/notebooks/5-generating-images/) for text-to-image generation with diffusion models.\n\n> **Prerequisites**: This tutorial uses [OpenRouter](https://openrouter.ai) with the Flux 2 Pro model. Set `OPENROUTER_API_KEY` in your environment before running.\n\nIf this is your first time using Data Designer, we recommend starting with the [first notebook](https://nvidia-nemo.github.io/DataDesigner/latest/notebooks/1-the-basics/) in this tutorial series." + }, + { + "type": "markdown", + "source": "### \ud83d\udce6 Import Data Designer\n\n- `data_designer.config` provides the configuration API.\n- `DataDesigner` is the main interface for generation." + }, + { + "type": "markdown", + "source": "### \u26a1 Colab Setup\n\nRun the cells below to install the dependencies and set up the API key. If you don't have an API key, you can generate one from [build.nvidia.com](https://build.nvidia.com)." + }, + { + "type": "code", + "source": "%%capture\n!pip install -U data-designer", + "language": "python" + }, + { + "type": "code", + "source": "import getpass\nimport os\n\nfrom google.colab import userdata\n\ntry:\n os.environ[\"NVIDIA_API_KEY\"] = userdata.get(\"NVIDIA_API_KEY\")\nexcept userdata.SecretNotFoundError:\n os.environ[\"NVIDIA_API_KEY\"] = getpass.getpass(\"Enter your NVIDIA API key: \")", + "language": "python" + }, + { + "type": "code", + "source": "import base64\nimport io\nimport uuid\n\nimport pandas as pd\nfrom datasets import load_dataset\nfrom IPython.display import Image as IPImage\nfrom IPython.display import display\n\nimport data_designer.config as dd\nfrom data_designer.interface import DataDesigner", + "language": "python" + }, + { + "type": "markdown", + "source": "### \u2699\ufe0f Initialize the Data Designer interface\n\nWe initialize Data Designer without arguments here\u2014the image-editing model is configured explicitly in the next cell. No default text model is needed for this tutorial." + }, + { + "type": "code", + "source": "data_designer = DataDesigner()", + "language": "python" + }, + { + "type": "markdown", + "source": "### \ud83c\udf9b\ufe0f Define an image-editing model\n\nWe need an **autoregressive** model that supports both image input and image output via the chat completions API. This lets us pass existing images as context and receive edited images back.\n\n- Use `ImageInferenceParams` so Data Designer treats this model as an image generator.\n- Image-specific options are model-dependent; pass them via `extra_body`.\n\n> **Note**: This tutorial uses the Flux 2 Pro model via [OpenRouter](https://openrouter.ai). 
Set `OPENROUTER_API_KEY` in your environment." + }, + { + "type": "code", + "source": "MODEL_PROVIDER = \"openrouter\"\nMODEL_ID = \"black-forest-labs/flux.2-pro\"\nMODEL_ALIAS = \"image-editor\"\n\nmodel_configs = [\n dd.ModelConfig(\n alias=MODEL_ALIAS,\n model=MODEL_ID,\n provider=MODEL_PROVIDER,\n inference_parameters=dd.ImageInferenceParams(\n extra_body={\"height\": 512, \"width\": 512},\n ),\n )\n]", + "language": "python" + }, + { + "type": "markdown", + "source": "### \ud83c\udf31 Load animal portraits from HuggingFace\n\nWe'll load animal face photographs from the [AFHQ](https://huggingface.co/datasets/huggan/AFHQv2) (Animal Faces-HQ) dataset, convert them to base64, and use them as a seed dataset.\n\nAFHQ contains high-quality 512\u00d7512 close-up portraits of cats, dogs, and wildlife\u2014perfect subjects for adding fun accessories." + }, + { + "type": "code", + "source": "SEED_COUNT = 10\nBASE64_IMAGE_HEIGHT = 512\n\nANIMAL_LABELS = {0: \"cat\", 1: \"dog\", 2: \"wild\"}\n\n\ndef resize_image(image, height: int):\n \"\"\"Resize image maintaining aspect ratio.\"\"\"\n original_width, original_height = image.size\n width = int(original_width * (height / original_height))\n return image.resize((width, height))\n\n\ndef prepare_record(record: dict, height: int) -> dict:\n \"\"\"Convert a HuggingFace record to base64 with metadata.\"\"\"\n image = resize_image(record[\"image\"], height)\n img_buffer = io.BytesIO()\n image.save(img_buffer, format=\"PNG\")\n base64_string = base64.b64encode(img_buffer.getvalue()).decode(\"utf-8\")\n return {\n \"uuid\": str(uuid.uuid4()),\n \"base64_image\": base64_string,\n \"animal\": ANIMAL_LABELS[record[\"label\"]],\n }", + "language": "python" + }, + { + "type": "code", + "source": "print(\"\ud83d\udce5 Streaming animal portraits from HuggingFace...\")\nhf_dataset = load_dataset(\"huggan/AFHQv2\", split=\"train\", streaming=True)\n\nhf_iter = iter(hf_dataset)\nrecords = [prepare_record(next(hf_iter), BASE64_IMAGE_HEIGHT) for _ in range(SEED_COUNT)]\ndf_seed = pd.DataFrame(records)\n\nprint(f\"\u2705 Prepared {len(df_seed)} animal portraits with columns: {list(df_seed.columns)}\")\ndf_seed.head()", + "language": "python" + }, + { + "type": "markdown", + "source": "### \ud83c\udfd7\ufe0f Build the configuration\n\nWe combine three ingredients:\n\n1. **Seed dataset** \u2014 original animal portraits as base64 and their species labels\n2. **Sampler columns** \u2014 randomly sample accessories and settings for each image\n3. **Image column with context** \u2014 generate an edited image using the original as reference\n\nThe `multi_modal_context` parameter on `ImageColumnConfig` tells Data Designer to pass the seed image to the model alongside the text prompt. The model receives both the image and the editing instructions, and generates a new image." + }, + { + "type": "code", + "source": "config_builder = dd.DataDesignerConfigBuilder(model_configs=model_configs)\n\n# 1. Seed the original animal portraits\nconfig_builder.with_seed_dataset(dd.DataFrameSeedSource(df=df_seed))\n\n# 2. 
Add sampler columns for accessory diversity\nconfig_builder.add_column(\n dd.SamplerColumnConfig(\n name=\"accessory\",\n sampler_type=dd.SamplerType.CATEGORY,\n params=dd.CategorySamplerParams(\n values=[\n \"a tiny top hat\",\n \"oversized sunglasses\",\n \"a red bow tie\",\n \"a knitted beanie\",\n \"a flower crown\",\n \"a monocle and mustache\",\n \"a pirate hat and eye patch\",\n \"a chef hat\",\n ],\n ),\n )\n)\n\nconfig_builder.add_column(\n dd.SamplerColumnConfig(\n name=\"setting\",\n sampler_type=dd.SamplerType.CATEGORY,\n params=dd.CategorySamplerParams(\n values=[\n \"a cozy living room\",\n \"a sunny park\",\n \"a photo studio with soft lighting\",\n \"a red carpet event\",\n \"a holiday card backdrop with snowflakes\",\n \"a tropical beach at sunset\",\n ],\n ),\n )\n)\n\nconfig_builder.add_column(\n dd.SamplerColumnConfig(\n name=\"art_style\",\n sampler_type=dd.SamplerType.CATEGORY,\n params=dd.CategorySamplerParams(\n values=[\n \"a photorealistic style\",\n \"a Disney Pixar 3D render\",\n \"a watercolor painting\",\n \"a pop art poster\",\n ],\n ),\n )\n)\n\n# 3. Image column that reads the seed image as context and generates an edited version\nconfig_builder.add_column(\n dd.ImageColumnConfig(\n name=\"edited_image\",\n prompt=(\n \"Edit this {{ animal }} portrait photo. \"\n \"Add {{ accessory }} on the animal. \"\n \"Place the {{ animal }} in {{ setting }}. \"\n \"Render the result in {{ art_style }}. \"\n \"Keep the animal's face, expression, and features faithful to the original photo.\"\n ),\n model_alias=MODEL_ALIAS,\n multi_modal_context=[\n dd.ImageContext(\n column_name=\"base64_image\",\n data_type=dd.ModalityDataType.BASE64,\n image_format=dd.ImageFormat.PNG,\n )\n ],\n )\n)\n\ndata_designer.validate(config_builder)", + "language": "python" + }, + { + "type": "markdown", + "source": "### \ud83d\udd01 Preview: quick iteration\n\nIn **preview** mode, generated images are stored as base64 strings in the dataframe. Use this to iterate on your prompts, accessories, and sampler values before scaling up." + }, + { + "type": "code", + "source": "preview = data_designer.preview(config_builder, num_records=2)", + "language": "python" + }, + { + "type": "code", + "source": "for i in range(len(preview.dataset)):\n preview.display_sample_record()", + "language": "python" + }, + { + "type": "code", + "source": "preview.dataset", + "language": "python" + }, + { + "type": "markdown", + "source": "### \ud83d\udd0e Compare original vs edited\n\nLet's display the original animal portraits next to their accessorized versions." 
+ }, + { + "type": "code", + "source": "def display_before_after(row: pd.Series, index: int, base_path=None) -> None:\n \"\"\"Display original vs edited image for a single record.\n\n When base_path is None (preview mode), edited_image is decoded from base64.\n When base_path is provided (create mode), edited_image is loaded from disk.\n \"\"\"\n print(f\"\\n{'=' * 60}\")\n print(f\"Record {index}: {row['animal']} wearing {row['accessory']}\")\n print(f\"Setting: {row['setting']}\")\n print(f\"Style: {row['art_style']}\")\n print(f\"{'=' * 60}\")\n\n print(\"\\n\ud83d\udcf7 Original portrait:\")\n display(IPImage(data=base64.b64decode(row[\"base64_image\"])))\n\n print(\"\\n\ud83c\udfa8 Edited version:\")\n edited = row.get(\"edited_image\")\n if edited is None:\n return\n if base_path is None:\n images = edited if isinstance(edited, list) else [edited]\n for img_b64 in images:\n display(IPImage(data=base64.b64decode(img_b64)))\n else:\n paths = edited if not isinstance(edited, str) else [edited]\n for path in paths:\n display(IPImage(filename=str(base_path / path)))", + "language": "python" + }, + { + "type": "code", + "source": "for index, row in preview.dataset.iterrows():\n display_before_after(row, index)", + "language": "python" + }, + { + "type": "markdown", + "source": "### \ud83c\udd99 Create at scale\n\nIn **create** mode, images are saved to disk in an `images//` folder with UUID filenames. The dataframe stores relative paths." + }, + { + "type": "code", + "source": "results = data_designer.create(config_builder, num_records=5, dataset_name=\"tutorial-6-edited-images\")", + "language": "python" + }, + { + "type": "code", + "source": "dataset = results.load_dataset()\ndataset.head()", + "language": "python" + }, + { + "type": "code", + "source": "for index, row in dataset.head(10).iterrows():\n display_before_after(row, index, base_path=results.artifact_storage.base_dataset_path)", + "language": "python" + }, + { + "type": "markdown", + "source": "## \u23ed\ufe0f Next steps\n\n- Experiment with different autoregressive models for image editing\n- Try more creative editing prompts (style transfer, background replacement, artistic filters)\n- Combine image editing with text generation (e.g., generate captions for edited images using an LLM-Text column)\n\nRelated tutorials:\n\n- [The basics](https://nvidia-nemo.github.io/DataDesigner/latest/notebooks/1-the-basics/): samplers and LLM text columns\n- [Providing images as context](https://nvidia-nemo.github.io/DataDesigner/latest/notebooks/4-providing-images-as-context/): image-to-text with VLMs\n- [Generating images](https://nvidia-nemo.github.io/DataDesigner/latest/notebooks/5-generating-images/): text-to-image generation with diffusion models" + } +] }; diff --git a/fern/components/trajectories/4hop-example.ts b/fern/components/trajectories/4hop-example.ts new file mode 100644 index 00000000..594c257b --- /dev/null +++ b/fern/components/trajectories/4hop-example.ts @@ -0,0 +1,190 @@ +/** Extracted from docs/devnotes/posts/deep-research-trajectories.md - 4-hop example trajectory */ + +import type { TrajectoryViewerProps } from "../TrajectoryViewer"; + +const turns = [ + { turnIndex: 1, calls: [{ fn: "search" as const, arg: '"border war Libya 1st century BC"' }] }, + { + turnIndex: 2, + calls: [ + { fn: "search" as const, arg: '"ancient Egypt Libya war conflict 1st century BC"' }, + { fn: "search" as const, arg: '"Cyrene Libya ancient war border"' }, + ], + }, + { + turnIndex: 3, + calls: [ + { fn: "search" as const, arg: '"Ptolemaic 
Egypt Cyrenaica war conflict"' }, + { fn: "search" as const, arg: '"Egypt border war ancient 1st century BC winner"' }, + ], + }, + { turnIndex: 4, calls: [{ fn: "open" as const, arg: "md_001400" }] }, + { + turnIndex: 5, + calls: [ + { fn: "search" as const, arg: '"Egypt Marmarica border war ancient Roman"' }, + { fn: "search" as const, arg: '"Marmaric War Egypt Libya ancient"' }, + ], + }, + { + turnIndex: 6, + calls: [ + { fn: "search" as const, arg: '"Greek colony Libya border war Cyrene colonization"' }, + { fn: "search" as const, arg: '"Thera Cyrene colony Greek colonization ancient"' }, + ], + }, + { turnIndex: 7, calls: [{ fn: "open" as const, arg: "md_000167" }] }, + { + turnIndex: 8, + calls: [ + { fn: "search" as const, arg: '"1st century BC war border Libya Egypt conflict"' }, + { fn: "search" as const, arg: '"Chad Libya border war Aouzou Strip conflict"' }, + ], + }, + { turnIndex: 9, calls: [{ fn: "open" as const, arg: "md_001676", isGolden: true }] }, + { + turnIndex: 10, + calls: [ + { fn: "search" as const, arg: '"Roman Empire war Libya Numidia Africa ancient"' }, + { fn: "search" as const, arg: '"Jugurthine War Numidia Rome Libya"' }, + ], + }, + { turnIndex: 11, calls: [{ fn: "open" as const, arg: "fw_021746" }] }, + { + turnIndex: 12, + calls: [ + { fn: "search" as const, arg: '"Egypt ancient war tribes Libya Marmaridae border conflict"' }, + { fn: "search" as const, arg: '"Cornelius Gallus Egypt prefect war border"' }, + ], + }, + { turnIndex: 13, calls: [{ fn: "open" as const, arg: "md_001054", isGolden: true }] }, + { + turnIndex: 14, + calls: [ + { fn: "search" as const, arg: '"Roman Empire greatest extent Trajan mother"' }, + { fn: "search" as const, arg: '"Trajan Roman Empire greatest territorial extent"' }, + ], + }, + { + turnIndex: 15, + calls: [ + { fn: "open" as const, arg: "md_002075", isGolden: true }, + { fn: "open" as const, arg: "md_002510", isGolden: true }, + ], + }, + { + turnIndex: 16, + calls: [ + { fn: "search" as const, arg: '"Egypt Roman province colony Augustus annexed"' }, + { fn: "search" as const, arg: '"Trajan mother Marcia family"' }, + ], + }, + { + turnIndex: 17, + calls: [ + { fn: "open" as const, arg: "md_002117" }, + { fn: "search" as const, arg: '"Trajan mother Marcia parents father"' }, + ], + }, + { + turnIndex: 18, + calls: [ + { fn: "search" as const, arg: '"Ulpia Marciana Trajan sister mother family"' }, + { fn: "search" as const, arg: '"Egypt war Libya Marmaricae ancient Roman"' }, + ], + }, + { + turnIndex: 19, + calls: [ + { fn: "search" as const, arg: '"Marcia Trajan Roman empress mother emperor"' }, + { fn: "search" as const, arg: '"Egypt Marmaric war ancient Rome Cyrene 1st century"' }, + ], + }, + { turnIndex: 20, calls: [{ fn: "open" as const, arg: "md_000722" }] }, + { + turnIndex: 21, + calls: [ + { fn: "search" as const, arg: '"Ptolemaic Egypt border war Cyrene Cyrenaica territory"' }, + { fn: "search" as const, arg: '"Egypt vs Libya ancient war Ptolemy Rome colony"' }, + ], + }, + { + turnIndex: 22, + calls: [ + { fn: "search" as const, arg: '"Toyota War Chad Libya border winner conflict"' }, + { fn: "search" as const, arg: '"Chad Aouzou Strip war Libya France colonized"' }, + ], + }, + { + turnIndex: 23, + calls: [ + { fn: "search" as const, arg: '"1st century BC Egypt border war defeat victory Rome colonize"' }, + { fn: "search" as const, arg: '"ancient Egypt war won Libya 1st century BC Rome province"' }, + ], + }, + { turnIndex: 24, calls: [{ fn: "open" as const, arg: "md_002076", isGolden: true }] }, + { + 
turnIndex: 25, + calls: [ + { fn: "search" as const, arg: '"French Empire greatest extent Napoleon mother"' }, + { fn: "search" as const, arg: '"France Vichy border war Libya Chad colonial"' }, + ], + }, + { + turnIndex: 26, + calls: [ + { fn: "search" as const, arg: '"Egypt war 25 BC 30 BC Marmaridae tribe Roman conquest"' }, + { fn: "search" as const, arg: '"Cornelius Balbus Garamantes Libya war Roman conquest"' }, + ], + }, + { + turnIndex: 27, + calls: [ + { fn: "search" as const, arg: '"ancient Crete war Libya border Greek colonize"' }, + { fn: "search" as const, arg: '"Cyrene founded colonized by whom mother city Thera"' }, + ], + }, + { + turnIndex: 28, + calls: [ + { + fn: "search" as const, + arg: '"Egypt Roman province colonized 1st century BC Trajan mother Marcia"', + }, + ], + }, + { + turnIndex: 29, + calls: [{ fn: "search" as const, arg: '"Marcia Trajan mother Roman emperor name Ulpius"' }], + }, + { + turnIndex: 30, + calls: [ + { + fn: "search" as const, + arg: '"Egypt war border Libya Cyrenaica defeated 1st century BC Roman province"', + }, + ], + }, + { + turnIndex: 31, + calls: [ + { + fn: "answer" as const, + body: `Based on my extensive research, I have gathered the following evidence:

    1. The Roman Empire reached its greatest territorial extent under Trajan (117 CE) — confirmed by multiple documents
    2. Trajan's mother was from the Gens Marcia — the Trajan document states he "on his mother's side from the Gens Marcia, of an Italic family of Sabine origin"
    3. Egypt was colonized/annexed by Rome after Cleopatra's death (30 BC), making it a Roman province
    4. Egypt had border conflicts with Libya (ancient Marmaricae/Cyrenaica region) in the 1st century BC

    The answer is Marcia.`, + }, + ], + }, +]; + +const trajectory: TrajectoryViewerProps = { + question: + "Who was the mother of the person under whom the colonizer of the winner of a border war with Libya in the 1st century BC reached its greatest extent?", + referenceAnswer: "Marcia", + goldenPassageHint: "⭐ = golden passage (contains evidence for the answer)", + turns, + summary: "Example trajectory: 4-hop question, 31 turns, 49 tool calls", + defaultOpen: true, +}; + +export default trajectory; diff --git a/fern/docs.yml b/fern/docs.yml index fedb5508..3a4ea3d4 100644 --- a/fern/docs.yml +++ b/fern/docs.yml @@ -4,6 +4,9 @@ instances: title: NeMo Data Designer versions: + - display-name: v0.5.0 + path: versions/v0.5.0.yml + slug: v0.5.0 - display-name: v0.3.3 path: versions/v0.3.3.yml slug: v0.3.3 @@ -22,6 +25,17 @@ logo: favicon: assets/favicon.png +css: + - ./styles/notebook-viewer.css + - ./styles/trajectory-viewer.css + - ./styles/expandable-code.css + - ./styles/pipeline-diagram.css + - ./styles/metrics-table.css + navbar-links: - type: github value: https://github.com/NVIDIA-NeMo/DataDesigner + +experimental: + mdx-components: + - ./components diff --git a/fern/fern.config.json b/fern/fern.config.json index 9f0a3e5d..59390164 100644 --- a/fern/fern.config.json +++ b/fern/fern.config.json @@ -1,4 +1,4 @@ { "organization": "nvidia", - "version": "3.40.1" -} \ No newline at end of file + "version": "3.77.0" +} diff --git a/fern/scripts/ipynb-to-fern-json.py b/fern/scripts/ipynb-to-fern-json.py new file mode 100644 index 00000000..4abb1f17 --- /dev/null +++ b/fern/scripts/ipynb-to-fern-json.py @@ -0,0 +1,93 @@ +#!/usr/bin/env python3 +# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 + +""" +Convert Jupyter notebooks (.ipynb) to Fern NotebookViewer JSON format. 
+ +Reads notebook JSON and outputs a minimal format with cells array: + { "cells": [ { "type": "markdown"|"code", "source": "...", "language": "python" } ] } + +Usage: + python ipynb-to-fern-json.py input.ipynb -o output.json + python ipynb-to-fern-json.py docs/notebooks/1-the-basics.ipynb -o fern/assets/notebooks/1-the-basics.json + +Run after: make convert-execute-notebooks (generates .ipynb from docs/notebook_source/*.py) +""" + +from __future__ import annotations + +import json +import sys +from pathlib import Path + + +def get_language(metadata: dict) -> str: + """Extract language from notebook metadata.""" + info = metadata.get("kernelspec", {}) or {} + lang = info.get("language", "python") + return "python" if lang == "python3" else lang + + +def convert_cell(cell: dict, default_language: str) -> dict: + """Convert a Jupyter cell to Fern format.""" + cell_type = cell.get("cell_type", "code") + source = cell.get("source", []) + if isinstance(source, list): + source = "".join(source) + source = source.rstrip("\n") + out: dict = {"type": cell_type, "source": source} + if cell_type == "code": + out["language"] = default_language + return out + + +def convert_notebook(ipynb_path: Path) -> dict: + """Convert a .ipynb file to Fern NotebookViewer format.""" + with open(ipynb_path, encoding="utf-8") as f: + nb = json.load(f) + metadata = nb.get("metadata", {}) + default_language = get_language(metadata) + cells = [convert_cell(c, default_language) for c in nb.get("cells", [])] + return {"cells": cells} + + +def write_ts_export(data: dict, ts_path: Path) -> None: + """Write a .ts file that exports the notebook for MDX import (avoids JSON import).""" + cells_json = json.dumps(data["cells"], indent=2) + ts_path.write_text( + f'/** Auto-generated by ipynb-to-fern-json.py - do not edit */\n' + f'export default {{ cells: {cells_json} }};\n', + encoding="utf-8", + ) + + +def main() -> int: + args = sys.argv[1:] + if not args or "-h" in args or "--help" in args: + print(__doc__) + return 0 + input_path = Path(args[0]) + output_path: Path | None = None + if "-o" in args: + idx = args.index("-o") + if idx + 1 < len(args): + output_path = Path(args[idx + 1]) + if not output_path: + output_path = input_path.with_suffix(".json") + if not input_path.exists(): + print(f"Error: {input_path} not found", file=sys.stderr) + return 1 + data = convert_notebook(input_path) + output_path.parent.mkdir(parents=True, exist_ok=True) + with open(output_path, "w", encoding="utf-8") as f: + json.dump(data, f, indent=2) + print(f"Wrote {output_path}") + ts_path = output_path.with_suffix(".ts") + write_ts_export(data, ts_path) + print(f"Wrote {ts_path}") + return 0 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/fern/styles/expandable-code.css b/fern/styles/expandable-code.css new file mode 100644 index 00000000..e1284487 --- /dev/null +++ b/fern/styles/expandable-code.css @@ -0,0 +1,43 @@ +/* ExpandableCode component styles */ + +.expandable-code { + margin: 1rem 0; + border: 1px solid rgba(128, 128, 128, 0.2); + border-radius: 8px; + overflow: hidden; +} + +.expandable-code__summary { + cursor: pointer; + padding: 0.75rem 1rem; + background: rgba(0, 0, 0, 0.03); + font-size: 0.9rem; +} + +.dark .expandable-code__summary { + background: rgba(255, 255, 255, 0.05); +} + +.expandable-code__summary:hover { + background: rgba(0, 0, 0, 0.05); +} + +.dark .expandable-code__summary:hover { + background: rgba(255, 255, 255, 0.08); +} + +.expandable-code__content { + padding: 0; + overflow-x: auto; +} + 
+.expandable-code__pre { + margin: 0; + padding: 1rem; + font-size: 0.85rem; + line-height: 1.5; +} + +.expandable-code__pre code { + font-family: "SF Mono", Menlo, Monaco, "Cascadia Code", monospace; +} diff --git a/fern/styles/metrics-table.css b/fern/styles/metrics-table.css new file mode 100644 index 00000000..01062c38 --- /dev/null +++ b/fern/styles/metrics-table.css @@ -0,0 +1,32 @@ +/* MetricsTable component styles */ + +.metrics-table-wrapper { + margin: 1.5rem 0; + overflow-x: auto; +} + +.metrics-table { + width: 100%; + border-collapse: collapse; + font-size: 0.9rem; +} + +.metrics-table th, +.metrics-table td { + border: 1px solid rgba(128, 128, 128, 0.25); + padding: 0.5rem 0.75rem; + text-align: left; +} + +.metrics-table th { + background: rgba(0, 0, 0, 0.04); + font-weight: 600; +} + +.dark .metrics-table th { + background: rgba(255, 255, 255, 0.06); +} + +.metrics-table td.metrics-table__cell--best { + font-weight: 600; +} diff --git a/fern/styles/notebook-viewer.css b/fern/styles/notebook-viewer.css new file mode 100644 index 00000000..c55ff16e --- /dev/null +++ b/fern/styles/notebook-viewer.css @@ -0,0 +1,117 @@ +/* NotebookViewer component styles */ + +.notebook-viewer { + margin: 1.5rem 0; +} + +.notebook-viewer__colab-banner { + margin-bottom: 1rem; + padding: 0.75rem 1rem; + background: linear-gradient(90deg, #f9ab00 0%, #e37400 100%); + border-radius: 8px; +} + +.notebook-viewer__colab-link { + display: inline-flex; + align-items: center; + gap: 0.5rem; + color: #1a1a1a; + font-weight: 600; + text-decoration: none; +} + +.notebook-viewer__colab-link:hover { + text-decoration: underline; +} + +.notebook-viewer__colab-icon { + font-size: 0.875rem; +} + +.notebook-viewer__cells { + display: flex; + flex-direction: column; + gap: 1.25rem; +} + +.notebook-viewer__cell { + margin: 0; +} + +.notebook-viewer__cell--markdown .notebook-viewer__markdown { + line-height: 1.6; +} + +.notebook-viewer__cell--markdown .notebook-viewer__markdown h1, +.notebook-viewer__cell--markdown .notebook-viewer__markdown h2, +.notebook-viewer__cell--markdown .notebook-viewer__markdown h3, +.notebook-viewer__cell--markdown .notebook-viewer__markdown h4 { + margin-top: 1rem; + margin-bottom: 0.5rem; +} + +.notebook-viewer__cell--markdown .notebook-viewer__markdown h1:first-child, +.notebook-viewer__cell--markdown .notebook-viewer__markdown h2:first-child, +.notebook-viewer__cell--markdown .notebook-viewer__markdown h3:first-child, +.notebook-viewer__cell--markdown .notebook-viewer__markdown h4:first-child { + margin-top: 0; +} + +.notebook-viewer__cell--markdown .notebook-viewer__markdown ul { + margin: 0.5rem 0; + padding-left: 1.5rem; +} + +.notebook-viewer__cell--markdown .notebook-viewer__markdown p { + margin: 0.5rem 0; +} + +.notebook-viewer__code-wrapper { + margin: 0; +} + +.notebook-viewer__code { + margin: 0; + padding: 1rem; + border-radius: 8px; + overflow-x: auto; +} + +.notebook-viewer__outputs { + margin-top: 0.5rem; + padding: 1rem; + background: rgba(0, 0, 0, 0.03); + border-radius: 8px; + overflow-x: auto; +} + +.dark .notebook-viewer__outputs { + background: rgba(255, 255, 255, 0.05); +} + +.notebook-viewer__output-text, +.notebook-viewer__output-html { + margin: 0; + font-size: 0.875rem; +} + +.notebook-viewer__output-html table { + border-collapse: collapse; +} + +.notebook-viewer__output-html th, +.notebook-viewer__output-html td { + border: 1px solid #e5e7eb; + padding: 0.25rem 0.5rem; +} + +.dark .notebook-viewer__output-html th, +.dark .notebook-viewer__output-html td { + 
border-color: #374151; +} + +.notebook-viewer__output-image { + max-width: 100%; + height: auto; + border-radius: 4px; +} diff --git a/fern/styles/pipeline-diagram.css b/fern/styles/pipeline-diagram.css new file mode 100644 index 00000000..5a80dfdf --- /dev/null +++ b/fern/styles/pipeline-diagram.css @@ -0,0 +1,31 @@ +/* PipelineDiagram component styles */ + +.pipeline-diagram { + margin: 1.5rem 0; +} + +.pipeline-diagram__title { + font-size: 0.9rem; + font-weight: 600; + margin-bottom: 0.5rem; + opacity: 0.9; +} + +.pipeline-diagram__pre { + margin: 0; + padding: 1rem; + background: rgba(0, 0, 0, 0.03); + border-radius: 8px; + overflow-x: auto; +} + +.dark .pipeline-diagram__pre { + background: rgba(255, 255, 255, 0.05); +} + +.pipeline-diagram__code { + font-family: "SF Mono", Menlo, Monaco, "Cascadia Code", monospace; + font-size: 0.82em; + line-height: 1.4; + white-space: pre; +} diff --git a/fern/styles/trajectory-viewer.css b/fern/styles/trajectory-viewer.css new file mode 100644 index 00000000..783aa2fb --- /dev/null +++ b/fern/styles/trajectory-viewer.css @@ -0,0 +1,163 @@ +/* TrajectoryViewer component styles */ + +.trajectory-viewer { + font-family: -apple-system, system-ui, sans-serif; + max-width: 960px; + margin: 16px 0; + padding: 0; +} + +.trajectory-viewer__details { + margin: 1rem 0; +} + +.trajectory-viewer__summary { + cursor: pointer; + padding: 0.5rem 0; + font-size: 0.95rem; +} + +.trajectory-viewer__question { + background: rgba(66, 165, 245, 0.08); + padding: 12px 16px; + border-radius: 8px; + margin-bottom: 8px; +} + +.trajectory-viewer__question strong { + color: #42a5f5; +} + +.trajectory-viewer__ref { + background: rgba(76, 175, 80, 0.08); + padding: 12px 16px; + border-radius: 8px; + margin-bottom: 20px; + border-left: 4px solid #4caf50; +} + +.trajectory-viewer__ref strong { + color: #66bb6a; +} + +.trajectory-viewer__hint { + opacity: 0.5; + font-size: 0.8em; + margin-bottom: 12px; +} + +.trajectory-viewer__turn { + margin: 6px 0; + display: flex; + align-items: flex-start; + gap: 12px; +} + +.trajectory-viewer__label { + min-width: 48px; + padding: 6px 0; + opacity: 0.5; + font-size: 0.75em; + font-family: "SF Mono", Menlo, Monaco, "Cascadia Code", monospace; + text-align: right; + flex-shrink: 0; +} + +.trajectory-viewer__body { + flex: 1; + display: flex; + flex-direction: column; + gap: 3px; +} + +.trajectory-viewer__group { + display: flex; + flex-direction: column; + gap: 3px; + position: relative; +} + +.trajectory-viewer__group--multi { + padding-left: 13px; +} + +.trajectory-viewer__group--multi::before { + content: ""; + position: absolute; + left: 0; + top: 4px; + bottom: 4px; + width: 3px; + background: rgba(128, 128, 128, 0.3); + border-radius: 2px; +} + +.trajectory-viewer__call { + padding: 5px 12px; + border-radius: 5px; + font-family: "SF Mono", Menlo, Monaco, "Cascadia Code", monospace; + font-size: 0.82em; + display: flex; + gap: 8px; +} + +.trajectory-viewer__call .trajectory-viewer__fn { + font-weight: bold; + min-width: 55px; + flex-shrink: 0; +} + +.trajectory-viewer__call .trajectory-viewer__arg { + opacity: 0.85; +} + +.trajectory-viewer__call--search { + background: rgba(66, 165, 245, 0.1); + border-left: 3px solid #42a5f5; +} + +.trajectory-viewer__call--search .trajectory-viewer__fn { + color: #42a5f5; +} + +.trajectory-viewer__call--open { + background: rgba(102, 187, 106, 0.1); + border-left: 3px solid #66bb6a; +} + +.trajectory-viewer__call--open .trajectory-viewer__fn { + color: #66bb6a; +} + +.trajectory-viewer__call--find { 
+ background: rgba(255, 167, 38, 0.1); + border-left: 3px solid #ffa726; +} + +.trajectory-viewer__call--find .trajectory-viewer__fn { + color: #ffa726; +} + +.trajectory-viewer__call--answer { + background: rgba(76, 175, 80, 0.08); + border-left: 3px solid #4caf50; + padding: 10px 16px; + border-radius: 6px; + font-size: 0.88em; + line-height: 1.5; + flex-direction: column; + align-items: flex-start; + gap: 4px; +} + +.trajectory-viewer__call--answer .trajectory-viewer__fn { + color: #4caf50; + font-weight: bold; + font-family: "SF Mono", Menlo, Monaco, "Cascadia Code", monospace; +} + +.trajectory-viewer__call--answer .trajectory-viewer__body { + width: 100%; + font-family: -apple-system, system-ui, sans-serif; + font-size: 1em; +} diff --git a/fern/v0.5.0/pages/api-reference/analysis.mdx b/fern/v0.5.0/pages/api-reference/analysis.mdx new file mode 100644 index 00000000..5c778eac --- /dev/null +++ b/fern/v0.5.0/pages/api-reference/analysis.mdx @@ -0,0 +1,171 @@ +--- +title: Analysis +description: API reference for dataset analysis and profiling. +--- + +The `analysis` modules provide tools for profiling and analyzing generated datasets. It includes statistics tracking, column profiling, and reporting capabilities. + +## Column Statistics + +Column statistics are automatically computed for every column after generation. They provide basic metrics specific to the column type. For example, LLM columns track token usage statistics, sampler columns track distribution information, and validation columns track validation success rates. + +The classes below are result objects that store the computed statistics for each column type and provide methods for formatting these results for display in reports. + +### LLMColumnStatistics + +```python +class LLMColumnStatistics(BaseModel): + """Statistics for LLM-generated columns.""" + + total_input_tokens: int # Total prompt tokens across all generations + total_output_tokens: int # Total completion tokens + avg_input_tokens: float # Average prompt tokens per generation + avg_output_tokens: float # Average completion tokens per generation + generation_time_seconds: float # Total generation time + generations_per_second: float # Generation throughput +``` + +### SamplerColumnStatistics + +```python +class SamplerColumnStatistics(BaseModel): + """Statistics for sampler columns.""" + + unique_values: int # Number of unique values generated + value_counts: dict[str, int] # Counts per value (for categorical) + min_value: float | None # Minimum value (for numerical) + max_value: float | None # Maximum value (for numerical) + mean_value: float | None # Mean value (for numerical) + std_value: float | None # Standard deviation (for numerical) +``` + +### ValidationColumnStatistics + +```python +class ValidationColumnStatistics(BaseModel): + """Statistics for validation columns.""" + + total_validated: int # Total records validated + valid_count: int # Number of valid records + invalid_count: int # Number of invalid records + null_count: int # Number of null results + pass_rate: float # Percentage of valid records +``` + +### ExpressionColumnStatistics + +```python +class ExpressionColumnStatistics(BaseModel): + """Statistics for expression columns.""" + + unique_values: int # Number of unique values + null_count: int # Number of null results + evaluation_time_seconds: float # Time to evaluate expressions +``` + +## Column Profilers + +Column profilers are optional analysis tools that provide deeper insights into specific column types. 
Currently, the only column profiler available is the Judge Score Profiler. + +The classes below are result objects that store the computed profiler results and provide methods for formatting these results for display in reports. + +### JudgeScoreProfilerResults + +```python +class JudgeScoreProfilerResults(BaseModel): + """Profiling results for LLM judge columns.""" + + score_name: str # Name of the score dimension + score_distribution: dict[str, int] # Distribution of scores + avg_score: float | None # Average score (for numeric scores) + score_counts: dict[str | int, int] # Counts per score value +``` + +## Dataset Profiler + +The `DatasetProfilerResults` class contains complete profiling results for a generated dataset. It aggregates column-level statistics, metadata, and profiler results, and provides methods to: + +- Compute dataset-level metrics (completion percentage, column type summary) +- Filter statistics by column type +- Generate formatted analysis reports via the `to_report()` method + +Reports can be displayed in the console or exported to HTML/SVG formats. + +### DatasetProfilerResults + +```python +class DatasetProfilerResults(BaseModel): + """Complete profiling results for a generated dataset.""" + + dataset_name: str # Name of the dataset + total_records: int # Total records generated + generation_time_seconds: float # Total generation time + column_statistics: dict[str, ColumnStatistics] # Per-column stats + column_profiler_results: dict[str, list[ProfilerResults]] # Profiler results + + def to_report( + self, + output_format: Literal["console", "html", "svg"] = "console", + ) -> None: + """Generate a formatted analysis report. + + Args: + output_format: Output format for the report. + """ + ... + + def get_column_statistics( + self, + column_name: str, + ) -> ColumnStatistics: + """Get statistics for a specific column. + + Args: + column_name: Name of the column. + + Returns: + Column statistics object. + """ + ... + + def filter_by_column_type( + self, + column_type: str, + ) -> dict[str, ColumnStatistics]: + """Filter statistics by column type. + + Args: + column_type: Type of columns to filter (e.g., "llm-text"). + + Returns: + Dictionary of column statistics for matching columns. + """ + ... +``` + +### Example: Accessing Analysis Results + +```python +import data_designer.config as dd +from data_designer.interface import DataDesigner + +# Generate a dataset +data_designer = DataDesigner() +builder = dd.DataDesignerConfigBuilder() +# ... add columns ... + +results = data_designer.create(builder, num_records=100) + +# Load and display analysis +analysis = results.load_analysis() +analysis.to_report() + +# Access specific column statistics +llm_stats = analysis.get_column_statistics("generated_text") +print(f"Average output tokens: {llm_stats.avg_output_tokens}") + +# Filter by column type +all_llm_stats = analysis.filter_by_column_type("llm-text") +for col_name, stats in all_llm_stats.items(): + print(f"{col_name}: {stats.generations_per_second:.2f} gen/sec") +``` diff --git a/fern/v0.5.0/pages/api-reference/column-configs.mdx b/fern/v0.5.0/pages/api-reference/column-configs.mdx new file mode 100644 index 00000000..d1e05b1e --- /dev/null +++ b/fern/v0.5.0/pages/api-reference/column-configs.mdx @@ -0,0 +1,252 @@ +--- +title: Column Configurations +description: API reference for column configuration objects. +--- + +The `column_configs` module defines configuration objects for all Data Designer column types. 
Each configuration inherits from `SingleColumnConfig`, which provides shared arguments like the column `name`, whether to `drop` the column after generation, and the `column_type`. + + +The `column_type` argument is used to identify column types when deserializing the Data Designer Config from JSON/YAML. It acts as the discriminator in a [discriminated union](https://docs.pydantic.dev/latest/concepts/unions/#discriminated-unions), allowing Pydantic to automatically determine which column configuration class to instantiate. + + +## SingleColumnConfig (Base Class) + +```python +class SingleColumnConfig(BaseModel): + """Base configuration for all column types.""" + + name: str # Column name (unique identifier) + drop: bool = False # Whether to drop column from final output + column_type: str # Discriminator field for column type + + @property + def required_columns(self) -> list[str]: + """Columns that must be generated before this one.""" + ... + + @property + def side_effect_columns(self) -> list[str]: + """Columns created as side effects (e.g., reasoning traces).""" + ... +``` + +## SamplerColumnConfig + +```python +class SamplerColumnConfig(SingleColumnConfig): + """Configuration for sampler-based columns.""" + + column_type: Literal["sampler"] = "sampler" + sampler_type: SamplerType # Type of sampler to use + params: SamplerParamsT # Sampler-specific parameters + conditional_params: dict[str, SamplerParamsT] = {} # Condition-based params + convert_to: str | None = None # Type conversion ("int", "float", "str") +``` + +## LLMTextColumnConfig + +```python +class LLMTextColumnConfig(SingleColumnConfig): + """Configuration for LLM text generation columns.""" + + column_type: Literal["llm-text"] = "llm-text" + model_alias: str # Reference to model configuration + prompt: str # Jinja2 template for the prompt + system_prompt: str | None = None # Optional system prompt + multi_modal_context: list[ImageContext] | None = None # Image inputs + tool_alias: str | None = None # Tool configuration alias for MCP tool calls + with_trace: TraceType = TraceType.NONE # Trace capture setting + extract_reasoning_content: bool = False # Extract reasoning from response +``` + + +When `with_trace` is not `TraceType.NONE`, a `{column_name}__trace` column is created containing the message history. + +When `extract_reasoning_content=True`, a `{column_name}__reasoning_content` column is created containing chain-of-thought reasoning from the model response. + + +## LLMCodeColumnConfig + +Extends `LLMTextColumnConfig` for code generation. + +```python +class LLMCodeColumnConfig(LLMTextColumnConfig): + """Configuration for LLM code generation columns.""" + + column_type: Literal["llm-code"] = "llm-code" + code_lang: CodeLang # Target programming language + + # Inherits from LLMTextColumnConfig: + # model_alias, prompt, system_prompt, multi_modal_context, + # tool_alias, with_trace, extract_reasoning_content +``` + +## LLMStructuredColumnConfig + +Extends `LLMTextColumnConfig` for structured JSON output. + +```python +class LLMStructuredColumnConfig(LLMTextColumnConfig): + """Configuration for LLM structured output columns.""" + + column_type: Literal["llm-structured"] = "llm-structured" + output_format: type[BaseModel] | dict # Pydantic model or JSON schema + + # Inherits from LLMTextColumnConfig: + # model_alias, prompt, system_prompt, multi_modal_context, + # tool_alias, with_trace, extract_reasoning_content +``` + +## LLMJudgeColumnConfig + +Extends `LLMTextColumnConfig` for quality assessment. 
+ +```python +class LLMJudgeColumnConfig(LLMTextColumnConfig): + """Configuration for LLM judge/scoring columns.""" + + column_type: Literal["llm-judge"] = "llm-judge" + scores: list[Score] # Scoring rubrics (at least one required) + + # Inherits from LLMTextColumnConfig: + # model_alias, prompt, system_prompt, multi_modal_context, + # tool_alias, with_trace, extract_reasoning_content + + +class Score(BaseModel): + """Scoring rubric definition.""" + + name: str # Score dimension name + description: str # Description of what's being evaluated + options: dict[int | str, str] # Score options with descriptions +``` + +## EmbeddingColumnConfig + +```python +class EmbeddingColumnConfig(SingleColumnConfig): + """Configuration for embedding generation columns.""" + + column_type: Literal["embedding"] = "embedding" + model_alias: str # Reference to model configuration + target_column: str # Column containing text to embed +``` + +## ImageColumnConfig + +```python +class ImageColumnConfig(SingleColumnConfig): + """Configuration for image generation columns.""" + + column_type: Literal["image"] = "image" + model_alias: str # Reference to model configuration + prompt: str # Jinja2 template for the image prompt + multi_modal_context: list[ImageContext] | None = None # Image inputs for img2img +``` + +## ExpressionColumnConfig + +```python +class ExpressionColumnConfig(SingleColumnConfig): + """Configuration for Jinja2 expression columns.""" + + column_type: Literal["expression"] = "expression" + expr: str # Jinja2 expression + dtype: Literal["str", "int", "float", "bool"] = "str" # Output type +``` + +## ValidationColumnConfig + +```python +class ValidationColumnConfig(SingleColumnConfig): + """Configuration for validation columns.""" + + column_type: Literal["validation"] = "validation" + validator_type: ValidatorType # Type of validator + target_columns: list[str] # Columns to validate + validator_params: ValidatorParamsT # Validator-specific parameters + batch_size: int = 10 # Number of records per validation batch +``` + +## SeedDatasetColumnConfig + +```python +class SeedDatasetColumnConfig(SingleColumnConfig): + """Configuration for seed dataset columns. + + This config marks columns that come from seed data. It is typically + created automatically when calling with_seed_dataset() on the builder. 
+ """ + + column_type: Literal["seed-dataset"] = "seed-dataset" + # No additional fields - the column name is the seed column name +``` + +## CustomColumnConfig + +```python +class CustomColumnConfig(SingleColumnConfig): + """Configuration for custom user-defined column generators.""" + + column_type: Literal["custom"] = "custom" + generator_function: Callable # Function decorated with @custom_column_generator + generation_strategy: GenerationStrategy = GenerationStrategy.CELL_BY_CELL + generator_params: BaseModel | None = None # Optional typed config object + + +class GenerationStrategy(str, Enum): + """Strategy for custom column generation.""" + + CELL_BY_CELL = "cell_by_cell" # Row-based generation + FULL_COLUMN = "full_column" # Batch-based with DataFrame access +``` + +## CodeLang Enum + +```python +class CodeLang(str, Enum): + """Supported programming languages for code generation.""" + + BASH = "bash" + C = "c" + COBOL = "cobol" + CPP = "cpp" + CSHARP = "csharp" + GO = "go" + JAVA = "java" + JAVASCRIPT = "javascript" + KOTLIN = "kotlin" + PYTHON = "python" + RUBY = "ruby" + RUST = "rust" + SCALA = "scala" + SWIFT = "swift" + TYPESCRIPT = "typescript" + SQL_SQLITE = "sql:sqlite" + SQL_TSQL = "sql:tsql" + SQL_BIGQUERY = "sql:bigquery" + SQL_MYSQL = "sql:mysql" + SQL_POSTGRES = "sql:postgres" +``` + +## ValidatorType Enum + +```python +class ValidatorType(str, Enum): + """Supported validator types.""" + + CODE = "code" + LOCAL_CALLABLE = "local_callable" + REMOTE = "remote" +``` + +## TraceType Enum + +```python +class TraceType(str, Enum): + """Trace capture options for LLM columns.""" + + NONE = "none" # No trace captured + LAST_MESSAGE = "last_message" # Only final assistant message + ALL_MESSAGES = "all_messages" # Full conversation history +``` diff --git a/fern/v0.5.0/pages/api-reference/config-builder.mdx b/fern/v0.5.0/pages/api-reference/config-builder.mdx new file mode 100644 index 00000000..66d59045 --- /dev/null +++ b/fern/v0.5.0/pages/api-reference/config-builder.mdx @@ -0,0 +1,496 @@ +--- +title: Config Builder +description: API reference for the DataDesignerConfigBuilder class. +--- + +The `DataDesignerConfigBuilder` provides a high-level interface for building Data Designer configurations. It allows you to define columns, model configurations, tool configurations, processors, constraints, and seed datasets in a fluent, method-chaining style. + + +Model configurations must be provided either at initialization or defaults will be used when running locally with configured API keys. + + +## BuilderConfig + +Container for the full builder configuration, including the Data Designer config and library version. + +```python +class BuilderConfig(ExportableConfigBase): + """Configuration container for Data Designer builder.""" + + data_designer: DataDesignerConfig # Main Data Designer configuration + library_version: str | None = None # Version used to create the config +``` + +## DataDesignerConfigBuilder + +The main builder class for constructing Data Designer configurations. + +### Constructor + +```python +def __init__( + self, + model_configs: list[ModelConfig] | str | Path | None = None, + tool_configs: list[ToolConfig] | None = None, +) -> None: + """Initialize a new DataDesignerConfigBuilder instance. + + Args: + model_configs: Model configurations. Can be: + - None to use default model configurations in local mode + - A list of ModelConfig objects + - A string or Path to a model configuration file + tool_configs: Tool configurations for MCP tool calling. 
+ """ +``` + +### Class Methods + +#### from_config + +```python +@classmethod +def from_config(cls, config: dict | str | Path | BuilderConfig) -> Self: + """Create a DataDesignerConfigBuilder from an existing configuration. + + Accepts both the full BuilderConfig format (with a top-level + `data_designer` key) and the shorthand DataDesignerConfig format + (columns, model_configs, etc. at the top level). + + Args: + config: Configuration source. Can be: + - A dictionary containing the configuration + - A string or Path to a local YAML/JSON configuration file + - An HTTP(S) URL string to a YAML/JSON configuration file + - A BuilderConfig object + + Returns: + A new instance populated with the configuration. + """ +``` + +### Properties + +```python +@property +def model_configs(self) -> list[ModelConfig]: + """Get the model configurations for this builder.""" + +@property +def tool_configs(self) -> list[ToolConfig]: + """Get the tool configurations for this builder.""" + +@property +def allowed_references(self) -> list[str]: + """Get all referenceable variables allowed in prompt templates and expressions.""" + +@property +def info(self) -> ConfigBuilderInfo: + """Get the ConfigBuilderInfo object for this builder.""" +``` + +### Column Methods + +#### add_column + +```python +def add_column( + self, + column_config: ColumnConfigT | None = None, + *, + name: str | None = None, + column_type: DataDesignerColumnType | None = None, + **kwargs, +) -> Self: + """Add a column configuration to the builder. + + You can either provide a column config object directly, or provide + name, column_type, and additional kwargs to construct the config. + + Args: + column_config: Data Designer column config object to add. + name: Name of the column (if not using column_config). + column_type: Column type (if not using column_config). + **kwargs: Additional arguments for the column constructor. + + Returns: + The builder instance for chaining. + """ +``` + +#### delete_column + +```python +def delete_column(self, column_name: str) -> Self: + """Delete the column with the given name. + + Args: + column_name: Name of the column to delete. + + Returns: + The builder instance for chaining. + + Raises: + BuilderConfigurationError: If trying to delete a seed dataset column. + """ +``` + +#### get_column_config + +```python +def get_column_config(self, name: str) -> ColumnConfigT: + """Get a column configuration by name. + + Args: + name: Name of the column. + + Returns: + The column configuration object. + + Raises: + KeyError: If no column with the given name exists. + """ +``` + +#### get_column_configs + +```python +def get_column_configs(self) -> list[ColumnConfigT]: + """Get all column configurations.""" +``` + +#### get_columns_of_type + +```python +def get_columns_of_type(self, column_type: DataDesignerColumnType) -> list[ColumnConfigT]: + """Get all column configurations of the specified type.""" +``` + +#### get_columns_excluding_type + +```python +def get_columns_excluding_type(self, column_type: DataDesignerColumnType) -> list[ColumnConfigT]: + """Get all column configurations excluding the specified type.""" +``` + +#### num_columns_of_type + +```python +def num_columns_of_type(self, column_type: DataDesignerColumnType) -> int: + """Get the count of columns of the specified type.""" +``` + +### Model Config Methods + +#### add_model_config + +```python +def add_model_config(self, model_config: ModelConfig) -> Self: + """Add a model configuration to the builder. 
+ + Args: + model_config: The model configuration to add. + + Raises: + BuilderConfigurationError: If a config with the same alias exists. + """ +``` + +#### delete_model_config + +```python +def delete_model_config(self, alias: str) -> Self: + """Delete a model configuration by alias.""" +``` + +### Tool Config Methods + +#### add_tool_config + +```python +def add_tool_config(self, tool_config: ToolConfig) -> Self: + """Add a tool configuration for MCP tool calling. + + Args: + tool_config: The tool configuration to add. + + Raises: + BuilderConfigurationError: If a config with the same alias exists. + """ +``` + +#### delete_tool_config + +```python +def delete_tool_config(self, alias: str) -> Self: + """Delete a tool configuration by alias.""" +``` + +#### get_tool_config + +```python +def get_tool_config(self, alias: str) -> ToolConfig: + """Get a tool configuration by alias. + + Raises: + KeyError: If no tool configuration with the given alias exists. + """ +``` + +### Processor Methods + +#### add_processor + +```python +def add_processor( + self, + processor_config: ProcessorConfigT | None = None, + *, + processor_type: ProcessorType | None = None, + **kwargs, +) -> Self: + """Add a processor to the builder. + + You can either provide a processor config object directly, or provide + a processor_type and additional kwargs to construct the config. + + Args: + processor_config: The processor configuration object. + processor_type: The type of processor to add. + **kwargs: Additional arguments for the processor constructor. + """ +``` + +#### get_processor_configs + +```python +def get_processor_configs(self) -> list[ProcessorConfigT]: + """Get all processor configurations.""" +``` + +### Constraint Methods + +#### add_constraint + +```python +def add_constraint( + self, + constraint: ColumnConstraintT | None = None, + *, + constraint_type: ConstraintType | None = None, + **kwargs, +) -> Self: + """Add a constraint to the builder. + + Valid constraint types: + - "scalar_inequality": Constraint between a column and a scalar value + - "column_inequality": Constraint between two columns + + Args: + constraint: Constraint object to add. + constraint_type: Constraint type (if not using constraint object). + **kwargs: Additional arguments for the constraint constructor. + """ +``` + +#### delete_constraints + +```python +def delete_constraints(self, target_column: str) -> Self: + """Delete all constraints for the given target column.""" +``` + +#### get_constraints + +```python +def get_constraints(self, target_column: str) -> list[ColumnConstraintT]: + """Get all constraints for the given target column.""" +``` + +### Profiler Methods + +#### add_profiler + +```python +def add_profiler(self, profiler_config: ColumnProfilerConfigT) -> Self: + """Add a profiler to the builder. + + Args: + profiler_config: The profiler configuration object. + """ +``` + +#### get_profilers + +```python +def get_profilers(self) -> list[ColumnProfilerConfigT]: + """Get all profiler configurations.""" +``` + +### Seed Dataset Methods + +#### with_seed_dataset + +```python +def with_seed_dataset( + self, + seed_source: SeedSourceT, + *, + sampling_strategy: SamplingStrategy = SamplingStrategy.ORDERED, + selection_strategy: IndexRange | PartitionBlock | None = None, +) -> Self: + """Add a seed dataset to the builder. + + Args: + seed_source: The seed dataset source. + sampling_strategy: How to sample from the seed (default: ORDERED). + selection_strategy: Optional selection strategy for the seed. 
+ """ +``` + +#### get_seed_config + +```python +def get_seed_config(self) -> SeedConfig | None: + """Get the seed config, or None if not configured.""" +``` + +### Build and Export Methods + +#### build + +```python +def build(self) -> DataDesignerConfig: + """Build a DataDesignerConfig from the current builder state. + + Returns: + The built configuration object. + """ +``` + +#### get_builder_config + +```python +def get_builder_config(self) -> BuilderConfig: + """Get the full BuilderConfig including library version.""" +``` + +#### write_config + +```python +def write_config(self, path: str | Path, indent: int | None = 2, **kwargs) -> None: + """Write the configuration to a YAML or JSON file. + + The format is determined by the file extension (.yaml, .yml, or .json). + + Args: + path: Path to the output file. + indent: Indentation level (default: 2). + **kwargs: Additional serialization options. + + Raises: + BuilderConfigurationError: If the file format is unsupported. + BuilderSerializationError: If using a DataFrame seed source. + """ +``` + +## ConfigBuilderInfo + +The `info` property returns a `ConfigBuilderInfo` object that can display information about available samplers and model configurations. + +```python +# Display available sampler types and their parameters +config_builder.info.display("samplers") + +# Display configured model configurations +config_builder.info.display("model_configs") +``` + +## Seed Sources + +### LocalFileSeedSource + +```python +class LocalFileSeedSource(SeedSource): + """Seed source from a local file.""" + + seed_type: Literal["local"] = "local" + path: str # Path to parquet, CSV, or JSON file + + @classmethod + def from_dataframe(cls, df: pd.DataFrame, path: str) -> Self: + """Create a local file seed source from a DataFrame. + + Saves the DataFrame to the specified path as parquet. + """ +``` + +### HuggingFaceSeedSource + +```python +class HuggingFaceSeedSource(SeedSource): + """Seed source from HuggingFace datasets.""" + + seed_type: Literal["hf"] = "hf" + path: str # HuggingFace path (e.g., "datasets/user/dataset/data/*.parquet") + token: str | None = None # Optional HuggingFace token + endpoint: str = "https://huggingface.co" +``` + +### DataFrameSeedSource + +```python +class DataFrameSeedSource(SeedSource): + """Seed source from an in-memory DataFrame. + + Note: Cannot be serialized to config files. 
+ """ + + seed_type: Literal["df"] = "df" + df: pd.DataFrame # The DataFrame to use as seed +``` + +## Example Usage + +```python +import data_designer.config as dd +from data_designer.interface import DataDesigner + +# Create a builder with default model configurations +builder = dd.DataDesignerConfigBuilder() + +# Add columns +builder.add_column( + dd.SamplerColumnConfig( + name="category", + sampler_type=dd.SamplerType.CATEGORY, + params=dd.CategorySamplerParams(values=["A", "B", "C"]), + ) +) + +builder.add_column( + dd.LLMTextColumnConfig( + name="description", + model_alias="nvidia-text", + prompt="Write a description for category {{ category }}.", + ) +) + +# Add a processor +builder.add_processor( + dd.DropColumnsProcessorConfig( + name="cleanup", + column_names=["intermediate_column"], + ) +) + +# Build and use +data_designer = DataDesigner() +results = data_designer.preview(builder, num_records=5) + +# Save configuration +builder.write_config("my_config.yaml") + +# Load configuration later +loaded_builder = dd.DataDesignerConfigBuilder.from_config("my_config.yaml") +``` diff --git a/fern/v0.5.0/pages/api-reference/data-designer-config.mdx b/fern/v0.5.0/pages/api-reference/data-designer-config.mdx new file mode 100644 index 00000000..b9ce1049 --- /dev/null +++ b/fern/v0.5.0/pages/api-reference/data-designer-config.mdx @@ -0,0 +1,110 @@ +--- +title: Data Designer Configuration +description: API reference for the DataDesignerConfig object. +--- + +`DataDesignerConfig` is the main configuration object for building datasets with Data Designer. It is a declarative configuration for defining the dataset you want to generate column-by-column, including options for dataset post-processing, validation, and profiling. + +Generally, you should use the [DataDesignerConfigBuilder](/api/config-builder) to build your configuration, but you can also build it manually by instantiating the `DataDesignerConfig` class directly. + +## DataDesignerConfig + +```python +class DataDesignerConfig(BaseModel): + """Complete configuration for a Data Designer generation job.""" + + columns: list[SingleColumnConfig] # List of column configurations + processors: list[ProcessorConfig] = [] # Post-generation processors + seed_source: SeedSource | None = None # Optional seed dataset + + @property + def column_names(self) -> list[str]: + """Names of all configured columns.""" + ... + + @property + def dependency_graph(self) -> dict[str, list[str]]: + """Column dependency graph for execution ordering.""" + ... + + def get_column(self, name: str) -> SingleColumnConfig: + """Get a column configuration by name. + + Args: + name: Column name. + + Returns: + Column configuration. + + Raises: + KeyError: If column not found. + """ + ... + + def to_yaml(self) -> str: + """Serialize configuration to YAML string.""" + ... + + def to_json(self) -> str: + """Serialize configuration to JSON string.""" + ... + + @classmethod + def from_yaml(cls, yaml_str: str) -> "DataDesignerConfig": + """Deserialize configuration from YAML string.""" + ... + + @classmethod + def from_json(cls, json_str: str) -> "DataDesignerConfig": + """Deserialize configuration from JSON string.""" + ... + + @classmethod + def from_file(cls, path: str | Path) -> "DataDesignerConfig": + """Load configuration from a file. + + Args: + path: Path to YAML or JSON file. + + Returns: + Loaded configuration. + """ + ... 
+``` + +## Configuration Serialization + +Data Designer configs can be serialized to and from YAML or JSON format, making it easy to: + +- Save configurations for reproducibility +- Share configurations with team members +- Version control your data generation pipelines +- Load and modify existing configurations + +### Example: Saving and Loading Configs + +```python +import data_designer.config as dd + +# Build a configuration +builder = dd.DataDesignerConfigBuilder() +builder.add_column( + dd.SamplerColumnConfig( + name="id", + sampler_type=dd.SamplerType.UUID, + ) +) +builder.add_column( + dd.LLMTextColumnConfig( + name="greeting", + model_alias="nvidia-text", + prompt="Write a greeting.", + ) +) + +# Save to file +builder.write_config("my_config.yaml") + +# Load from file later +config = dd.DataDesignerConfig.from_file("my_config.yaml") +``` diff --git a/fern/v0.5.0/pages/api-reference/mcp.mdx b/fern/v0.5.0/pages/api-reference/mcp.mdx new file mode 100644 index 00000000..24fc8f6e --- /dev/null +++ b/fern/v0.5.0/pages/api-reference/mcp.mdx @@ -0,0 +1,173 @@ +--- +title: MCP (Model Context Protocol) +description: Configuration and execution classes for tool use via MCP. +--- + +The `mcp` module defines configuration and execution classes for tool use via MCP (Model Context Protocol). + +## Configuration Classes + +- **[MCPProvider](#mcpprovider)**: Configure remote MCP servers via SSE transport +- **[LocalStdioMCPProvider](#localstdiomcpprovider)**: Configure local MCP servers as subprocesses via stdio transport +- **[ToolConfig](#toolconfig)**: Define which tools are available for LLM columns and how they are constrained + +For user-facing guides, see: + +- **[MCP Providers](/docs/concepts/mcp/mcp-providers)** - Configure local or remote MCP providers +- **[Tool Configs](/docs/concepts/mcp/tool-configs)** - Define tool permissions and limits +- **[Enabling Tools](/docs/concepts/mcp/enabling-tools)** - Use tools in LLM columns +- **[Traces](/docs/concepts/traces)** - Capture full conversation history + +## MCPProvider + +Remote MCP provider configuration using SSE (Server-Sent Events) transport. + +```python +import data_designer.config as dd + +provider = dd.MCPProvider( + name="remote-mcp", + endpoint="http://localhost:8080/sse", + api_key="MCP_API_KEY", +) +``` + +| Field | Type | Required | Description | +|-------|------|----------|-------------| +| `name` | `str` | Yes | Unique identifier for the provider | +| `endpoint` | `str` | Yes | SSE endpoint URL | +| `api_key` | `str` | No | API key or environment variable name | +| `provider_type` | `str` | No | Always `"sse"` (set automatically) | + +## LocalStdioMCPProvider + +Local MCP provider configuration using stdio transport (subprocess). + +```python +import data_designer.config as dd + +provider = dd.LocalStdioMCPProvider( + name="local-mcp", + command="python", + args=["-m", "my_mcp_server"], + env={"DEBUG": "true"}, +) +``` + +| Field | Type | Required | Description | +|-------|------|----------|-------------| +| `name` | `str` | Yes | Unique identifier for the provider | +| `command` | `str` | Yes | Executable to run | +| `args` | `list[str]` | No | Command-line arguments | +| `env` | `dict[str, str]` | No | Environment variables for the subprocess | +| `provider_type` | `str` | No | Always `"stdio"` (set automatically) | + +## ToolConfig + +Tool configuration defining tool access and constraints for LLM columns. 
+ +```python +import data_designer.config as dd + +tool_config = dd.ToolConfig( + tool_alias="my-tools", + providers=["local-mcp"], + allow_tools=["search", "get_fact"], + max_tool_call_turns=5, + timeout_sec=30.0, +) +``` + +| Field | Type | Required | Default | Description | +|-------|------|----------|---------|-------------| +| `tool_alias` | `str` | Yes | - | Unique identifier referenced by columns | +| `providers` | `list[str]` | Yes | - | MCP provider names to use | +| `allow_tools` | `list[str]` | No | `None` | Restrict to specific tools (`None` = all) | +| `max_tool_call_turns` | `int` | No | `5` | Maximum tool-calling iterations | +| `timeout_sec` | `float` | No | `60.0` | Per-call timeout in seconds | + +## Internal Architecture + +### Parallel Structure + +| Model Layer | MCP Layer | Purpose | +|-------------|-----------|---------| +| `ModelProviderRegistry` | `MCPProviderRegistry` | Holds provider configurations | +| `ModelRegistry` | `MCPRegistry` | Manages configs by alias, lazy facade creation | +| `ModelFacade` | `MCPFacade` | Lightweight facade scoped to specific config | +| `ModelConfig.alias` | `ToolConfig.tool_alias` | Alias for referencing in column configs | + +### MCPProviderRegistry + +Holds MCP provider configurations. Can be empty (MCP is optional). Created first during resource initialization. + +### MCPRegistry + +The central registry for tool configurations: + +- Holds `ToolConfig` instances by `tool_alias` +- Lazily creates `MCPFacade` instances via `get_mcp(tool_alias)` +- Manages shared connection pool and tool cache across all facades +- Validates that tool configs reference valid providers + +### MCPFacade + +A lightweight facade scoped to a specific `ToolConfig`. Key methods: + +| Method | Description | +|--------|-------------| +| `tool_call_count(response)` | Count tool calls in a completion response | +| `has_tool_calls(response)` | Check if response contains tool calls | +| `get_tool_schemas()` | Get OpenAI-format tool schemas for this config | +| `process_completion_response(response)` | Execute tool calls and return messages | +| `refuse_completion_response(response)` | Refuse tool calls gracefully (budget exhaustion) | + +Properties: `tool_alias`, `providers`, `max_tool_call_turns`, `allow_tools`, `timeout_sec` + +### I/O Layer + +The `io.py` module provides low-level MCP communication with performance optimizations: + +**Single event loop architecture:** +All MCP operations funnel through a dedicated background daemon thread running an asyncio event loop. This allows: + +- Efficient concurrent I/O without per-thread event loop overhead +- Natural session sharing across all worker threads +- Clean async implementation for parallel tool calls + +**Session pooling:** +MCP sessions are created lazily and kept alive for the program's duration: + +- One session per provider (keyed by serialized config) +- No per-call connection/handshake overhead +- Graceful cleanup on program exit via `atexit` handler + +**Request coalescing:** +The `list_tools` operation uses request coalescing to prevent thundering herd: + +- When multiple workers request tools from the same provider simultaneously +- Only one request is made; others wait for the cached result +- Uses asyncio.Lock per provider key + +**Parallel tool execution:** +The `call_tools_parallel()` function executes multiple tool calls concurrently via `asyncio.gather()`. This is used by MCPFacade when the model returns parallel tool calls in a single response. 
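+
+The exact signature of `call_tools_parallel()` is internal, but the underlying pattern is plain `asyncio.gather()`. Below is a minimal, self-contained sketch of that pattern; `execute_tool` is a hypothetical stand-in for a real MCP tool invocation, not a Data Designer API.
+
+```python
+import asyncio
+
+
+async def execute_tool(name: str, arguments: dict) -> str:
+    """Hypothetical stand-in for a single MCP tool invocation."""
+    await asyncio.sleep(0.1)  # simulate network I/O to the MCP server
+    return f"result of {name}({arguments})"
+
+
+async def run_parallel(tool_calls: list[tuple[str, dict]]) -> list[str]:
+    # Fan all tool calls out concurrently and collect results in order,
+    # mirroring how parallel tool calls in a single response are handled.
+    return await asyncio.gather(*(execute_tool(n, a) for n, a in tool_calls))
+
+
+results = asyncio.run(run_parallel([("search", {"query": "mcp"}), ("get_fact", {"id": 7})]))
+```
+
+Because `asyncio.gather()` preserves input order, results line up with the tool calls the model requested.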
+ +### Integration with ModelFacade.generate() + +The `ModelFacade.generate()` method accepts an optional `tool_alias` parameter: + +```python +output, messages = model_facade.generate( + prompt="Search and answer...", + parser=my_parser, + tool_alias="my-tools", # Enables tool calling for this generation +) +``` + +When `tool_alias` is provided: + +1. `ModelFacade` looks up the `MCPFacade` from `MCPRegistry` +2. Tool schemas are fetched and passed to the LLM +3. After each completion, `MCPFacade` processes tool calls +4. Turn counting tracks iterations; refusal kicks in when budget exhausted +5. Messages (including tool results) are returned for trace capture diff --git a/fern/v0.5.0/pages/api-reference/models.mdx b/fern/v0.5.0/pages/api-reference/models.mdx new file mode 100644 index 00000000..ba506afb --- /dev/null +++ b/fern/v0.5.0/pages/api-reference/models.mdx @@ -0,0 +1,116 @@ +--- +title: Models +description: API reference for model configuration objects. +--- + +The `models` module defines configuration objects for model-based generation. `ModelProvider` specifies connection and authentication details for custom providers. `ModelConfig` encapsulates model details including the model alias, identifier, and inference parameters. [Inference Parameters](/docs/concepts/models/inference-parameters) controls model behavior through settings like `temperature`, `top_p`, and `max_tokens`, with support for both fixed values and distribution-based sampling. The module includes `ImageContext` for providing image inputs to multimodal models, and `ImageInferenceParams` for configuring image generation models. + +For more information on how they are used, see below: + +- **[Model Providers](/docs/concepts/models/model-providers)** +- **[Model Configs](/docs/concepts/models/model-configs)** +- **[Images as Context](/docs/tutorials/images-as-context)** +- **[Generating Images](/docs/tutorials/generating-images)** + +## ModelProvider + +```python +class ModelProvider(BaseModel): + """Configuration for a model provider endpoint.""" + + name: str # Unique identifier for the provider + endpoint: str # API endpoint URL + provider_type: str = "openai" # Provider type (default: OpenAI-compatible) + api_key: str | None = None # API key or environment variable name + extra_body: dict[str, Any] | None = None # Additional request body parameters + extra_headers: dict[str, str] | None = None # Additional headers +``` + +## ModelConfig + +```python +class ModelConfig(BaseModel): + """Configuration for a specific model.""" + + alias: str # Unique identifier for this model configuration + model: str # Model identifier as recognized by the provider + provider: str | None = None # Reference to provider by name + inference_parameters: InferenceParamsT | None = None # Inference parameters +``` + +## ChatCompletionInferenceParams + +```python +class ChatCompletionInferenceParams(BaseModel): + """Parameters for chat completion inference.""" + + temperature: float | Distribution | None = None # Sampling temperature (0.0-2.0) + top_p: float | Distribution | None = None # Nucleus sampling parameter (0.0-1.0) + max_tokens: int | None = None # Maximum output tokens + max_parallel_requests: int = 4 # Maximum concurrent API requests + timeout: int | None = None # Request timeout in seconds + extra_body: dict[str, Any] | None = None # Additional request body parameters +``` + +## EmbeddingInferenceParams + +```python +class EmbeddingInferenceParams(BaseModel): + """Parameters for embedding inference.""" + + encoding_format: 
Literal["float", "base64"] = "float" # Embedding encoding format + dimensions: int | None = None # Number of embedding dimensions + max_parallel_requests: int = 4 # Maximum concurrent API requests + timeout: int | None = None # Request timeout in seconds + extra_body: dict[str, Any] | None = None # Additional request body parameters +``` + +## ImageInferenceParams + +```python +class ImageInferenceParams(BaseModel): + """Parameters for image generation inference.""" + + max_parallel_requests: int = 4 # Maximum concurrent API requests + timeout: int | None = None # Request timeout in seconds + extra_body: dict[str, Any] | None = None # Model-specific image options (size, quality, etc.) +``` + +## ImageContext + +```python +class ImageContext(BaseModel): + """Configuration for providing image context to vision models.""" + + column_name: str # Name of column containing image data + data_type: ModalityDataType # Type of image data (BASE64, URL, etc.) + image_format: ImageFormat | None = None # Image format (PNG, JPEG, etc.) +``` + +## Distribution Types + +### UniformDistribution + +```python +class UniformDistribution(BaseModel): + """Uniform distribution for parameter sampling.""" + + params: UniformDistributionParams + +class UniformDistributionParams(BaseModel): + low: float # Lower bound + high: float # Upper bound +``` + +### ManualDistribution + +```python +class ManualDistribution(BaseModel): + """Manual distribution with discrete values.""" + + params: ManualDistributionParams + +class ManualDistributionParams(BaseModel): + values: list[float] # Discrete values to sample from + weights: list[float] | None = None # Optional probability weights +``` diff --git a/fern/v0.5.0/pages/api-reference/processors.mdx b/fern/v0.5.0/pages/api-reference/processors.mdx new file mode 100644 index 00000000..a69d642a --- /dev/null +++ b/fern/v0.5.0/pages/api-reference/processors.mdx @@ -0,0 +1,147 @@ +--- +title: Processors +description: API reference for processor configuration objects. +--- + +The `processors` module defines configuration objects for post-generation data transformations. Processors run after column generation and can modify the dataset schema or content before output. + +## ProcessorType Enum + +```python +class ProcessorType(str, Enum): + """Enumeration of available processor types.""" + + DROP_COLUMNS = "drop_columns" + SCHEMA_TRANSFORM = "schema_transform" +``` + +## ProcessorConfig (Base Class) + +```python +class ProcessorConfig(ConfigBase, ABC): + """Abstract base class for all processor configuration types.""" + + name: str # Unique name of the processor + processor_type: str # Discriminator field +``` + +## DropColumnsProcessorConfig + +```python +class DropColumnsProcessorConfig(ProcessorConfig): + """Configuration for dropping columns from output.""" + + name: str # Processor identifier + column_names: list[str] # Columns to remove from output + processor_type: Literal[ProcessorType.DROP_COLUMNS] = ProcessorType.DROP_COLUMNS +``` + +### Behavior + +- Columns in `column_names` are removed from the main output +- Dropped column values are saved to a separate file in `dropped-columns/` +- When this processor is added via the config builder, column configs are automatically marked with `drop=True` + +### Example Usage + +```python +import data_designer.config as dd + +builder = dd.DataDesignerConfigBuilder() +# ... add columns ... 
+ +builder.add_processor( + dd.DropColumnsProcessorConfig( + name="remove_intermediate", + column_names=["temp_calculation", "raw_context", "debug_info"], + ) +) +``` + + +Instead of using a processor, you can set `drop=True` directly when configuring a column: + +```python +builder.add_column( + dd.LLMTextColumnConfig( + name="intermediate_reasoning", + model_alias="nvidia-text", + prompt="...", + drop=True, # This column will be dropped from output + ) +) +``` + + +## SchemaTransformProcessorConfig + +```python +class SchemaTransformProcessorConfig(ProcessorConfig): + """Configuration for transforming output schema.""" + + name: str # Processor identifier + template: dict[str, Any] # Jinja2 template for output schema (must be JSON-serializable) + processor_type: Literal[ProcessorType.SCHEMA_TRANSFORM] = ProcessorType.SCHEMA_TRANSFORM +``` + +### Behavior + +- Each key in `template` becomes a column in the transformed output +- Values are Jinja2 templates with access to all columns in the batch +- Complex structures (lists, nested dicts) are supported but must be JSON-serializable +- Output saved to `processors-outputs/{processor_name}/` +- Original dataset passes through unchanged + +### Template Capabilities + +- **Variable substitution**: `{{ column_name }}` +- **Filters**: `{{ text | upper }}`, `{{ text | lower }}`, `{{ text | trim }}` +- **Nested structures**: Arbitrarily deep JSON structures +- **Lists**: `["{{ col1 }}", "{{ col2 }}"]` +- **Conditionals**: `{% if condition %}...{% endif %}` + +### Example Usage + +```python +import data_designer.config as dd + +builder = dd.DataDesignerConfigBuilder() +# ... add columns with 'question' and 'answer' ... + +# Transform to chat message format +builder.add_processor( + dd.SchemaTransformProcessorConfig( + name="chat_format", + template={ + "messages": [ + {"role": "user", "content": "{{ question }}"}, + {"role": "assistant", "content": "{{ answer }}"}, + ], + "metadata": { + "category": "{{ category | upper }}", + "generated": True, + }, + }, + ) +) +``` + +## Helper Function + +```python +def get_processor_config_from_kwargs( + processor_type: ProcessorType, + **kwargs: Any +) -> ProcessorConfig: + """Create a processor configuration from a processor type and keyword arguments. + + Args: + processor_type: The type of processor to create. + **kwargs: Additional keyword arguments passed to the processor constructor. + + Returns: + A processor configuration object of the specified type. + """ +``` + +This function is used internally by `DataDesignerConfigBuilder.add_processor()` when you provide a `processor_type` and keyword arguments instead of a config object directly. diff --git a/fern/v0.5.0/pages/api-reference/run-config.mdx b/fern/v0.5.0/pages/api-reference/run-config.mdx new file mode 100644 index 00000000..e5b4b0ac --- /dev/null +++ b/fern/v0.5.0/pages/api-reference/run-config.mdx @@ -0,0 +1,101 @@ +--- +title: Run Config +description: Runtime settings for dataset generation behavior. +--- + +The `run_config` module defines runtime settings that control dataset generation behavior, including early shutdown thresholds, batch sizing, and non-inference worker concurrency. 
+ +## Usage + +```python +import data_designer.config as dd +from data_designer.interface import DataDesigner + +data_designer = DataDesigner() +data_designer.set_run_config(dd.RunConfig( + buffer_size=500, + max_conversation_restarts=3, +)) +``` + +## RunConfig + +| Field | Type | Default | Description | +|-------|------|---------|-------------| +| `buffer_size` | `int` | `1000` | Number of records processed per batch | +| `max_conversation_restarts` | `int` | `5` | Maximum full conversation restarts for failed generations | +| `max_conversation_correction_steps` | `int` | `0` | Maximum in-conversation correction attempts | +| `non_inference_max_parallel_workers` | `int` | `4` | Thread pool size for non-LLM operations | +| `disable_early_shutdown` | `bool` | `False` | Disable early shutdown on high error rates | +| `shutdown_error_rate` | `float` | `0.5` | Error rate threshold for early shutdown | +| `shutdown_error_window` | `int` | `10` | Minimum tasks before error monitoring begins | + +## Parameters in Detail + +### `buffer_size` + +Controls how many records are processed per batch. Each batch completes entirely before moving to the next. + +| Value | Memory Usage | Throughput | Error Feedback | +|-------|--------------|------------|----------------| +| **Low** (100-500) | Lower | May not saturate inference | Fast | +| **Default** (1000) | Moderate | Good for most cases | Moderate | +| **High** (2000-5000) | Higher | Better for deep pipelines | Slower | + +### `max_conversation_restarts` + +When generation fails (parsing error, schema violation, etc.), the entire conversation is restarted from scratch. This parameter limits total restart attempts per cell. + +### `max_conversation_correction_steps` + +Instead of restarting, Data Designer can attempt in-conversation corrections by feeding the error back to the model. Set this to enable error-recovery within the conversation. + + +For strict schema requirements, consider `max_conversation_restarts=7` with `max_conversation_correction_steps=2`. + + +### `non_inference_max_parallel_workers` + +Thread pool size for non-LLM operations (samplers, expressions, validators). Increase for workloads with many CPU-bound columns. + +### Early Shutdown + +Early shutdown terminates generation if the error rate exceeds a threshold, preventing wasted computation on failing workloads. 
+ +- `disable_early_shutdown`: Set to `True` to see all errors during debugging +- `shutdown_error_rate`: Error rate threshold (0.5 = 50%) +- `shutdown_error_window`: Minimum tasks before monitoring begins + +## Example Configurations + +### High-throughput workload + +```python +run_config = dd.RunConfig( + buffer_size=2000, + max_conversation_restarts=3, +) +``` + +### Strict schema requirements + +```python +run_config = dd.RunConfig( + max_conversation_restarts=7, + max_conversation_correction_steps=2, +) +``` + +### Debugging failed generations + +```python +run_config = dd.RunConfig( + disable_early_shutdown=True, + buffer_size=100, +) +``` + +## See Also + +- [Architecture & Performance](/docs/concepts/architecture-and-performance): Detailed tuning guide +- [Inference Parameters](/docs/concepts/models/inference-parameters): Per-model concurrency settings diff --git a/fern/v0.5.0/pages/api-reference/sampler-params.mdx b/fern/v0.5.0/pages/api-reference/sampler-params.mdx new file mode 100644 index 00000000..6c19b199 --- /dev/null +++ b/fern/v0.5.0/pages/api-reference/sampler-params.mdx @@ -0,0 +1,273 @@ +--- +title: Sampler Parameters +description: API reference for sampler parameter configuration objects. +--- + +The `sampler_params` module defines parameter configuration objects for all Data Designer sampler types. Sampler parameters are used within the `SamplerColumnConfig` to specify how values should be generated for sampled columns. + + +The config builder has an `info` attribute that can be used to display the available sampler types and their parameters: + +```python +config_builder.info.display("samplers") +``` + + +## SamplerType + +Enum defining all available sampler types: + +```python +class SamplerType(str, Enum): + BERNOULLI = "bernoulli" + BERNOULLI_MIXTURE = "bernoulli_mixture" + BINOMIAL = "binomial" + CATEGORY = "category" + DATETIME = "datetime" + GAUSSIAN = "gaussian" + PERSON = "person" + PERSON_FROM_FAKER = "person_from_faker" + POISSON = "poisson" + SCIPY = "scipy" + SUBCATEGORY = "subcategory" + TIMEDELTA = "timedelta" + UNIFORM = "uniform" + UUID = "uuid" +``` + +## CategorySamplerParams + +Parameters for categorical sampling with optional probability weighting. + +```python +class CategorySamplerParams(ConfigBase): + """Parameters for categorical sampling.""" + + values: list[str | int | float] # List of categorical values to sample from (required) + weights: list[float] | None = None # Optional unnormalized probability weights + sampler_type: Literal[SamplerType.CATEGORY] = SamplerType.CATEGORY +``` + +When `weights` are provided, they are automatically normalized to sum to 1.0. Larger weights result in higher sampling probability. + +## SubcategorySamplerParams + +Parameters for subcategory sampling conditioned on a parent category column. + +```python +class SubcategorySamplerParams(ConfigBase): + """Parameters for hierarchical categorical sampling.""" + + category: str # Name of parent category column + values: dict[str, list[str | int | float]] # Mapping of parent values to subcategory values + sampler_type: Literal[SamplerType.SUBCATEGORY] = SamplerType.SUBCATEGORY +``` + +## UniformSamplerParams + +Parameters for sampling from a continuous Uniform distribution. 
+ +```python +class UniformSamplerParams(ConfigBase): + """Parameters for uniform distribution sampling.""" + + low: float # Lower bound (inclusive) + high: float # Upper bound (inclusive) + decimal_places: int | None = None # Number of decimal places to round to + sampler_type: Literal[SamplerType.UNIFORM] = SamplerType.UNIFORM +``` + +## GaussianSamplerParams + +Parameters for sampling from a Gaussian (Normal) distribution. + +```python +class GaussianSamplerParams(ConfigBase): + """Parameters for Gaussian (normal) distribution sampling.""" + + mean: float # Distribution mean (required) + stddev: float # Standard deviation (required) + decimal_places: int | None = None # Number of decimal places to round to + sampler_type: Literal[SamplerType.GAUSSIAN] = SamplerType.GAUSSIAN +``` + +## BernoulliSamplerParams + +Parameters for sampling from a Bernoulli distribution (binary outcomes). + +```python +class BernoulliSamplerParams(ConfigBase): + """Parameters for Bernoulli (binary) sampling.""" + + p: float # Probability of success (1), must be between 0.0 and 1.0 + sampler_type: Literal[SamplerType.BERNOULLI] = SamplerType.BERNOULLI +``` + +## BernoulliMixtureSamplerParams + +Parameters for sampling from a Bernoulli mixture distribution. Combines a Bernoulli distribution with a scipy.stats distribution. + +```python +class BernoulliMixtureSamplerParams(ConfigBase): + """Parameters for Bernoulli mixture distribution sampling.""" + + p: float # Probability of sampling from the mixture distribution (non-zero outcome) + dist_name: str # Name of scipy.stats distribution for non-zero samples + dist_params: dict # Parameters for the scipy.stats distribution + sampler_type: Literal[SamplerType.BERNOULLI_MIXTURE] = SamplerType.BERNOULLI_MIXTURE +``` + +With probability `1-p`, the sample is 0. With probability `p`, the sample is drawn from the specified distribution. + +## BinomialSamplerParams + +Parameters for sampling from a Binomial distribution. + +```python +class BinomialSamplerParams(ConfigBase): + """Parameters for binomial distribution sampling.""" + + n: int # Number of trials + p: float # Probability of success per trial (0.0 to 1.0) + sampler_type: Literal[SamplerType.BINOMIAL] = SamplerType.BINOMIAL +``` + +## PoissonSamplerParams + +Parameters for sampling from a Poisson distribution. + +```python +class PoissonSamplerParams(ConfigBase): + """Parameters for Poisson distribution sampling.""" + + mean: float # Mean number of events (lambda/rate parameter) + sampler_type: Literal[SamplerType.POISSON] = SamplerType.POISSON +``` + +## ScipySamplerParams + +Parameters for sampling from any scipy.stats distribution. + +```python +class ScipySamplerParams(ConfigBase): + """Parameters for scipy.stats distribution sampling.""" + + dist_name: str # Name of scipy.stats distribution (e.g., "beta", "gamma", "expon") + dist_params: dict # Distribution-specific parameters + decimal_places: int | None = None # Number of decimal places to round to + sampler_type: Literal[SamplerType.SCIPY] = SamplerType.SCIPY +``` + +See [scipy.stats documentation](https://docs.scipy.org/doc/scipy/reference/stats.html) for available distributions. + +## UUIDSamplerParams + +Parameters for UUID generation. 
+ +```python +class UUIDSamplerParams(ConfigBase): + """Parameters for UUID generation.""" + + prefix: str | None = None # Optional prefix for the UUID + short_form: bool = False # Truncate to 8 characters + uppercase: bool = False # Convert to uppercase + sampler_type: Literal[SamplerType.UUID] = SamplerType.UUID +``` + +## DatetimeSamplerParams + +Parameters for uniform datetime sampling within a specified range. + +```python +class DatetimeSamplerParams(ConfigBase): + """Parameters for datetime sampling.""" + + start: str # Start of date range (parseable by pandas.to_datetime) + end: str # End of date range (parseable by pandas.to_datetime) + unit: Literal["Y", "M", "D", "h", "m", "s"] = "D" # Sampling granularity + sampler_type: Literal[SamplerType.DATETIME] = SamplerType.DATETIME +``` + +The `unit` parameter determines the smallest possible time interval: +- `"Y"`: Years +- `"M"`: Months +- `"D"`: Days (default) +- `"h"`: Hours +- `"m"`: Minutes +- `"s"`: Seconds + +## TimeDeltaSamplerParams + +Parameters for sampling time deltas relative to a reference datetime column. + +```python +class TimeDeltaSamplerParams(ConfigBase): + """Parameters for timedelta (duration) sampling.""" + + dt_min: int # Minimum delta (inclusive), must be >= 0 + dt_max: int # Maximum delta (exclusive), must be > dt_min + reference_column_name: str # Column containing reference datetime + unit: Literal["D", "h", "m", "s"] = "D" # Time unit for delta values + sampler_type: Literal[SamplerType.TIMEDELTA] = SamplerType.TIMEDELTA +``` + + +Years and months are not supported as timedelta units because they have variable lengths. + + +## PersonSamplerParams + +Parameters for sampling synthetic person data from Nemotron Personas managed datasets. + +```python +class PersonSamplerParams(ConfigBase): + """Parameters for Nemotron-Personas person sampling.""" + + locale: str = "en_US" # Must be a supported managed dataset locale + sex: Literal["Male", "Female"] | None = None # Filter by sex + city: str | list[str] | None = None # Filter by city + age_range: list[int] = [18, 70] # [min_age, max_age] + select_field_values: dict[str, list[str]] | None = None # Custom field filters + with_synthetic_personas: bool = False # Include personality profiles + sampler_type: Literal[SamplerType.PERSON] = SamplerType.PERSON +``` + +Supported locales for managed datasets: `en_US`, `ja_JP`, `en_IN`, `hi_Deva_IN`, `hi_Latn_IN`, `en_SG`, `pt_BR`. + +## PersonFromFakerSamplerParams + +Parameters for Faker-based person sampling. Generates basic synthetic person data without the demographic accuracy of managed datasets. 
+ +```python +class PersonFromFakerSamplerParams(ConfigBase): + """Parameters for Faker-based person sampling.""" + + locale: str = "en_US" # Any Faker-supported locale + sex: Literal["Male", "Female"] | None = None # Filter by sex + city: str | list[str] | None = None # Filter by city + age_range: list[int] = [18, 70] # [min_age, max_age] + sampler_type: Literal[SamplerType.PERSON_FROM_FAKER] = SamplerType.PERSON_FROM_FAKER +``` + +## SamplerParamsT + +Type alias representing the union of all sampler parameter types: + +```python +SamplerParamsT = ( + SubcategorySamplerParams + | CategorySamplerParams + | DatetimeSamplerParams + | PersonSamplerParams + | PersonFromFakerSamplerParams + | TimeDeltaSamplerParams + | UUIDSamplerParams + | BernoulliSamplerParams + | BernoulliMixtureSamplerParams + | BinomialSamplerParams + | GaussianSamplerParams + | PoissonSamplerParams + | UniformSamplerParams + | ScipySamplerParams +) +``` diff --git a/fern/v0.5.0/pages/api-reference/validator-params.mdx b/fern/v0.5.0/pages/api-reference/validator-params.mdx new file mode 100644 index 00000000..3308e224 --- /dev/null +++ b/fern/v0.5.0/pages/api-reference/validator-params.mdx @@ -0,0 +1,167 @@ +--- +title: Validator Parameters +description: API reference for validator parameter configuration objects. +--- + +When creating a `ValidationColumnConfig`, two parameters are used to define the validator: `validator_type` and `validator_params`. +The `validator_type` parameter can be set to either `code`, `local_callable` or `remote`. The `validator_params` accompanying each of these is described below. + +## CodeValidatorParams + +```python +class CodeValidatorParams(BaseModel): + """Parameters for code validation.""" + + code_lang: CodeLang # Programming language to validate +``` + +### Supported Languages + +For Python code validation (uses Ruff): +- `CodeLang.PYTHON` + +For SQL code validation (uses SQLFluff): +- `CodeLang.SQL_ANSI` +- `CodeLang.SQL_POSTGRES` +- `CodeLang.SQL_MYSQL` +- `CodeLang.SQL_SQLITE` +- `CodeLang.SQL_TSQL` +- `CodeLang.SQL_BIGQUERY` + +### Example Usage + +```python +from data_designer.essentials import ( + CodeLang, + CodeValidatorParams, + ValidationColumnConfig, + ValidatorType, +) + +# Python code validation +python_validator = ValidationColumnConfig( + name="python_validation", + validator_type=ValidatorType.CODE, + target_columns=["python_code"], + validator_params=CodeValidatorParams(code_lang=CodeLang.PYTHON), + batch_size=10, +) + +# SQL code validation +sql_validator = ValidationColumnConfig( + name="sql_validation", + validator_type=ValidatorType.CODE, + target_columns=["sql_query"], + validator_params=CodeValidatorParams(code_lang=CodeLang.SQL_POSTGRES), + batch_size=10, +) +``` + +## LocalCallableValidatorParams + +```python +class LocalCallableValidatorParams(BaseModel): + """Parameters for local callable validation.""" + + validation_function: Callable[[pd.DataFrame], pd.DataFrame] + # Function that takes DataFrame and returns DataFrame with is_valid column + + output_schema: dict | None = None + # Optional JSON schema to validate function output +``` + +### Function Requirements + +The validation function must: +1. Accept a `pd.DataFrame` containing the target columns +2. Return a `pd.DataFrame` with at minimum an `is_valid` column (boolean or null) +3. 
Any additional columns in the output become validation metadata + +### Example Usage + +```python +import pandas as pd +from data_designer.essentials import ( + LocalCallableValidatorParams, + ValidationColumnConfig, + ValidatorType, +) + +def validate_positive_prices(df: pd.DataFrame) -> pd.DataFrame: + """Validate that all prices are positive.""" + result = pd.DataFrame() + result["is_valid"] = df["price"] > 0 + result["error_message"] = result["is_valid"].apply( + lambda v: "" if v else "Price must be positive" + ) + return result + +validator = ValidationColumnConfig( + name="price_validation", + validator_type=ValidatorType.LOCAL_CALLABLE, + target_columns=["price"], + validator_params=LocalCallableValidatorParams( + validation_function=validate_positive_prices, + ), + batch_size=50, +) +``` + +## RemoteValidatorParams + +```python +class RemoteValidatorParams(BaseModel): + """Parameters for remote HTTP validation.""" + + endpoint_url: str # URL of the validation endpoint + timeout: float = 30.0 # Request timeout in seconds + max_retries: int = 3 # Number of retry attempts + retry_backoff: float = 2.0 # Exponential backoff factor + max_parallel_requests: int = 4 # Maximum concurrent requests + output_schema: dict | None = None # Optional response schema validation +``` + +### Request/Response Format + +**Request (POST):** +```json +{ + "data": [ + {"column1": "value1", "column2": "value2"}, + {"column1": "value3", "column2": "value4"} + ] +} +``` + +**Response:** +```json +{ + "data": [ + {"is_valid": true, "additional_field": "value"}, + {"is_valid": false, "additional_field": "value"} + ] +} +``` + +### Example Usage + +```python +from data_designer.essentials import ( + RemoteValidatorParams, + ValidationColumnConfig, + ValidatorType, +) + +validator = ValidationColumnConfig( + name="external_validation", + validator_type=ValidatorType.REMOTE, + target_columns=["content"], + validator_params=RemoteValidatorParams( + endpoint_url="https://api.example.com/validate", + timeout=60.0, + max_retries=3, + max_parallel_requests=4, + ), + batch_size=5, +) +``` diff --git a/fern/v0.5.0/pages/concepts/architecture-and-performance.mdx b/fern/v0.5.0/pages/concepts/architecture-and-performance.mdx new file mode 100644 index 00000000..4d2df4de --- /dev/null +++ b/fern/v0.5.0/pages/concepts/architecture-and-performance.mdx @@ -0,0 +1,221 @@ +--- +title: Architecture & Performance +description: Understand Data Designer's execution model and tune performance for your workload. +--- + +Data Designer is an **orchestration framework** that coordinates synthetic data generation workflows. It is a **client** of LLM inference servers—it does not host models itself. + +This guide explains the architecture, execution model, and how to tune performance for your specific use case. 
+ +## Separation of Concerns + +``` +┌─────────────────────────────────────┐ ┌─────────────────────────────────────┐ +│ Data Designer │ │ Inference Server(s) │ +│ (Orchestration) │ HTTP │ (LLM Hosting) │ +│ │ ─────► │ │ +│ • Dataset workflow management │ │ • Model weights and execution │ +│ • Column dependency resolution │ │ • GPU allocation and scheduling │ +│ • Batching and parallelism │ │ • Request queuing │ +│ • Retry and error handling │ │ • Token generation │ +│ • Data validation and quality │ │ • Rate limiting (optional) │ +└─────────────────────────────────────┘ └─────────────────────────────────────┘ + ▲ ▲ + │ │ + Your workflow Your infrastructure + configuration (or cloud API) +``` + +### What Data Designer Does + +- **Orchestrates** the generation workflow across multiple columns +- **Resolves dependencies** between columns (DAG-based execution) +- **Batches** work into manageable chunks (`buffer_size`) +- **Parallelizes** LLM calls within batches (`max_parallel_requests`) +- **Handles errors** with retries and early shutdown logic +- **Validates** generated data against schemas and constraints + +### What Data Designer Does NOT Do + +- **Host models**: You must provide LLM endpoints +- **Manage GPUs**: Your inference server handles GPU allocation +- **Scale inference**: You must provision sufficient capacity +- **Rate limit**: Your server or API gateway handles this + +## Execution Model + + +This describes Data Designer's current **column-wise dataset generator**. Other dataset generation strategies are in development. + + +Data Designer processes datasets in **batches**, with **parallel** operations within each batch. + +### How It Works + +**Step 1: Split into batches** + +Your dataset is divided into batches of `buffer_size` records. Each batch is processed completely before moving to the next. + +**Step 2: Process columns sequentially** + +Within a batch, columns are generated one at a time following the dependency graph. The order depends on column dependencies—expression columns may come before LLM columns if the LLM columns depend on them. + +Example workflow: + +``` +Batch 1 (100 records) +│ +├─► Column 1: category (Sampler) ──── All 100 values generated +├─► Column 2: prompt (LLM Text) ──── All 100 values generated +├─► Column 3: response (LLM Text) ──── All 100 values generated +├─► Column 4: score (Expression) ──── All 100 values computed +│ +└─► Write batch to disk + │ + ▼ +Batch 2 (100 records) + ...repeat... +``` + +**Step 3: Generate cells in parallel** + +Within each column, cells are processed **in parallel** up to the configured limit: + +| Column Type | Parallelism Control | +|-------------|---------------------| +| Sampler | `non_inference_max_parallel_workers` | +| LLM (Text, Code, Structured, Judge) | `max_parallel_requests` | +| Expression | Sequential (fast, CPU-bound) | + +### Key Concepts + +| Concept | Description | +|---------|-------------| +| **Batching** | Records are split into batches of `buffer_size`. Each batch completes entirely before the next begins. | +| **Sequential columns** | Within a batch, columns are generated one at a time, respecting the dependency graph. | +| **Parallel cells** | Within a column, individual cells (records) are generated in parallel up to the configured limit. 
| + +### Concurrency Formula + +At any moment, the number of concurrent LLM requests is: + +```python +concurrent_requests = min( + buffer_size, # Records in current batch + max_parallel_requests, # Per-model limit + remaining_cells_in_column # Cells left to generate +) +``` + +**Example**: With `buffer_size=100` and `max_parallel_requests=8`, Data Designer sends up to 8 LLM requests at a time until all 100 cells in the column are complete. + +## Configuration Parameters + +### `buffer_size` (RunConfig) + +Controls how many records are processed per batch. + +```python +import data_designer.config as dd +from data_designer.interface import DataDesigner + +run_config = dd.RunConfig(buffer_size=2000) + +designer = DataDesigner() +designer.set_run_config(run_config) +``` + +| Value | Memory Usage | Throughput | Error Feedback | +|-------|--------------|------------|----------------| +| **Low** (100-500) | Lower | May not saturate inference | Fast | +| **Default** (1000) | Moderate | Good for most cases | Moderate | +| **High** (2000-5000) | Higher | Better for deep pipelines | Slower | + +**When to increase**: High-capacity inference server, single-model workflows, memory not constrained + +**When to decrease**: Memory-constrained environments, development/debugging, complex multi-model pipelines + +### `max_parallel_requests` (InferenceParams) + +Controls concurrent LLM API calls **per model alias**. + +```python +import data_designer.config as dd + +model = dd.ModelConfig( + alias="my-model", + model="nvidia/nemotron-3-nano-30b-a3b", + inference_parameters=dd.ChatCompletionInferenceParams( + max_parallel_requests=8, + ), +) +``` + +**Default**: 4 + +**When to increase**: Your inference backend has high throughput capacity, you're using a cloud API with generous rate limits, or you're running vLLM/TensorRT-LLM with multiple GPUs + +**When to decrease**: You're hitting rate limits or 429 errors, the inference server is overloaded, or you want more predictable/debuggable execution + + +The right value depends on your inference stack and model. Self-hosted vLLM servers can often handle values as high as 256, 512, or even 1024 depending on your hardware. + +**Benchmark approach**: Run a small dataset (e.g., 100 records) with increasing `max_parallel_requests` values (4 → 8 → 16 → 32 → ...) and measure generation time. Stop increasing when the runtime stops decreasing—that's when your inference server is saturated. + + +### `non_inference_max_parallel_workers` (RunConfig) + +Controls thread pool size for non-LLM operations (samplers, expressions, validators). + +```python +run_config = dd.RunConfig(non_inference_max_parallel_workers=8) +designer.set_run_config(run_config) +``` + +**Default**: 4 + +**When to increase**: Many CPU-bound columns (complex expressions, heavy sampling) + +### Error Handling (RunConfig) + +Control retry behavior and early shutdown for failed generations. 
+ +```python +run_config = dd.RunConfig( + max_conversation_restarts=5, # Full conversation restarts (default: 5) + max_conversation_correction_steps=0, # In-conversation corrections (default: 0) + disable_early_shutdown=False, # Enable early shutdown (default) + shutdown_error_rate=0.5, # Shut down if >50% errors + shutdown_error_window=10, # Min tasks before error monitoring +) +designer.set_run_config(run_config) +``` + +**When to adjust**: + +- **Strict schemas**: Increase `max_conversation_restarts` to 7, add `max_conversation_correction_steps=2` +- **Debugging**: Set `disable_early_shutdown=True` to see all errors +- **Simple text**: Reduce `max_conversation_restarts` to 3 + +## Common Problems + +| Problem | Symptom | Solution | +|---------|---------|----------| +| **Low throughput** | Low GPU utilization | Increase `max_parallel_requests` and/or `buffer_size` | +| **Long tail of slow generations** | Most records fast, few very slow | Reduce `max_conversation_restarts`, simplify schemas, improve prompts | +| **Multi-model idle periods** | One model busy, others idle | Reduce `buffer_size` for faster cycling, or consolidate models | +| **Memory errors** | OOM crashes | Reduce `buffer_size` and `max_parallel_requests` | +| **Too many errors** | Generation fails frequently | Check prompts/schemas; adjust `shutdown_error_rate` or disable early shutdown for debugging | + +## Tuning Workflow + +1. **Start with defaults** for initial development +2. **Profile your workload**: How many LLM columns? How many records? What models? +3. **Identify bottleneck**: Low GPU util → increase `max_parallel_requests`. Memory issues → decrease `buffer_size`. Long tails → tune retry settings. +4. **Iterate**: Make one change at a time, measure impact before next change + +## Related Documentation + +- [Deployment Options](/docs/concepts/deployment-options): Choosing between library and microservice +- [Model Configuration](/docs/concepts/models/model-configs): Complete model settings reference +- [Inference Parameters](/docs/concepts/models/inference-parameters): Detailed parameter reference diff --git a/fern/v0.5.0/pages/concepts/columns.mdx b/fern/v0.5.0/pages/concepts/columns.mdx new file mode 100644 index 00000000..2bd7b9c7 --- /dev/null +++ b/fern/v0.5.0/pages/concepts/columns.mdx @@ -0,0 +1,228 @@ +--- +title: Columns +description: The fundamental building blocks in Data Designer for defining dataset fields. +--- + +Columns are the fundamental building blocks in Data Designer. Each column represents a field in your dataset and defines how to generate it—whether that's sampling from a distribution, calling an LLM, or applying a transformation. + + +Columns are **declarative specifications**. You describe *what* you want, and the framework handles *how* to generate it—managing execution order, batching, parallelization, and resources automatically. + + +## Column Types + +Data Designer provides eleven built-in column types, each optimized for different generation scenarios. + +### 🎲 Sampler Columns + +Sampler columns generate data using numerical sampling—fast, deterministic, and ideal for numerical and categorical dataset fields. They're significantly faster than LLMs and can produce data following specific distributions (Poisson for event counts, Gaussian for measurements, etc.). 
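+
+As a minimal sketch, two sampler columns can be added with a couple of config objects via the config builder (the parameter names used here are documented in the [sampler parameters reference](/api/sampler-params)):
+
+```python
+import data_designer.config as dd
+
+config_builder = dd.DataDesignerConfigBuilder()
+
+# Weighted categorical values: "standard" is sampled roughly four times as often as "premium".
+config_builder.add_column(
+    dd.SamplerColumnConfig(
+        name="tier",
+        sampler_type=dd.SamplerType.CATEGORY,
+        params=dd.CategorySamplerParams(values=["standard", "premium"], weights=[4, 1]),
+    )
+)
+
+# Normally distributed measurement, rounded to two decimal places.
+config_builder.add_column(
+    dd.SamplerColumnConfig(
+        name="order_total",
+        sampler_type=dd.SamplerType.GAUSSIAN,
+        params=dd.GaussianSamplerParams(mean=42.0, stddev=7.5, decimal_places=2),
+    )
+)
+```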
+ +Available sampler types: + +- **UUID**: Unique identifiers +- **Category**: Categorical values with optional probability weights +- **Subcategory**: Hierarchical categorical data (states within countries, models within brands) +- **Uniform**: Evenly distributed numbers (integers or floats) +- **Gaussian**: Normally distributed values with configurable mean and standard deviation +- **Bernoulli**: Binary outcomes with specified success probability +- **Bernoulli Mixture**: Binary outcomes from multiple probability components +- **Binomial**: Count of successes in repeated trials +- **Poisson**: Count data and event frequencies +- **Scipy**: Access to the full scipy.stats distribution library +- **Person**: Realistic synthetic individuals with names, demographics, and attributes +- **Datetime**: Timestamps within specified ranges +- **Timedelta**: Time duration values + + +Samplers support **conditional parameters** that change behavior based on other columns. Want age distributions that vary by country? Income ranges that depend on occupation? Just define conditions on existing column values. + + +### 📝 LLM-Text Columns + +LLM-Text columns generate natural language text: product descriptions, customer reviews, narrative summaries, email threads, or anything requiring semantic understanding and creativity. + +Use **Jinja2 templating** in prompts to reference other columns. Data Designer automatically manages dependencies and injects the referenced column values into the prompt. + + +LLM columns can optionally capture message traces in a separate `{column_name}__trace` column. Set `with_trace` on the column config to control what's captured: `TraceType.NONE` (default, no trace), `TraceType.LAST_MESSAGE` (final assistant message only), or `TraceType.ALL_MESSAGES` (full conversation history). The trace includes the ordered message history for the final generation attempt (system/user/assistant/tool calls/tool results), and may include model reasoning fields when the provider exposes them. + + + +Some models expose chain-of-thought reasoning separately from the main response via a `reasoning_content` field. To capture only this reasoning (without the full trace), set `extract_reasoning_content=True`: + +```python +dd.LLMTextColumnConfig( + name="answer", + model_alias="reasoning-model", + prompt="Solve this problem: {{ problem }}", + extract_reasoning_content=True, # Creates answer__reasoning_content column +) +``` + +This creates a `{column_name}__reasoning_content` column containing the stripped reasoning content from the final assistant response, or `None` if the model didn't provide reasoning. This is independent of `with_trace`—you can use either or both. + + + +LLM columns can invoke external tools during generation via MCP (Model Context Protocol). Enable tools by setting `tool_alias` to reference a configured `ToolConfig`: + +```python +dd.LLMTextColumnConfig( + name="answer", + model_alias="nvidia-text", + prompt="Search for information and answer: {{ question }}", + tool_alias="search-tools", # References a ToolConfig + with_trace=dd.TraceType.ALL_MESSAGES, # Capture tool call history +) +``` + +When `tool_alias` is set, the model can request tool calls during generation. Data Designer executes the tools via configured MCP providers and feeds results back until the model produces a final answer. See [Tool Use & MCP](/docs/concepts/tool-use-and-mcp) for full configuration details. 
+ + + +LLM columns are parallelized within each batch using `max_parallel_requests` from your model's inference parameters. See the [Architecture & Performance](/docs/concepts/architecture-and-performance) guide for optimization strategies. + + +### 💻 LLM-Code Columns + +LLM-Code columns generate code in specific programming languages. They handle the prompting and parsing necessary to extract clean code from the LLM's response—automatically detecting and extracting code from markdown blocks. You provide the prompt and choose the model; the column handles the extraction. + +Supported languages: **Bash, C, C++, C#, COBOL, Go, Java, JavaScript, Kotlin, Python, Ruby, Rust, Scala, Swift, TypeScript**, plus **SQL** dialects (SQLite, PostgreSQL, MySQL, T-SQL, BigQuery, ANSI SQL). + +### 🗂️ LLM-Structured Columns + +LLM-Structured columns generate JSON with a *guaranteed schema*. Define your structure using a Pydantic model or JSON schema, and Data Designer ensures the LLM output conforms—no parsing errors, no schema drift. + +Use for complex nested structures: API responses, configuration files, database records with multiple related fields, or any structured data where type safety matters. Schemas can be arbitrarily complex with nested objects, arrays, enums, and validation constraints, but success depends on the model's capabilities. + + +Flat schemas with simple fields are easier and more robustly produced across models. Deeply nested schemas with complex validation constraints are more sensitive to model choice—stronger models handle complexity better. If you're experiencing schema conformance issues, try simplifying the schema or switching to a more capable model. + + +### ⚖️ LLM-Judge Columns + +LLM-Judge columns score generated content across multiple quality dimensions using LLMs as evaluators. + +Define scoring rubrics (relevance, accuracy, fluency, helpfulness) and the judge model evaluates each record. Score rubrics specify criteria and scoring options (1-5 scales, categorical grades, etc.), producing quantified quality metrics for every data point. + +Use judge columns for data quality filtering (e.g., keep only 4+ rated responses), A/B testing generation strategies, and quality monitoring over time. + +### 🖼️ Image Columns + +Image columns generate images from text prompts using either **diffusion** models (DALL·E, Stable Diffusion, Imagen) or **autoregressive** models (Gemini image, GPT image). + +Use **Jinja2 templating** in the prompt to reference other columns, driving diversity across generated images. For example, reference sampled attributes like style, subject, and composition to produce varied images without manually writing different prompts. + +Image columns require a model configured with `ImageInferenceParams`. Model-specific options (size, quality, aspect ratio) are passed via `extra_body` in the inference parameters. + +**Output modes:** + +- **Preview** (`data_designer.preview()`): Images are stored as base64-encoded strings directly in the DataFrame for quick iteration +- **Create** (`data_designer.create()`): Images are saved to disk in an `images//` folder with UUID filenames; the DataFrame stores relative paths + +Image columns also support `multi_modal_context` for autoregressive models that accept image inputs, enabling image-to-image generation workflows. 
+ + +The image tutorials cover three workflows: [Providing Images as Context](/docs/tutorials/images-as-context) (image → text), Generating Images (text → image), and Editing Images with Image Context (image → image). + + +### 🧬 Embedding Columns + +Embedding columns generate vector embeddings (numerical representations) for text content using embedding models. These embeddings capture semantic meaning, enabling similarity search, clustering, and semantic analysis. + +Specify a `target_column` containing text, and Data Designer generates embeddings for that content. The target column can contain either a single text string or a list of text strings in stringified JSON format. In the latter case, embeddings are generated for each text string in the list. + +Common use cases: + +- **Semantic search**: Generate embeddings for documents, then find similar content by vector similarity +- **Clustering**: Group similar texts based on embedding proximity +- **Recommendation systems**: Match content by semantic similarity +- **Anomaly detection**: Identify outliers in embedding space + + +Embedding columns require an embedding model configured with `EmbeddingInferenceParams`. These models differ from chat completion models—they output vectors rather than text. The generation type is automatically determined by the inference parameters type. + + +### 🧩 Expression Columns + +Expression columns handle simple transformations using **Jinja2 templates**—concatenate first and last names, calculate numerical totals, format date strings. No LLM overhead needed. + +Template capabilities: + +- **Variable substitution**: Pull values from any existing column +- **String filters**: Uppercase, lowercase, strip whitespace, replace patterns +- **Conditional logic**: if/elif/else support +- **Arithmetic**: Add, subtract, multiply, divide + +### 🔍 Validation Columns + +Validation columns check generated content against rules and return structured pass/fail results. + +Built-in validation types: + +**Code validation** runs Python or SQL code through a linter to validate the code. + +**Local callable validation** accepts a Python function directly when using Data Designer as a library. + +**Remote validation** sends data to HTTP endpoints for validation-as-a-service. Useful for linters, security scanners, or proprietary systems. + +### 🌱 Seed Dataset Columns + +Seed dataset columns bootstrap generation from existing data. Provide a real dataset, and those columns become available as context for generating new synthetic data. + +Typical pattern: use seed data for one part of your schema (real product names and categories), then generate synthetic fields around it (customer reviews, purchase histories, ratings). The seed data provides realism and constraints; generated columns add volume and variation. + +### 🔧 Custom Columns + +Custom columns let you implement your own generation logic using Python functions. Use the `@custom_column_generator` decorator to declare dependencies, and the framework handles DAG ordering and parallelization. + +Two generation strategies: + +- **`cell_by_cell`** (default): Function receives one row, framework parallelizes +- **`full_column`**: Function receives entire DataFrame for vectorized operations + +For LLM access, declare `model_aliases` in the decorator and receive a `models` dict as the third argument. See [Custom Columns](/docs/concepts/custom-columns) for details. 
+ +## Shared Column Properties + +Every column configuration inherits from `SingleColumnConfig` with these standard properties: + +### `name` + +The column's identifier—unique within your configuration, used in Jinja2 references, and becomes the column name in the output DataFrame. Choose descriptive names: `user_review` > `col_17`. + +### `drop` + +Boolean flag (default: `False`) controlling whether the column appears in final output. Setting `drop=True` generates the column (available as a dependency) but excludes it from final output. + +**When to drop columns:** + +- Intermediate calculations that feed expressions but aren't meaningful standalone +- Context columns used only for LLM prompt templates +- Validation results during development unwanted in production + +Dropped columns participate fully in generation and the dependency graph—just filtered out at the end. + +### `column_type` + +Literal string identifying the column type: `"sampler"`, `"llm-text"`, `"expression"`, etc. Set automatically by each configuration class and serves as Pydantic's discriminator for deserialization. + +You rarely set this manually—instantiating `LLMTextColumnConfig` automatically sets `column_type="llm-text"`. Serialization is reversible: save to YAML, load later, and Pydantic reconstructs the exact objects. + +### `required_columns` + +Computed property listing columns that must be generated before this one. The framework derives this automatically: + +- For LLM/Expression columns: extracted from Jinja2 template `{{ variables }}` +- For Validation columns: explicitly listed target columns +- For Sampler columns with conditional parameters: columns referenced in conditions + +You read this property for introspection but never set it—always computed from configuration details. + +### `side_effect_columns` + +Computed property listing columns created implicitly alongside the primary column. Currently, only LLM columns produce side effects: + +- `{name}__trace`: Created when `with_trace` is not `TraceType.NONE` on the column. +- `{name}__reasoning_content`: Created when `extract_reasoning_content=True` on the column. + +For detailed information on each column type, refer to the [column configuration API reference](/api/column-configs). diff --git a/fern/v0.5.0/pages/concepts/custom-columns.mdx b/fern/v0.5.0/pages/concepts/custom-columns.mdx new file mode 100644 index 00000000..54badd06 --- /dev/null +++ b/fern/v0.5.0/pages/concepts/custom-columns.mdx @@ -0,0 +1,132 @@ +--- +title: Custom Columns +description: Implement your own generation logic using Python functions. +--- + +Custom columns let you implement your own generation logic using Python functions. Use them for multi-step LLM workflows, external API integration, or any scenario requiring full programmatic control. For reusable, distributable components, see [Plugins](/docs/plugins/overview) instead. + +## Quick Start + +```python +import data_designer.config as dd + +@dd.custom_column_generator(required_columns=["name"]) +def create_greeting(row: dict) -> dict: + row["greeting"] = f"Hello, {row['name']}!" + return row + +config_builder.add_column( + dd.CustomColumnConfig( + name="greeting", + generator_function=create_greeting, + ) +) +``` + +## Function Signatures + +Three signatures are supported. 
**Parameter names are validated**: + +| Args | Signature | Use Case | +|------|-----------|----------| +| 1 | `fn(row) -> dict` | Simple transforms | +| 2 | `fn(row, generator_params) -> dict` | With typed params | +| 3 | `fn(row, generator_params, models) -> dict` | LLM access via models dict | + +For `full_column` strategy, use `df` instead of `row`. + +For LLM access without params, use `generator_params: None`: + +```python +@dd.custom_column_generator(required_columns=["name"], model_aliases=["my-model"]) +def generate_message(row: dict, generator_params: None, models: dict) -> dict: + response, _ = models["my-model"].generate(prompt=f"Greet {row['name']}") + row["greeting"] = response + return row +``` + +Model aliases are validated before generation starts. If an alias doesn't exist in your config, an error is raised during the health check. + +## Generation Strategies + +| Strategy | Input | Use Case | +|----------|-------|----------| +| `cell_by_cell` (default) | `row: dict` | LLM calls, row-by-row logic | +| `full_column` | `df: DataFrame` | Vectorized DataFrame operations | + +**Recommendation:** Use `cell_by_cell` for LLM calls. The framework handles parallelization automatically. Use `full_column` only for vectorized operations that don't involve LLM calls. + +For `full_column`, set `generation_strategy=dd.GenerationStrategy.FULL_COLUMN`. + +## The Decorator + +```python +@dd.custom_column_generator( + required_columns=["col1"], # DAG ordering + side_effect_columns=["extra"], # Additional columns created + model_aliases=["model1"], # Required for LLM access +) +``` + +## Models Dict + +The third argument is a dict of `ModelFacade` instances, keyed by alias. **You must declare all models required in your custom column generator in `model_aliases`** - this populates the `models` dict and enables health checks before generation starts. + +```python +@dd.custom_column_generator(model_aliases=["my-model"]) +def my_generator(row: dict, generator_params: None, models: dict) -> dict: + model = models["my-model"] + response, trace = model.generate( + prompt="...", + parser=my_custom_parser, # optional, defaults to identity + system_prompt="...", + max_correction_steps=3, + ) + row["result"] = response + return row +``` + +This gives you direct access to all `ModelFacade` capabilities: custom parsers, correction loops, structured output, tool use, etc. 
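+
+The `my_custom_parser` shown above is a placeholder. As a rough sketch of what such a parser might look like (assuming a parser is simply a callable that receives the raw model response and returns the value to store, raising on malformed output so the correction loop can retry):
+
+```python
+import json
+
+def my_custom_parser(response: str) -> dict:
+    # Assumed contract: return the parsed value; an exception signals a parse
+    # failure that the correction loop (max_correction_steps) can recover from.
+    return json.loads(response.strip())
+```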
+ +## Configuration + +| Parameter | Type | Required | Description | +|-----------|------|----------|-------------| +| `name` | str | Yes | Column name | +| `generator_function` | Callable | Yes | Decorated function | +| `generation_strategy` | GenerationStrategy | No | `CELL_BY_CELL` or `FULL_COLUMN` | +| `generator_params` | BaseModel | No | Typed params passed to function | + +## Multi-Turn Example + +```python +@dd.custom_column_generator( + required_columns=["topic"], + side_effect_columns=["draft", "critique"], + model_aliases=["writer", "editor"], +) +def writer_editor(row: dict, generator_params: None, models: dict) -> dict: + draft, _ = models["writer"].generate(prompt=f"Write about '{row['topic']}'") + critique, _ = models["editor"].generate(prompt=f"Critique: {draft}") + revised, _ = models["writer"].generate(prompt=f"Revise based on: {critique}\n\nOriginal: {draft}") + + row["final_text"] = revised + row["draft"] = draft + row["critique"] = critique + return row +``` + +## Development Testing + +Test generators with real LLM calls without running the full pipeline: + +```python +data_designer = DataDesigner() +models = data_designer.get_models(["my-model"]) +result = my_generator({"name": "Alice"}, None, models) +``` + +## See Also + +- [Column Configs Reference](/api/column-configs) +- [Plugins Overview](/docs/plugins/overview) diff --git a/fern/v0.5.0/pages/concepts/deployment-options.mdx b/fern/v0.5.0/pages/concepts/deployment-options.mdx new file mode 100644 index 00000000..ec552444 --- /dev/null +++ b/fern/v0.5.0/pages/concepts/deployment-options.mdx @@ -0,0 +1,171 @@ +--- +title: Deployment Options +description: Choose between the open-source library and NeMo Microservice deployment. +--- + +Data Designer is available as both an **open-source library** and a **NeMo Microservice**. This guide helps you choose the right deployment option for your use case. + +## Deployment Architectures at a Glance + +Data Designer supports three main deployment patterns: + + + + Each user runs the library locally and connects to their choice of LLM provider. + + ![Library with Decentralized Providers](/assets/images/deployment-library-decentralized.png) + + + Users run the library locally but share a centralized enterprise LLM gateway with RBAC and governance. + + ![Library with Enterprise Gateway](/assets/images/deployment-enterprise-gateway.png) + + + A centralized SDG service that multiple users access via REST API. + + ![SDG Microservice](/assets/images/deployment-microservice.png) + + + +## Quick Comparison + +| Aspect | Open-Source Library | NeMo Microservice | +|--------|---------------------|-------------------| +| **What it is** | Python package you import and run | REST API service exposing `preview` and `create` methods | +| **Best for** | Developers with LLM access who want flexibility and customization | Teams using NeMo Microservices platform | +| **LLM Access** | You provide (any OpenAI-compatible API) | Integrated with NeMo Microservices Platform | +| **Installation** | `pip install data-designer` | Deploy via NeMo Microservices platform | +| **Scaling** | You manage inference capacity | Managed alongside other NeMo services | + + +Both the library and microservice use the **same `DataDesignerConfigBuilder` API**. Start with the library, and your configurations migrate seamlessly if you later adopt the NeMo platform. + + +## When to Use the Open-Source Library + +The library is the right choice for most users. 
Choose it if you: + +### You Have Access to LLMs + +![Library with Decentralized Providers](/assets/images/deployment-library-decentralized.png) + +You have API keys or endpoints for LLM inference: + +- **Cloud APIs**: NVIDIA API Catalog (build.nvidia.com), OpenAI, Azure OpenAI, Anthropic +- **Self-hosted**: vLLM, TGI, TensorRT-LLM, or any OpenAI-compatible server +- **Enterprise gateways**: Centralized LLM gateway with RBAC, rate limiting, or other enterprise features + +```python +from data_designer.interface import DataDesigner +from data_designer.config import ModelConfig + +# Use any OpenAI-compatible endpoint +model = ModelConfig( + alias="my-model", + model="nvidia/nemotron-3-nano-30b-a3b", + provider="nvidia", # or "openai", or a custom ModelProvider +) + +dd = DataDesigner() +# Your code controls the full workflow +``` + +### You Need Maximum Flexibility + +- **Custom plugins**: Extend Data Designer with custom column generators, validators, or processors +- **Local development**: Rapid iteration with immediate feedback +- **Integration**: Embed Data Designer into existing Python pipelines or notebooks +- **Experimentation**: Research workflows with custom models or configurations + +### You Already Have Enterprise LLM Infrastructure + +![Library with Enterprise Gateway](/assets/images/deployment-enterprise-gateway.png) + + +Many enterprises already have centralized LLM access through API gateways with: + +- Role-based access control (RBAC) +- Rate limiting and quotas +- Audit logging +- Cost allocation + +In this case, **use the library** and point it at your enterprise gateway. You get enterprise-grade LLM access while retaining full control over your Data Designer workflows. + + +```python +from data_designer.config import ModelConfig, ModelProvider + +# Define your enterprise gateway as a provider +enterprise_provider = ModelProvider( + name="enterprise-gateway", + endpoint="https://llm-gateway.yourcompany.com/v1", + api_key="ENTERPRISE_LLM_KEY", # Environment variable name (uppercase) or actual key +) + +# Use the provider in your model config +model = ModelConfig( + alias="enterprise-llm", + model="gpt-4", + provider="enterprise-gateway", # References the provider above +) +``` + +## When to Use the Microservice + +![SDG Microservice](/assets/images/deployment-microservice.png) + +The NeMo Microservice exposes Data Designer's `preview` and `create` methods as REST API endpoints. Choose it if you: + +### You're Using the NeMo Microservices Platform + +The primary value of the microservice is **integration with other NeMo Microservices**: + +- **NeMo Inference Microservices (NIMs)**: Seamless integration with NVIDIA's optimized inference endpoints +- **NeMo Customizer**: Generate synthetic data for model fine-tuning workflows +- **NeMo Evaluator**: Create evaluation datasets alongside model assessment +- **Unified deployment**: Single platform for your entire AI pipeline + +### You Want to Expose SDG as a Team Service + +If you need to provide synthetic data generation as a shared service: + +- **Multi-tenant access**: Multiple teams submit generation jobs via API +- **Job management**: Queue, monitor, and manage generation jobs centrally +- **Resource sharing**: Shared infrastructure for SDG workloads + +## Decision Flowchart + +``` + ┌─────────────────────────┐ + │ Are you using the NeMo │ + │ Microservices platform? 
│ + └───────────┬─────────────┘ + │ + ┌───────────┴───────────┐ + ▼ ▼ + YES NO + │ │ + ▼ ▼ + ┌───────────────────┐ ┌───────────────────────────┐ + │ Use Microservice │ │ Do you need to expose SDG │ + │ │ │ as a shared REST service? │ + │ Integrates with │ └─────────────┬─────────────┘ + │ NIMs, Customizer, │ │ + │ Evaluator │ ┌───────────┴───────────┐ + └───────────────────┘ ▼ ▼ + YES NO + │ │ + ▼ ▼ + ┌─────────────────────┐ ┌─────────────────┐ + │ Consider if the │ │ Use the Library │ + │ overhead is worth │ │ │ + │ it vs. library + │ │ Most flexible │ + │ enterprise gateway │ │ option for │ + └─────────────────────┘ │ direct use │ + └─────────────────┘ +``` + +## Learn More + +- **Library**: Continue with this documentation +- **Microservice**: See the [NeMo Data Designer Microservice documentation](https://docs.nvidia.com/nemo/microservices/latest/design-synthetic-data-from-scratch-or-seeds/index.html) diff --git a/fern/v0.5.0/pages/concepts/mcp/configure-mcp-cli.mdx b/fern/v0.5.0/pages/concepts/mcp/configure-mcp-cli.mdx new file mode 100644 index 00000000..76160790 --- /dev/null +++ b/fern/v0.5.0/pages/concepts/mcp/configure-mcp-cli.mdx @@ -0,0 +1,142 @@ +--- +title: Configuring MCP Using the CLI +description: Use the interactive CLI to create and manage MCP providers and tool configurations. +--- + +The Data Designer CLI provides an interactive interface for creating and managing MCP providers and tool configurations stored in your Data Designer home directory (default: `~/.data-designer/`). + +## Configuration Files + +The CLI manages two YAML configuration files for MCP: + +- **`mcp_providers.yaml`**: MCP provider configurations +- **`tool_configs.yaml`**: Tool configurations + + +You can customize the configuration directory location with the `DATA_DESIGNER_HOME` environment variable: + +```bash +export DATA_DESIGNER_HOME="/path/to/your/custom/directory" +``` + + +## CLI Commands + +The Data Designer CLI provides commands for MCP configuration: + +```bash +# Configure MCP providers +data-designer config mcp + +# Configure tool configs +data-designer config tools + +# List all configurations (including MCP) +data-designer config list +``` + + +See available commands: + +```bash +data-designer config --help +``` + + +## Configuring MCP Providers + +Run the interactive MCP provider configuration command: + +```bash +data-designer config mcp +``` + +### Provider Type Selection + +The wizard first asks you to choose a provider type: + +1. **Remote SSE**: Connect to a pre-existing MCP server via HTTP Server-Sent Events +2. 
**Local stdio subprocess**: Launch an MCP server as a subprocess + +### Remote SSE Configuration + +When configuring a Remote SSE provider, you'll be prompted for: + +- **Name**: Unique identifier (e.g., `"doc-search"`) +- **Endpoint**: SSE endpoint URL (e.g., `"http://localhost:8080/sse"`) +- **API Key**: Optional API key or environment variable name + +### Local Stdio Configuration + +When configuring a Local stdio provider, you'll be prompted for: + +- **Name**: Unique identifier (e.g., `"local-tools"`) +- **Command**: Executable to run (e.g., `"python"`) +- **Arguments**: Command-line arguments (e.g., `"-m my_mcp_server"`) +- **Environment Variables**: Optional environment variables for the subprocess + +### Available Operations + +- **Add a new provider**: Define a new MCP provider +- **Update an existing provider**: Modify provider settings +- **Delete a provider**: Remove a provider +- **Delete all providers**: Remove all MCP providers + +## Configuring Tool Configs + +Run the interactive tool configuration command: + +```bash +data-designer config tools +``` + + +You need at least one MCP provider configured before adding tool configs. Run `data-designer config mcp` first if none exist. + + +### Configuration Options + +When creating a tool config, you'll be prompted for: + +- **Tool Alias**: Unique name for referencing in columns (e.g., `"my-tools"`) +- **Providers**: Select one or more MCP providers (checkbox selection) +- **Allowed Tools**: Optionally restrict to specific tools (leave empty for all) +- **Max Tool Call Turns**: Maximum tool-calling iterations (default: 5) +- **Timeout**: Per-call timeout in seconds (default: 60.0) + +### Available Operations + +- **Add a new tool config**: Define a new tool configuration +- **Update an existing tool config**: Modify settings +- **Delete a tool config**: Remove a tool configuration +- **Delete all tool configs**: Remove all tool configurations + +## Listing Configurations + +View all current configurations: + +```bash +data-designer config list +``` + +This command displays: + +- **Model Providers**: All configured model providers +- **Model Configurations**: All configured models +- **MCP Providers**: All configured MCP providers with their endpoints +- **Tool Configurations**: All configured tool configs with their settings + +## Manual Editing + +You can also edit the YAML files directly for advanced configurations. The files are located at: + +- `~/.data-designer/mcp_providers.yaml` +- `~/.data-designer/tool_configs.yaml` + +After manual edits, the changes take effect the next time you initialize `DataDesigner`. + +## See Also + +- **[MCP Providers](/docs/concepts/mcp/mcp-providers)**: Learn about provider configuration options +- **[Tool Configurations](/docs/concepts/mcp/tool-configs)**: Learn about tool config options +- **[Configure Model Settings with the CLI](/docs/concepts/models/configure-with-cli)**: CLI guide for model configuration diff --git a/fern/v0.5.0/pages/concepts/mcp/enabling-tools.mdx b/fern/v0.5.0/pages/concepts/mcp/enabling-tools.mdx new file mode 100644 index 00000000..9fc682d6 --- /dev/null +++ b/fern/v0.5.0/pages/concepts/mcp/enabling-tools.mdx @@ -0,0 +1,108 @@ +--- +title: Enabling Tools on Columns +description: Connect LLM columns to tool configurations using the tool_alias parameter. +--- + +This guide explains how to enable tool use on LLM columns by connecting them to tool configurations via the `tool_alias` parameter. 
+ +## Overview + +To enable tool use on an LLM column, you reference a `ToolConfig` by its alias. During generation, the model can then request tool calls, and Data Designer executes them and feeds the results back to the model. + +## Using tool_alias + +Add the `tool_alias` parameter to any supported LLM column configuration: + +```python +import data_designer.config as dd + +builder.add_column( + dd.LLMTextColumnConfig( + name="answer", + prompt="Use tools as needed to answer: {{ question }}", + model_alias="nvidia-text", + tool_alias="my-tools", # References a ToolConfig + ) +) +``` + +## Supported Column Types + +Tool use is supported on these column configuration types: + +| Column Type | Description | +|------------|-------------| +| `LLMTextColumnConfig` | Text generation with tool access | +| `LLMCodeColumnConfig` | Code generation with tool access | +| `LLMStructuredColumnConfig` | Structured JSON generation with tool access | +| `LLMJudgeColumnConfig` | Judge/scoring with tool access | + +## How It Works + +When `tool_alias` is specified: + +1. **Tool schemas are fetched** from the referenced MCP providers +2. **Model receives tool schemas** with the prompt +3. **Model can request tool calls** in its response +4. **Data Designer executes calls** and returns results to the model +5. **Iteration continues** until the model produces a final answer (or limits are reached) + +## Complete Example + +Here's a complete workflow showing provider → ToolConfig → column: + +```python +import data_designer.config as dd +from data_designer.interface import DataDesigner + +# 1. Configure MCP provider +mcp_provider = dd.LocalStdioMCPProvider( + name="demo-mcp", + command="python", + args=["-m", "my_mcp_server"], +) + +# 2. Create DataDesigner instance with provider +data_designer = DataDesigner(mcp_providers=[mcp_provider]) + +# 3. Define tool configuration +tool_config = dd.ToolConfig( + tool_alias="my-tools", + providers=["demo-mcp"], + allow_tools=["search_docs", "get_fact"], + max_tool_call_turns=5, +) + +# 4. Create config builder with tool config +builder = dd.DataDesignerConfigBuilder(tool_configs=[tool_config]) + +# 5. Add columns that use tools +builder.add_column( + dd.SamplerColumnConfig( + name="question", + sampler_type=dd.SamplerType.CATEGORY, + params=dd.CategorySamplerParams( + values=["What is machine learning?", "Explain neural networks"] + ), + ) +) + +builder.add_column( + dd.LLMTextColumnConfig( + name="answer", + prompt="Use the available tools to research and answer: {{ question }}", + model_alias="nvidia-text", + tool_alias="my-tools", # Enable tools + with_trace=dd.TraceType.ALL_MESSAGES, # Capture tool call history + ) +) + +# 6. Generate data +results = data_designer.preview(builder, num_records=5) +``` + +## See Also + +- **[Tool Configurations](/docs/concepts/mcp/tool-configs)**: Configure tool access and limits +- **[Traces](/docs/concepts/traces)**: Capture and inspect tool call history +- **[MCP Providers](/docs/concepts/mcp/mcp-providers)**: Configure MCP server connections diff --git a/fern/v0.5.0/pages/concepts/mcp/mcp-providers.mdx b/fern/v0.5.0/pages/concepts/mcp/mcp-providers.mdx new file mode 100644 index 00000000..92970fe9 --- /dev/null +++ b/fern/v0.5.0/pages/concepts/mcp/mcp-providers.mdx @@ -0,0 +1,147 @@ +--- +title: MCP Providers +description: Configure local subprocess or remote SSE connections to MCP tool servers. +--- + +MCP providers are external services that host and serve tools via the Model Context Protocol (MCP). 
Data Designer uses provider configurations to establish connections to these services. + +## Overview + +An MCP provider defines how Data Designer connects to a tool server. Data Designer supports two provider types: + +| Provider Class | Connection Method | Use Case | +|---------------|-------------------|----------| +| `MCPProvider` | HTTP Server-Sent Events | Connect to a pre-existing MCP server | +| `LocalStdioMCPProvider` | Subprocess via stdin/stdout | Launch an MCP server as a subprocess | + +When you create a `ToolConfig`, you reference providers by name, and Data Designer uses those provider settings to communicate with the appropriate MCP servers. + +## MCPProvider (Remote SSE) + +Use `MCPProvider` to connect to a pre-existing MCP server via Server-Sent Events: + +```python +import data_designer.config as dd +from data_designer.interface import DataDesigner + +mcp_provider = dd.MCPProvider( + name="remote-mcp", + endpoint="http://localhost:8080/sse", + api_key="MCP_API_KEY", # Environment variable name +) + +data_designer = DataDesigner(mcp_providers=[mcp_provider]) +``` + +### MCPProvider Fields + +| Field | Type | Required | Description | +|-------|------|----------|-------------| +| `name` | `str` | Yes | Unique identifier for the provider | +| `endpoint` | `str` | Yes | SSE endpoint URL (e.g., `"http://localhost:8080/sse"`) | +| `api_key` | `str` | No | API key or environment variable name | +| `provider_type` | `str` | No | Always `"sse"` (set automatically) | + +## LocalStdioMCPProvider (Subprocess) + +Use `LocalStdioMCPProvider` to launch an MCP server as a subprocess: + +```python +import data_designer.config as dd +from data_designer.interface import DataDesigner + +mcp_provider = dd.LocalStdioMCPProvider( + name="demo-mcp", + command="python", + args=["-m", "my_mcp_server_module"], + env={"MY_SERVICE_TOKEN": "..."}, +) + +data_designer = DataDesigner(mcp_providers=[mcp_provider]) +``` + +### LocalStdioMCPProvider Fields + +| Field | Type | Required | Description | +|-------|------|----------|-------------| +| `name` | `str` | Yes | Unique identifier for the provider | +| `command` | `str` | Yes | Executable to run (e.g., `"python"`, `"node"`) | +| `args` | `list[str]` | No | Command-line arguments | +| `env` | `dict[str, str]` | No | Environment variables for the subprocess | +| `provider_type` | `str` | No | Always `"stdio"` (set automatically) | + +## API Key Configuration + +The `api_key` field can be specified in two ways: + +1. **Environment variable name** (recommended): Set `api_key` to the name of an environment variable (e.g., `"MCP_API_KEY"`). Data Designer will resolve it at runtime. + +2. **Plain-text value**: Set `api_key` to the actual API key string. This is less secure and not recommended for production. + +```python +# Method 1: Environment variable (recommended) +provider = dd.MCPProvider( + name="secure-mcp", + endpoint="https://mcp.example.com/sse", + api_key="MCP_API_KEY", # Will be resolved from environment +) + +# Method 2: Direct value (not recommended) +provider = dd.MCPProvider( + name="secure-mcp", + endpoint="https://mcp.example.com/sse", + api_key="actual-api-key-value", +) +``` + +## YAML Configuration + +Both provider types use a `provider_type` discriminator field in YAML configurations. 
When writing YAML configs manually (e.g., in `~/.data-designer/mcp_providers.yaml`), include the discriminator: + +```yaml +providers: + # Remote SSE provider + - name: doc-search + provider_type: sse + endpoint: http://localhost:8080/sse + api_key: ${MCP_API_KEY} + + # Local stdio provider + - name: local-tools + provider_type: stdio + command: python + args: + - -m + - my_mcp_server + env: + DEBUG: "true" +``` + +## Using Multiple Providers + +You can configure multiple MCP providers and use them together in a single `ToolConfig`: + +```python +import data_designer.config as dd +from data_designer.interface import DataDesigner + +providers = [ + dd.MCPProvider( + name="doc-search-mcp", + endpoint="http://localhost:8080/sse", + ), + dd.LocalStdioMCPProvider( + name="calculator-mcp", + command="python", + args=["-m", "calculator_mcp"], + ), +] + +data_designer = DataDesigner(mcp_providers=providers) +``` + +## See Also + +- **[Tool Configurations](/docs/concepts/mcp/tool-configs)**: Configure tool access with ToolConfig +- **[Configure MCP with the CLI](/docs/concepts/mcp/configure-mcp-cli)**: Use the CLI to manage MCP providers +- **[Enabling Tools on Columns](/docs/concepts/mcp/enabling-tools)**: Use tools in LLM columns diff --git a/fern/v0.5.0/pages/concepts/mcp/safety-and-limits.mdx b/fern/v0.5.0/pages/concepts/mcp/safety-and-limits.mdx new file mode 100644 index 00000000..98df523d --- /dev/null +++ b/fern/v0.5.0/pages/concepts/mcp/safety-and-limits.mdx @@ -0,0 +1,148 @@ +--- +title: Safety and Limits +description: Configure tool allowlists, turn budgets, and timeouts for safe tool use. +--- + +This guide covers the safety controls available for tool use, including allowlists, turn budgets, and timeouts. These controls help prevent runaway loops and ensure predictable generation behavior. + +## Overview + +When LLM columns use tools, the model can make multiple tool calls in a loop until it produces a final answer. Without limits, this could lead to: + +- Excessive API calls and costs +- Long generation times +- Infinite loops if the model keeps requesting tools + +Data Designer provides three types of controls: + +| Control | Purpose | +|---------|---------| +| **Tool allowlists** | Restrict which tools can be called | +| **Turn budgets** | Limit iterations of tool-calling | +| **Timeouts** | Cap individual tool call latency | + +## Tool Allowlists + +Restrict which tools are available using `allow_tools`: + +```python +import data_designer.config as dd + +tool_config = dd.ToolConfig( + tool_alias="restricted-tools", + providers=["demo-mcp"], + allow_tools=["search_docs", "list_docs"], # Only these tools +) +``` + +### Behavior + +| Setting | Behavior | +|---------|----------| +| `allow_tools=None` (default) | All tools from the providers are available | +| `allow_tools=["tool1", "tool2"]` | Only the specified tools are available | + +Tools not in the allowlist won't be included in the schemas sent to the model, so the model won't know they exist. + + +If your MCP providers expose tools that could be dangerous or expensive, use allowlists to restrict access to only the tools you need. + + +## Turn Budgets + +Limit the number of tool-calling iterations using `max_tool_call_turns`: + +```python +import data_designer.config as dd + +tool_config = dd.ToolConfig( + tool_alias="limited-tools", + providers=["demo-mcp"], + max_tool_call_turns=5, # Maximum 5 iterations (default) +) +``` + +### Understanding Turns + +A **turn** is one iteration where the LLM requests tool calls. 
With parallel tool calling, a single turn may execute multiple tools simultaneously. + +| Scenario | Turn Count | +|----------|------------| +| Model requests 1 tool | 1 turn | +| Model requests 3 tools in parallel | 1 turn | +| Model requests 1 tool, then 2 more, then 1 more | 3 turns | + +This approach gives models flexibility to use parallel calling efficiently while still bounding total iterations. + +### Graceful Budget Exhaustion + +When the turn limit is reached, Data Designer doesn't abruptly stop generation. Instead: + +1. The model's tool call request is recorded in the conversation +2. Tool "results" are returned with a refusal message explaining the limit was reached +3. The model receives this feedback and can produce a final response + +This ensures the model can still provide a useful answer based on the tools it already called, rather than failing silently. + +## Timeouts + +Limit how long each tool call can take using `timeout_sec`: + +```python +import data_designer.config as dd + +tool_config = dd.ToolConfig( + tool_alias="fast-tools", + providers=["demo-mcp"], + timeout_sec=30.0, # 30 seconds per tool call +) +``` + +### Timeout Behavior + +When a timeout occurs: + +1. The tool call is terminated +2. An error message is returned to the model +3. The model can attempt recovery (retry, skip, or answer without the result) + +```python +# Example error in trace when timeout occurs +{ + "role": "tool", + "content": "Error: Tool 'search_docs' failed: Connection timeout after 30s", + "tool_call_id": "call_abc123" +} +``` + +### Default Timeout + +The default timeout is 60 seconds. Adjust based on your tools: + +| Tool Type | Recommended Timeout | +|-----------|---------------------| +| Fast lookups | 5-10 seconds | +| Database queries | 15-30 seconds | +| External API calls | 30-60 seconds | +| Complex computations | 60+ seconds | + +## Combining Controls + +You can use all controls together for defense in depth: + +```python +import data_designer.config as dd + +tool_config = dd.ToolConfig( + tool_alias="secure-tools", + providers=["demo-mcp"], + allow_tools=["search_docs", "get_fact"], # Restricted tools + max_tool_call_turns=3, # Limited iterations + timeout_sec=15.0, # Fast timeout +) +``` + +## See Also + +- **[Tool Configurations](/docs/concepts/mcp/tool-configs)**: Complete ToolConfig reference +- **[Traces](/docs/concepts/traces)**: Monitor tool usage patterns diff --git a/fern/v0.5.0/pages/concepts/mcp/tool-configs.mdx b/fern/v0.5.0/pages/concepts/mcp/tool-configs.mdx new file mode 100644 index 00000000..6d9a477e --- /dev/null +++ b/fern/v0.5.0/pages/concepts/mcp/tool-configs.mdx @@ -0,0 +1,116 @@ +--- +title: Tool Configurations +description: Define how LLM columns access external tools during generation with ToolConfig. +--- + +Tool configurations define how LLM columns access external tools during generation. Each `ToolConfig` specifies which MCP providers to use, which tools are allowed, and operational limits. + +## Overview + +A `ToolConfig` connects LLM columns to MCP providers. When you create column configurations (like `LLMTextColumnConfig` or `LLMCodeColumnConfig`), you reference a tool configuration by its alias. Data Designer uses the tool configuration to determine which tools are available and how to manage tool calls. 
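+
+A minimal sketch of that wiring (assuming an MCP provider named `demo-mcp` has already been registered, a `question` column exists elsewhere in the configuration, and a default model alias such as `nvidia-text` is available; the fields used here are described in the sections below):
+
+```python
+import data_designer.config as dd
+
+tool_config = dd.ToolConfig(tool_alias="my-tools", providers=["demo-mcp"])
+
+builder = dd.DataDesignerConfigBuilder(tool_configs=[tool_config])
+builder.add_column(
+    dd.LLMTextColumnConfig(
+        name="answer",
+        prompt="Use tools as needed to answer: {{ question }}",
+        model_alias="nvidia-text",
+        tool_alias="my-tools",  # links this column to the ToolConfig above
+    )
+)
+```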
+ +## ToolConfig Structure + +The `ToolConfig` class has the following fields: + +| Field | Type | Required | Description | +|-------|------|----------|-------------| +| `tool_alias` | `str` | Yes | Unique identifier for this tool configuration (referenced by columns) | +| `providers` | `list[str]` | Yes | List of MCP provider names to use (can reference multiple providers) | +| `allow_tools` | `list[str]` | No | Restrict to specific tools (`None` = allow all tools from providers) | +| `max_tool_call_turns` | `int` | No | Maximum tool-calling iterations (default: 5) | +| `timeout_sec` | `float` | No | Per-call timeout in seconds (default: 60.0) | + +## Examples + +### Basic Tool Configuration + +```python +import data_designer.config as dd + +tool_config = dd.ToolConfig( + tool_alias="my-tools", + providers=["demo-mcp"], +) +``` + +### Restricting Allowed Tools + +```python +import data_designer.config as dd + +tool_config = dd.ToolConfig( + tool_alias="search-only", + providers=["demo-mcp"], + allow_tools=["search_docs", "list_docs"], # Only these tools allowed +) +``` + +### Using Multiple Providers + +A single `ToolConfig` can reference multiple MCP providers, allowing tools to be drawn from different sources: + +```python +import data_designer.config as dd + +tool_config = dd.ToolConfig( + tool_alias="multi-search", + providers=["doc-search-mcp", "web-search-mcp"], + allow_tools=["search_docs", "search_web", "list_docs"], + max_tool_call_turns=10, +) +``` + +When the model requests a tool call, Data Designer automatically finds which provider hosts that tool and routes the call appropriately. + +### Setting Operational Limits + +```python +import data_designer.config as dd + +tool_config = dd.ToolConfig( + tool_alias="limited-tools", + providers=["demo-mcp"], + max_tool_call_turns=3, # Maximum 3 tool-calling iterations + timeout_sec=30.0, # 30 seconds per tool call +) +``` + +## Adding to Config Builder + +Tool configurations can be added to the config builder in two ways: + +```python +import data_designer.config as dd + +tool_config = dd.ToolConfig( + tool_alias="my-tools", + providers=["demo-mcp"], +) + +# Method 1: Pass at initialization +builder = dd.DataDesignerConfigBuilder(tool_configs=[tool_config]) + +# Method 2: Add later +builder = dd.DataDesignerConfigBuilder() +builder.add_tool_config(tool_config) +``` + +## Understanding Turn-Based Limiting + +The `max_tool_call_turns` parameter limits how many tool-calling iterations (turns) are permitted, not the total number of individual tool calls. + + +A **turn** is one iteration where the LLM requests tool calls. With parallel tool calling, a single turn may execute multiple tools simultaneously. + +For example, if the model requests 3 tools in parallel, that counts as 1 turn, not 3. This gives models flexibility to use parallel calling efficiently while still bounding total iterations. + + +When the turn limit is reached, Data Designer gracefully refuses additional tool calls rather than failing abruptly. The model receives feedback explaining the limit was reached and can produce a final response based on the tools it already called. 
+ +## See Also + +- **[MCP Providers](/docs/concepts/mcp/mcp-providers)**: Configure connections to MCP servers +- **[Enabling Tools on Columns](/docs/concepts/mcp/enabling-tools)**: Reference tool configs from LLM columns +- **[Safety and Limits](/docs/concepts/mcp/safety-and-limits)**: Detailed guide on tool safety controls +- **[Configure MCP with the CLI](/docs/concepts/mcp/configure-mcp-cli)**: Use the CLI to manage tool configurations diff --git a/fern/v0.5.0/pages/concepts/models/configure-with-cli.mdx b/fern/v0.5.0/pages/concepts/models/configure-with-cli.mdx new file mode 100644 index 00000000..90b58bf1 --- /dev/null +++ b/fern/v0.5.0/pages/concepts/models/configure-with-cli.mdx @@ -0,0 +1,148 @@ +--- +title: Configuring Model Settings Using The CLI +description: Use the Data Designer CLI to manage model providers and configurations. +--- + +The Data Designer CLI provides an interactive interface for creating and managing default model providers and model configurations stored in your Data Designer home directory (default: `~/.data-designer/`). + +## Configuration Files + +The CLI manages two YAML configuration files: + +- **`model_providers.yaml`**: Model provider configurations +- **`model_configs.yaml`**: Model configurations + + +If these configuration files don't already exist, the Data Designer library automatically creates them with default settings at runtime when first initialized. + + + +You can customize the configuration directory location with the `DATA_DESIGNER_HOME` environment variable: + +```bash +export DATA_DESIGNER_HOME="/path/to/your/custom/directory" +``` + + +## CLI Commands + +The Data Designer CLI provides four main configuration commands: + +```bash +# Configure model providers +data-designer config providers + +# Configure models +data-designer config models + +# List current configurations +data-designer config list + +# Reset all configurations +data-designer config reset +``` + + +See available commands + +```bash +data-designer --help +``` + +See available sub-commands + +```bash +data-designer config --help +``` + + +## Managing Model Providers + +Run the interactive provider configuration command: + +```bash +data-designer config providers +``` + +### Available Operations + +**Add a new provider**: Define a new provider by entering its name, endpoint URL, provider type, and optionally an API key (as plain text or as an environment variable name). + +**Update an existing provider**: Modify an existing provider's settings. All fields are pre-filled with current values. + +**Delete a provider**: Remove a provider and its associated models. + +**Delete all providers**: Remove all providers and their associated models. + +**Change default provider**: Set which provider is used by default. This option is only available when multiple providers are configured. + +## Managing Model Configurations + +Run the interactive model configuration command: + +```bash +data-designer config models +``` + + +You need at least one provider configured before adding models. Run `data-designer config providers` first if none exist. + + +### Available Operations + +**Add a new model configuration** + +Create a new model configuration with the following fields: + +- **Alias**: A unique name for referencing this model in a column configuration. 
+- **Model ID**: The model identifier (e.g., `nvidia/nemotron-3-nano-30b-a3b`) +- **Provider**: Select from available providers (if multiple exist) +- **Temperature**: Sampling temperature (0.0 to 2.0) +- **Top P**: Nucleus sampling parameter (0.0 to 1.0) +- **Max Tokens**: Maximum output length (1 to 100000) + + +To configure additional inference parameter settings or use distribution-based inference parameters, edit the `model_configs.yaml` file directly. + + +**Update an existing model configuration**: Modify an existing model's configuration. All fields are pre-filled with current values. + +**Delete a model configuration**: Remove a single model configuration. + +**Delete all model configurations**: Remove all model configurations. The CLI will ask for confirmation before proceeding. + +## Listing Configurations + +View all current configurations: + +```bash +data-designer config list +``` + +This command displays: + +- **Model Providers**: All configured providers with their endpoints (API keys are masked) +- **Default Provider**: The currently selected default provider +- **Model Configurations**: All configured models with their settings + +## Resetting Configurations + +Delete all configuration files: + +```bash +data-designer config reset +``` + +The CLI will show which configuration files exist and ask for confirmation before deleting them. + + +This command permanently deletes all configuration files and resets to the default model providers and configurations. You'll need to reconfigure your custom configurations from scratch. + + +## See Also + +- **[Default Model Settings](/docs/concepts/models/default-model-settings)**: Pre-configured providers and model settings included with Data Designer +- **[Custom Model Settings](/docs/concepts/models/custom-model-settings)**: Learn how to create custom providers and model configurations +- **[Model Providers](/docs/concepts/models/model-providers)**: Learn about the `ModelProvider` class and provider configuration +- **[Model Configurations](/docs/concepts/models/model-configs)**: Learn about `ModelConfig` +- **[Quick Start Guide](/docs/quick-start)**: Get started with a simple example diff --git a/fern/v0.5.0/pages/concepts/models/custom-model-settings.mdx b/fern/v0.5.0/pages/concepts/models/custom-model-settings.mdx new file mode 100644 index 00000000..cf05630b --- /dev/null +++ b/fern/v0.5.0/pages/concepts/models/custom-model-settings.mdx @@ -0,0 +1,220 @@ +--- +title: Custom Model Settings +description: Create custom providers and model configurations for Data Designer. +--- + +While Data Designer ships with pre-configured model providers and configurations, you can create custom configurations to use different models, adjust inference parameters, or connect to custom API endpoints. 
+ +## When to Use Custom Settings + +Use custom model settings when you need to: + +- Use models not included in the defaults +- Adjust inference parameters (temperature, top_p, max_tokens) for specific use cases +- Add distribution-based inference parameters for variability +- Connect to self-hosted or custom model endpoints +- Create multiple variants of the same model with different settings + +## Creating and Using Custom Settings + +### Custom Models with Default Providers + +Create custom model configurations that use the default providers (no need to define providers yourself): + +```python +import data_designer.config as dd +from data_designer.interface import DataDesigner + +# Create custom models using default providers +custom_models = [ + # High-temperature for more variability + dd.ModelConfig( + alias="creative-writer", + model="nvidia/nemotron-3-nano-30b-a3b", + provider="nvidia", # Uses default NVIDIA provider + inference_parameters=dd.ChatCompletionInferenceParams( + temperature=1.2, + top_p=0.98, + max_tokens=4096, + ), + ), + # Low-temperature for less variability + dd.ModelConfig( + alias="fact-checker", + model="nvidia/nemotron-3-nano-30b-a3b", + provider="nvidia", # Uses default NVIDIA provider + inference_parameters=dd.ChatCompletionInferenceParams( + temperature=0.1, + top_p=0.9, + max_tokens=2048, + ), + ), +] + +# Create DataDesigner (uses default providers) +data_designer = DataDesigner() + +# Pass custom models to config builder +config_builder = dd.DataDesignerConfigBuilder(model_configs=custom_models) + +# Add a topic column using a categorical sampler +config_builder.add_column( + dd.SamplerColumnConfig( + name="topic", + sampler_type=dd.SamplerType.CATEGORY, + params=dd.CategorySamplerParams( + values=["Artificial Intelligence", "Space Exploration", "Ancient History", "Climate Science"], + ), + ) +) + +# Use your custom models +config_builder.add_column( + dd.LLMTextColumnConfig( + name="creative_story", + model_alias="creative-writer", + prompt="Write a creative short story about {{topic}}.", + ) +) + +config_builder.add_column( + dd.LLMTextColumnConfig( + name="facts", + model_alias="fact-checker", + prompt="List 3 facts about {{topic}}.", + ) +) + +# Preview your dataset +preview_result = data_designer.preview(config_builder=config_builder) +preview_result.display_sample_record() +``` + + +When you only specify `model_configs`, the default model providers (NVIDIA, OpenAI, and OpenRouter) are still available. You only need to create custom providers if you want to connect to different endpoints or modify provider settings. + + + +When you provide custom `model_configs` to `DataDesignerConfigBuilder`, they **replace** the defaults entirely. To use custom model configs in addition to the default configs, use the add_model_config method: + +```python +import data_designer.config as dd + +# Load defaults first +config_builder = dd.DataDesignerConfigBuilder() + +# Add custom model to defaults +config_builder.add_model_config( + dd.ModelConfig( + alias="my-custom-model", + model="nvidia/llama-3.3-nemotron-super-49b-v1.5", + provider="nvidia", # Uses default provider + inference_parameters=dd.ChatCompletionInferenceParams( + temperature=0.6, + max_tokens=8192, + ), + ) +) + +# Now you can use both default and custom models +# Default: nvidia-text, nvidia-reasoning, nvidia-vision, etc. 
+# Custom: my-custom-model +``` + + +### Custom Providers with Custom Models + +Define both custom providers and custom model configurations when you need to connect to services not included in the defaults: + + +The custom provider endpoints must be reachable from where Data Designer runs. Ensure network connectivity, firewall rules, and any VPN requirements are properly configured. + + +```python +import data_designer.config as dd +from data_designer.interface import DataDesigner + +# Step 1: Define custom providers +custom_providers = [ + dd.ModelProvider( + name="my-custom-provider", + endpoint="https://api.my-llm-service.com/v1", + provider_type="openai", # OpenAI-compatible API + api_key="MY_SERVICE_API_KEY", # Environment variable name + ), + dd.ModelProvider( + name="my-self-hosted-provider", + endpoint="https://my-org.internal.com/llm/v1", + provider_type="openai", + api_key="SELF_HOSTED_API_KEY", + ), +] + +# Step 2: Define custom models +custom_models = [ + dd.ModelConfig( + alias="my-text-model", + model="openai/some-model-id", + provider="my-custom-provider", # References provider by name + inference_parameters=dd.ChatCompletionInferenceParams( + temperature=0.85, + top_p=0.95, + max_tokens=2048, + ), + ), + dd.ModelConfig( + alias="my-self-hosted-text-model", + model="openai/some-hosted-model-id", + provider="my-self-hosted-provider", + inference_parameters=dd.ChatCompletionInferenceParams( + temperature=0.7, + top_p=0.9, + max_tokens=1024, + ), + ), +] + +# Step 3: Create DataDesigner with custom providers +data_designer = DataDesigner(model_providers=custom_providers) + +# Step 4: Create config builder with custom models +config_builder = dd.DataDesignerConfigBuilder(model_configs=custom_models) + +# Step 5: Add a topic column using a categorical sampler +config_builder.add_column( + dd.SamplerColumnConfig( + name="topic", + sampler_type=dd.SamplerType.CATEGORY, + params=dd.CategorySamplerParams( + values=["Technology", "Healthcare", "Finance", "Education"], + ), + ) +) + +# Step 6: Use your custom model by referencing its alias +config_builder.add_column( + dd.LLMTextColumnConfig( + name="short_news_article", + model_alias="my-text-model", # Reference custom alias + prompt="Write a short news article about the '{{topic}}' topic in 10 sentences.", + ) +) + +config_builder.add_column( + dd.LLMTextColumnConfig( + name="long_news_article", + model_alias="my-self-hosted-text-model", # Reference custom alias + prompt="Write a detailed news article about the '{{topic}}' topic.", + ) +) + +# Step 7: Preview your dataset +preview_result = data_designer.preview(config_builder=config_builder) +preview_result.display_sample_record() +``` + +## See Also + +- **[Default Model Settings](/docs/concepts/models/default-model-settings)**: Pre-configured providers and model settings +- **[Configure Model Settings With the CLI](/docs/concepts/models/configure-with-cli)**: CLI-based configuration +- **[Quick Start Guide](/docs/quick-start)**: Basic usage example diff --git a/fern/v0.5.0/pages/concepts/models/default-model-settings.mdx b/fern/v0.5.0/pages/concepts/models/default-model-settings.mdx new file mode 100644 index 00000000..cedcc521 --- /dev/null +++ b/fern/v0.5.0/pages/concepts/models/default-model-settings.mdx @@ -0,0 +1,130 @@ +--- +title: Default Model Settings +description: Pre-configured model providers and configurations included with Data Designer. 
+--- + +Data Designer ships with pre-configured model providers and model configurations that make it easy to start generating synthetic data without manual setup. + +## Model Providers + +Data Designer includes a few default model providers that are configured automatically: + +### NVIDIA Provider (`nvidia`) + +- **Endpoint**: `https://integrate.api.nvidia.com/v1` +- **API Key**: Set via `NVIDIA_API_KEY` environment variable +- **Models**: Access to NVIDIA's hosted models from [build.nvidia.com](https://build.nvidia.com) +- **Getting Started**: Sign up and get your API key at [build.nvidia.com](https://build.nvidia.com) + +The NVIDIA provider gives you access to state-of-the-art models including Nemotron and other NVIDIA-optimized models. + +### OpenAI Provider (`openai`) + +- **Endpoint**: `https://api.openai.com/v1` +- **API Key**: Set via `OPENAI_API_KEY` environment variable +- **Models**: Access to OpenAI's model catalog +- **Getting Started**: Get your API key from [platform.openai.com/api-keys](https://platform.openai.com/api-keys) + +The OpenAI provider gives you access to GPT models and other OpenAI offerings. + +### OpenRouter Provider (`openrouter`) + +- **Endpoint**: `https://openrouter.ai/api/v1` +- **API Key**: Set via `OPENROUTER_API_KEY` environment variable +- **Models**: Access to a wide variety of models through OpenRouter's unified API +- **Getting Started**: Get your API key from [openrouter.ai](https://openrouter.ai) + +The OpenRouter provider gives you access to a unified interface for many different language models from various providers. + +## Model Configurations + +Data Designer provides pre-configured model aliases for common use cases. When you create a `DataDesignerConfigBuilder` without specifying `model_configs`, these default configurations are automatically available. 
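+
+For example, assuming `NVIDIA_API_KEY` is set and a `topic` column exists elsewhere in your configuration, a column can reference one of the default aliases listed below directly (a minimal sketch):
+
+```python
+import data_designer.config as dd
+
+config_builder = dd.DataDesignerConfigBuilder()  # default model configs load automatically
+
+config_builder.add_column(
+    dd.LLMTextColumnConfig(
+        name="summary",
+        model_alias="nvidia-text",  # default alias from the table below
+        prompt="Summarize the topic '{{ topic }}' in two sentences.",
+    )
+)
+```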
+ +### NVIDIA Models + +The following model configurations are automatically available when `NVIDIA_API_KEY` is set: + +| Alias | Model | Use Case | Inference Parameters | +|-------|-------|----------|---------------------| +| `nvidia-text` | `nvidia/nemotron-3-nano-30b-a3b` | General text generation | `temperature=1.0, top_p=1.0` | +| `nvidia-reasoning` | `openai/gpt-oss-20b` | Reasoning and analysis tasks | `temperature=0.35, top_p=0.95` | +| `nvidia-vision` | `nvidia/nemotron-nano-12b-v2-vl` | Vision and image understanding | `temperature=0.85, top_p=0.95` | +| `nvidia-embedding` | `nvidia/llama-3.2-nv-embedqa-1b-v2` | Text embeddings | `encoding_format="float", extra_body={"input_type": "query"}` | + + +### OpenAI Models + +The following model configurations are automatically available when `OPENAI_API_KEY` is set: + +| Alias | Model | Use Case | Inference Parameters | +|-------|-------|----------|---------------------| +| `openai-text` | `gpt-4.1` | General text generation | `temperature=0.85, top_p=0.95` | +| `openai-reasoning` | `gpt-5` | Reasoning and analysis tasks | `temperature=0.35, top_p=0.95` | +| `openai-vision` | `gpt-5` | Vision and image understanding | `temperature=0.85, top_p=0.95` | +| `openai-embedding` | `text-embedding-3-large` | Text embeddings | `encoding_format="float"` | + +### OpenRouter Models + +The following model configurations are automatically available when `OPENROUTER_API_KEY` is set: + +| Alias | Model | Use Case | Inference Parameters | +|-------|-------|----------|---------------------| +| `openrouter-text` | `nvidia/nemotron-3-nano-30b-a3b` | General text generation | `temperature=1.0, top_p=1.0` | +| `openrouter-reasoning` | `openai/gpt-oss-20b` | Reasoning and analysis tasks | `temperature=0.35, top_p=0.95` | +| `openrouter-vision` | `nvidia/nemotron-nano-12b-v2-vl` | Vision and image understanding | `temperature=0.85, top_p=0.95` | +| `openrouter-embedding` | `openai/text-embedding-3-large` | Text embeddings | `encoding_format="float"` | + + +## Using Default Settings + +Default settings work out of the box - no configuration needed! Simply create `DataDesigner` and `DataDesignerConfigBuilder` instances without any arguments, and reference the default model aliases in your column configurations. + +For a complete example showing how to use default model settings, see the **[Quick Start Guide](/docs/quick-start)**. + +### How Default Model Providers and Configurations Work + +When the Data Designer library or the CLI is initialized, default model configurations and providers are stored in the Data Designer home directory for easy access and customization if they do not already exist. These configuration files serve as the single source of truth for model settings. By default they are saved to the following paths: + +- **Model Configs**: `~/.data-designer/model_configs.yaml` +- **Model Providers**: `~/.data-designer/model_providers.yaml` + + +While these files provide a convenient way to specify settings for your model providers and configuration you use most often, they can always be set programmatically in your SDG workflow. + + +You can customize the home directory location by setting the `DATA_DESIGNER_HOME` environment variable: + +```bash +# In your .bashrc, .zshrc, or similar +export DATA_DESIGNER_HOME="/path/to/your/custom/directory" +``` + +These configuration files can be modified in two ways: + +1. **Using the CLI**: Run CLI commands to add, update, or delete model configurations and providers +2. 
**Manual editing**: Directly edit the YAML files with your preferred text editor + +Both methods operate on the same files, ensuring consistency across your entire Data Designer setup. + +## Important Notes + + +While default model configurations are always available, you need to set the appropriate API key environment variable (`NVIDIA_API_KEY`, `OPENAI_API_KEY`, or `OPENROUTER_API_KEY`) to actually use the corresponding models for data generation. Without a valid API key, any attempt to generate data using that provider's models will fail. + + + +Store your API keys in environment variables rather than hardcoding them in your scripts: + +```bash +# In your .bashrc, .zshrc, or similar +export NVIDIA_API_KEY="your-api-key-here" +export OPENAI_API_KEY="your-openai-api-key-here" +export OPENROUTER_API_KEY="your-openrouter-api-key-here" +``` + + +## See Also + +- **[Custom Model Settings](/docs/concepts/models/custom-model-settings)**: Learn how to create custom providers and model configurations +- **[Configure Model Settings With the CLI](/docs/concepts/models/configure-with-cli)**: Learn how to use the CLI to manage model settings +- **[Model Configurations](/docs/concepts/models/model-configs)**: Learn about model configurations diff --git a/fern/v0.5.0/pages/concepts/models/inference-parameters.mdx b/fern/v0.5.0/pages/concepts/models/inference-parameters.mdx new file mode 100644 index 00000000..b139fd8b --- /dev/null +++ b/fern/v0.5.0/pages/concepts/models/inference-parameters.mdx @@ -0,0 +1,190 @@ +--- +title: Inference Parameters +description: Control model behavior during synthetic data generation. +--- + +Inference parameters control how models generate responses during synthetic data generation. Data Designer provides three types of inference parameters: `ChatCompletionInferenceParams` for text/code/structured generation, `EmbeddingInferenceParams` for embedding generation, and `ImageInferenceParams` for image generation. + +## Overview + +When you create a `ModelConfig`, you can specify inference parameters to adjust model behavior. These parameters control aspects like randomness (temperature), diversity (top_p), context size (max_tokens), and more. Data Designer supports both static values and dynamic distribution-based sampling for certain parameters. + +## Chat Completion Inference Parameters + +The `ChatCompletionInferenceParams` class controls how models generate text completions (for text, code, and structured data generation). It provides fine-grained control over generation behavior and supports both static values and dynamic distribution-based sampling. + +### Fields + +| Field | Type | Required | Description | +|-------|------|----------|-------------| +| `temperature` | `float` or `Distribution` | No | Controls randomness in generation (0.0 to 2.0). Higher values = more creative/random | +| `top_p` | `float` or `Distribution` | No | Nucleus sampling parameter (0.0 to 1.0). Controls diversity by filtering low-probability tokens | +| `max_tokens` | `int` | No | Maximum number of tokens to generate in the response (≥ 1) | +| `max_parallel_requests` | `int` | No | Maximum concurrent API requests to this model (default: 4, ≥ 1). See [Concurrency Control](#concurrency-control) below. | +| `timeout` | `int` | No | API request timeout in seconds (≥ 1) | +| `extra_body` | `dict[str, Any]` | No | Additional parameters to include in the API request body | + + +If `temperature`, `top_p`, or `max_tokens` are not provided, the model provider's default values will be used. 
Different providers and models may have different defaults. + + + +For gpt-oss models like `gpt-oss-20b` and `gpt-oss-120b`, you can control the reasoning effort using the `extra_body` parameter: + +```python +import data_designer.config as dd + +# High reasoning effort (more thorough, slower) +inference_parameters = dd.ChatCompletionInferenceParams( + extra_body={"reasoning_effort": "high"} +) + +# Medium reasoning effort (balanced) +inference_parameters = dd.ChatCompletionInferenceParams( + extra_body={"reasoning_effort": "medium"} +) + +# Low reasoning effort (faster, less thorough) +inference_parameters = dd.ChatCompletionInferenceParams( + extra_body={"reasoning_effort": "low"} +) +``` + + +### Temperature and Top P Guidelines + +- **Temperature**: + - `0.0-0.3`: Highly deterministic, focused outputs (ideal for structured/reasoning tasks) + - `0.4-0.7`: Balanced creativity and coherence (general purpose) + - `0.8-1.0`: Creative, diverse outputs (ideal for creative writing) + - `1.0+`: Highly random and experimental + +- **Top P**: + - `0.1-0.5`: Very focused, only most likely tokens + - `0.6-0.9`: Balanced diversity + - `0.95-1.0`: Maximum diversity, including less likely tokens + + +When tuning both parameters simultaneously, consider these combinations: + +- **For deterministic/structured outputs**: Low temperature (`0.0-0.3`) + moderate-to-high top_p (`0.8-0.95`) + - The low temperature ensures focus, while top_p allows some token diversity +- **For balanced generation**: Moderate temperature (`0.5-0.7`) + high top_p (`0.9-0.95`) + - This is a good starting point for most use cases +- **For creative outputs**: Higher temperature (`0.8-1.0`) + high top_p (`0.95-1.0`) + - Both parameters work together to maximize diversity + +**Avoid**: Setting both very low (overly restrictive) or adjusting both dramatically at once. When experimenting, adjust one parameter at a time to understand its individual effect. + + +## Distribution-Based Inference Parameters + +For `temperature` and `top_p` in `ChatCompletionInferenceParams`, you can specify distributions instead of fixed values. This allows Data Designer to sample different values for each generation request, introducing controlled variability into your synthetic data. + +### Uniform Distribution + +Samples values uniformly between a low and high bound: + +```python +import data_designer.config as dd + +inference_params = dd.ChatCompletionInferenceParams( + temperature=dd.UniformDistribution( + params=dd.UniformDistributionParams(low=0.7, high=1.0) + ), +) +``` + +### Manual Distribution + +Samples from a discrete set of values with optional weights: + +```python +import data_designer.config as dd + +# Equal probability for each value +inference_params = dd.ChatCompletionInferenceParams( + temperature=dd.ManualDistribution( + params=dd.ManualDistributionParams(values=[0.5, 0.7, 0.9]) + ), +) + +# Weighted probabilities (normalized automatically) +inference_params = dd.ChatCompletionInferenceParams( + top_p=dd.ManualDistribution( + params=dd.ManualDistributionParams( + values=[0.8, 0.9, 0.95], + weights=[0.2, 0.5, 0.3] # 20%, 50%, 30% probability + ) + ), +) +``` + +## Concurrency Control + +The `max_parallel_requests` parameter controls how many concurrent API calls Data Designer makes to a specific model. This directly impacts throughput and should be tuned to match your inference server's capacity. 
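+
+A minimal illustration (the values here are arbitrary and should be tuned to your deployment):
+
+```python
+import data_designer.config as dd
+
+inference_params = dd.ChatCompletionInferenceParams(
+    max_parallel_requests=16,  # allow up to 16 concurrent requests to this model
+    timeout=120,               # per-request timeout in seconds
+)
+```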
+ + +For recommended values by deployment type (NVIDIA API Catalog, vLLM, OpenAI, NIMs) and detailed optimization strategies, see the [Architecture & Performance](/docs/concepts/architecture-and-performance) guide. + + +## Embedding Inference Parameters + +The `EmbeddingInferenceParams` class controls how models generate embeddings. This is used when working with embedding models for tasks like semantic search or similarity analysis. + +### Fields + +| Field | Type | Required | Description | +|-------|------|----------|-------------| +| `encoding_format` | `Literal["float", "base64"]` | No | Format of the embedding encoding (default: "float") | +| `dimensions` | `int` | No | Number of dimensions for the embedding | +| `max_parallel_requests` | `int` | No | Maximum concurrent API requests (default: 4, ≥ 1) | +| `timeout` | `int` | No | API request timeout in seconds (≥ 1) | +| `extra_body` | `dict[str, Any]` | No | Additional parameters to include in the API request body | + + +## Image Inference Parameters + +The `ImageInferenceParams` class is used for image generation models, including both diffusion models (DALL·E, Stable Diffusion, Imagen) and autoregressive models (Gemini image, GPT image). Unlike text models, image-specific options are passed entirely via `extra_body`, since they vary significantly between providers. + +### Fields + +| Field | Type | Required | Description | +|-------|------|----------|-------------| +| `max_parallel_requests` | `int` | No | Maximum concurrent API requests (default: 4, ≥ 1) | +| `timeout` | `int` | No | API request timeout in seconds (≥ 1) | +| `extra_body` | `dict[str, Any]` | No | Model-specific image options (size, quality, aspect ratio, etc.) | + +### Examples + +```python +import data_designer.config as dd + +# Autoregressive model (chat completions API, supports image context) +dd.ModelConfig( + alias="image-model", + model="black-forest-labs/flux.2-pro", + provider="openrouter", + inference_parameters=dd.ImageInferenceParams( + extra_body={"height": 512, "width": 512} + ), +) + +# Diffusion model (e.g., DALL·E, Stable Diffusion) +dd.ModelConfig( + alias="dalle", + model="dall-e-3", + inference_parameters=dd.ImageInferenceParams( + extra_body={"size": "1024x1024", "quality": "hd"} + ), +) +``` + + +## See Also + +- **[Default Model Settings](/docs/concepts/models/default-model-settings)**: Pre-configured model settings included with Data Designer +- **[Custom Model Settings](/docs/concepts/models/custom-model-settings)**: Learn how to create custom providers and model configurations +- **[Model Configurations](/docs/concepts/models/model-configs)**: Learn about configuring model settings +- **[Model Providers](/docs/concepts/models/model-providers)**: Learn about configuring model providers +- **[Architecture & Performance](/docs/concepts/architecture-and-performance)**: Understanding separation of concerns and optimizing concurrency diff --git a/fern/v0.5.0/pages/concepts/models/model-configs.mdx b/fern/v0.5.0/pages/concepts/models/model-configs.mdx new file mode 100644 index 00000000..22b3878b --- /dev/null +++ b/fern/v0.5.0/pages/concepts/models/model-configs.mdx @@ -0,0 +1,157 @@ +--- +title: Model Configurations +description: Configure model settings for synthetic data generation. +--- + +Model configurations define the specific models you use for synthetic data generation and their associated inference parameters. Each `ModelConfig` represents a named model that can be referenced throughout your data generation workflows. 
+ +## Overview + +A `ModelConfig` specifies which LLM model to use and how it should behave during generation. When you create column configurations (like `LLMText`, `LLMCode`, or `LLMStructured`), you reference a model by its alias. Data Designer uses the model configuration to determine which model to call and with what parameters. + +## ModelConfig Structure + +The `ModelConfig` class has the following fields: + +| Field | Type | Required | Description | +|-------|------|----------|-------------| +| `alias` | `str` | Yes | Unique identifier for this model configuration (e.g., `"my-text-model"`, `"reasoning-model"`) | +| `model` | `str` | Yes | Model identifier as recognized by the provider (e.g., `"nvidia/nemotron-3-nano-30b-a3b"`, `"gpt-4"`) | +| `inference_parameters` | `InferenceParamsT` | No | Controls model behavior during generation. Use `ChatCompletionInferenceParams` for text/code/structured generation or `EmbeddingInferenceParams` for embeddings. Defaults to `ChatCompletionInferenceParams()` if not provided. The generation type is automatically determined by the inference parameters type. See [Inference Parameters](/docs/concepts/models/inference-parameters) for details. | +| `provider` | `str` | No | Reference to the name of the Provider to use (e.g., `"nvidia"`, `"openai"`, `"openrouter"`). If not specified, one set as the default provider, which may resolve to the first provider if there are more than one | +| `skip_health_check` | `bool` | No | Whether to skip the health check for this model. Defaults to `False`. Set to `True` to skip health checks when you know the model is accessible or want to defer validation. | + + +## Examples + +### Basic Model Configuration + +```python +from data_designer.essentials import ChatCompletionInferenceParams, ModelConfig + +# Simple model configuration with fixed parameters +model_config = ModelConfig( + alias="my-text-model", + model="nvidia/nemotron-3-nano-30b-a3b", + provider="nvidia", + inference_parameters=ChatCompletionInferenceParams( + temperature=0.85, + top_p=0.95, + max_tokens=2048, + ), +) +``` + +### Multiple Model Configurations for Different Tasks + +```python +from data_designer.essentials import ( + ChatCompletionInferenceParams, + EmbeddingInferenceParams, + GenerationType, + ModelConfig +) + +model_configs = [ + # Creative tasks + ModelConfig( + alias="creative-model", + model="nvidia/nemotron-3-nano-30b-a3b", + provider="nvidia", + inference_parameters=ChatCompletionInferenceParams( + temperature=0.9, + top_p=0.95, + max_tokens=2048, + ), + ), + # Critic tasks + ModelConfig( + alias="critic-model", + model="nvidia/nemotron-3-nano-30b-a3b", + provider="nvidia", + inference_parameters=ChatCompletionInferenceParams( + temperature=0.25, + top_p=0.95, + max_tokens=2048, + ), + ), + # Reasoning and structured tasks + ModelConfig( + alias="reasoning-model", + model="openai/gpt-oss-20b", + provider="nvidia", + inference_parameters=ChatCompletionInferenceParams( + temperature=0.3, + top_p=0.9, + max_tokens=4096, + ), + ), + # Vision tasks + ModelConfig( + alias="vision-model", + model="nvidia/nemotron-nano-12b-v2-vl", + provider="nvidia", + inference_parameters=ChatCompletionInferenceParams( + temperature=0.7, + top_p=0.95, + max_tokens=2048, + ), + ), + # Embedding tasks + ModelConfig( + alias="embedding_model", + model="nvidia/llama-3.2-nv-embedqa-1b-v2", + provider="nvidia", + inference_parameters=EmbeddingInferenceParams( + encoding_format="float", + extra_body={ + "input_type": "query" + } + ) + ) +] +``` + + +The number 
of tokens required to generate a single data entry can vary significantly with use case. For example, reasoning models often need more tokens to "think through" problems before generating a response. Note that `max_tokens` specifies the **maximum number of output tokens** to generate in the response, so set this value based on the expected length of the generated content. + + +### Skipping Health Checks + +By default, Data Designer runs a health check for each model before starting data generation to ensure the model is accessible and configured correctly. You can skip this health check for specific models by setting `skip_health_check=True`: + +```python +import data_designer.config as dd + +model_config = dd.ModelConfig( + alias="my-model", + model="nvidia/nemotron-3-nano-30b-a3b", + provider="nvidia", + inference_parameters=dd.ChatCompletionInferenceParams( + temperature=0.85, + top_p=0.95, + max_tokens=2048, + ), + skip_health_check=True, # Skip health check for this model +) +``` + + +Skipping health checks can be useful when: + +- You've already verified the model is accessible and want to speed up initialization +- You're using a model that doesn't support the standard health check format +- You want to defer model validation until the model is actually used + +Note that skipping health checks means errors will only be discovered during actual data generation. + + +## See Also + +- **[Inference Parameters](/docs/concepts/models/inference-parameters)**: Detailed guide to inference parameters and how to configure them +- **[Model Providers](/docs/concepts/models/model-providers)**: Learn about configuring model providers +- **[Default Model Settings](/docs/concepts/models/default-model-settings)**: Pre-configured model settings included with Data Designer +- **[Custom Model Settings](/docs/concepts/models/custom-model-settings)**: Learn how to create custom providers and model configurations +- **[Configure Model Settings With the CLI](/docs/concepts/models/configure-with-cli)**: Use the CLI to manage model settings +- **[Column Configurations](/api/column-configs)**: Learn how to use models in column configurations +- **[Architecture & Performance](/docs/concepts/architecture-and-performance)**: Understanding separation of concerns and optimizing concurrency diff --git a/fern/v0.5.0/pages/concepts/models/model-providers.mdx b/fern/v0.5.0/pages/concepts/models/model-providers.mdx new file mode 100644 index 00000000..efc877f3 --- /dev/null +++ b/fern/v0.5.0/pages/concepts/models/model-providers.mdx @@ -0,0 +1,56 @@ +--- +title: Model Providers +description: Configure connections to model hosting services. +--- + +Model providers are external services that host and serve models. Data Designer uses the `ModelProvider` class to configure connections to these services. + +## Overview + +A `ModelProvider` defines how Data Designer connects to a provider's API endpoint. When you create a `ModelConfig`, you reference a provider by name, and Data Designer uses that provider's settings to make API calls to the appropriate endpoint. + +## ModelProvider Configuration + +The `ModelProvider` class has the following fields: + +| Field | Type | Required | Description | +|-------|------|----------|-------------| +| `name` | `str` | Yes | Unique identifier for the provider (e.g., `"nvidia"`, `"openai"`, `"openrouter"`) | +| `endpoint` | `str` | Yes | API endpoint URL (e.g., `"https://integrate.api.nvidia.com/v1"`) | +| `provider_type` | `str` | No | Provider type (default: `"openai"`). 
Uses OpenAI-compatible API format | +| `api_key` | `str` | No | API key or environment variable name (e.g., `"NVIDIA_API_KEY"`) | +| `extra_body` | `dict[str, Any]` | No | Additional parameters to include in the request body of all API requests to the provider. | +| `extra_headers` | `dict[str, str]` | No | Additional headers to include in all API requests to the provider. | + +## API Key Configuration + +The `api_key` field can be specified in two ways: + +1. **Environment variable name** (recommended): Set `api_key` to the name of an environment variable (e.g., `"NVIDIA_API_KEY"`). Data Designer will automatically resolve it at runtime. + +2. **Plain-text value**: Set `api_key` to the actual API key string. This is less secure and not recommended for production use. + +```python +# Method 1: Environment variable (recommended) +provider = ModelProvider( + name="nvidia", + endpoint="https://integrate.api.nvidia.com/v1", + api_key="NVIDIA_API_KEY", # Will be resolved from environment +) + +# Method 2: Direct value (not recommended) +provider = ModelProvider( + name="nvidia", + endpoint="https://integrate.api.nvidia.com/v1", + api_key="nvapi-abc123...", # Direct API key +) +``` + +## See Also + +- **[Model Configurations](/docs/concepts/models/model-configs)**: Learn about configuring models +- **[Inference Parameters](/docs/concepts/models/inference-parameters)**: Detailed guide to inference parameters and how to configure them +- **[Default Model Settings](/docs/concepts/models/default-model-settings)**: Pre-configured providers and model settings included with Data Designer +- **[Custom Model Settings](/docs/concepts/models/custom-model-settings)**: Learn how to create custom providers and model configurations +- **[Configure Model Settings With the CLI](/docs/concepts/models/configure-with-cli)**: Use the CLI to manage providers and model settings +- **[Quick Start Guide](/docs/quick-start)**: Get started with a simple example diff --git a/fern/v0.5.0/pages/concepts/person-sampling.mdx b/fern/v0.5.0/pages/concepts/person-sampling.mdx new file mode 100644 index 00000000..0f3cbe3a --- /dev/null +++ b/fern/v0.5.0/pages/concepts/person-sampling.mdx @@ -0,0 +1,245 @@ +--- +title: Person Sampling in Data Designer +description: Generate synthetic person data for your datasets. +--- + +Person sampling in Data Designer allows you to generate synthetic person data for your datasets. There are two distinct approaches, each with different capabilities and use cases. + +## Overview + +Data Designer provides two ways to generate synthetic people: + +1. **Faker-based sampling** - Quick, basic PII generation for testing or when realistic demographic distributions are not relevant for your use case +2. **Nemotron-Personas datasets** - Demographically accurate, rich persona data + +--- + +## Approach 1: Faker-Based Sampling + +### What It Does +Uses the Faker library to generate random personal information. The data is basic and not demographically accurate, but is useful for quick testing, prototyping, or when realistic demographic distributions are not relevant for your use case. + +### Features +- Gives you access to person attributes that Faker exposes +- Quick to set up with no additional downloads +- Generates random names, emails, addresses, phone numbers, etc. 
+- Supports [all Faker-supported locales](https://faker.readthedocs.io/en/master/locales.html) +- **Not demographically grounded** - data patterns don't reflect real-world demographics + +### Usage Example +```python +import data_designer.config as dd + +config_builder.add_column( + dd.SamplerColumnConfig( + name="customer", + sampler_type=dd.SamplerType.PERSON_FROM_FAKER, + params=dd.PersonFromFakerSamplerParams( + locale="en_US", + age_range=[25, 65], + sex="Female", + ), + ) +) +``` + +For more details, see the documentation for [`SamplerColumnConfig`](/api/column-configs) and [`PersonFromFakerSamplerParams`](/api/sampler-params). + +--- + +## Approach 2: Nemotron-Personas Datasets + +### What It Does +Uses curated Nemotron-Personas datasets from NVIDIA GPU Cloud (NGC) to generate demographically accurate person data with rich personality profiles and behavioral characteristics. + +The NGC datasets are extended versions of the [open-source Nemotron-Personas datasets on HuggingFace](https://huggingface.co/collections/nvidia/nemotron-personas), with additional fields and enhanced data quality. + +Supported locales: + +- `en_US`: United States +- `en_IN`: India (English) +- `en_SG`: Singapore (English) +- `hi_Deva_IN`: India (Devanagari script) +- `hi_Latn_IN`: India (Latin script) +- `ja_JP`: Japan +- `pt_BR`: Brazil (Portuguese) + +### Features +- **Demographically accurate personal details**: Names, ages, sex, marital status, education, occupation based on census data +- **Rich persona details**: Comprehensive behavioral profiles including: + - Big Five personality traits with scores + - Cultural backgrounds and narratives + - Skills and hobbies + - Career goals and aspirations + - Context-specific personas (professional, financial, healthcare, sports, arts, travel, culinary, etc.) +- Consistent, referenceable attributes across your dataset +- Grounded in real-world demographic distributions + +### Prerequisites + +To use the extended Nemotron-Personas datasets with Data Designer, you need to download them [from NGC](https://catalog.ngc.nvidia.com/search?orderBy=scoreDESC&query=nemotron+personas) and move them to the Data Designer managed assets directory. + +See below for step-by-step instructions. + +### Nemotron-Personas Datasets Setup Instructions + +#### Step 0: Obtain an NGC API Key and install the NGC CLI + +To download the Nemotron-Personas datasets from NGC, you will need to obtain an NGC API key and install the NGC CLI. + +1. **NGC API Key**: Obtain from [NVIDIA GPU Cloud](https://ngc.nvidia.com/) +2. **NGC CLI**: [NGC CLI](https://org.ngc.nvidia.com/setup/installers/cli) + + +#### Step 1: Set Your NGC API Key +```bash +export NGC_API_KEY="your-ngc-api-key-here" +``` + +#### Step 2 (option 1): Download Nemotron-Personas Datasets via the Data Designer CLI + +Once you have the NGC CLI and your NGC API key set up, you can download the datasets via the Data Designer CLI. 
+ +You can pass the locales you want to download as arguments to the CLI command: +```bash +data-designer download personas --locale en_US --locale ja_JP +``` + +Or you can use the interactive mode to select the locales you want to download: +```bash +data-designer download personas +``` + +#### Step 2 (option 2): Download Nemotron-Personas Datasets Directly + +Use the NGC CLI to download the datasets: +```bash +# For Nemotron-Personas USA +ngc registry resource download-version "nvidia/nemotron-personas/nemotron-personas-dataset-en_us" + +# For Nemotron-Personas IN +ngc registry resource download-version "nvidia/nemotron-personas/nemotron-personas-dataset-hi_deva_in" +ngc registry resource download-version "nvidia/nemotron-personas/nemotron-personas-dataset-hi_latn_in" +ngc registry resource download-version "nvidia/nemotron-personas/nemotron-personas-dataset-en_in" + +# For Nemotron-Personas JP +ngc registry resource download-version "nvidia/nemotron-personas/nemotron-personas-dataset-ja_jp" + +# For Nemotron-Personas SG +ngc registry resource download-version "nvidia/nemotron-personas/nemotron-personas-dataset-en_sg" + +# For Nemotron-Personas BR +ngc registry resource download-version "nvidia/nemotron-personas/nemotron-personas-dataset-pt_br" +``` + +Then move the downloaded dataset to the Data Designer managed assets directory: +```bash +mkdir -p ~/.data-designer/managed-assets/datasets/ +mv nemotron-personas-dataset-*/*.parquet ~/.data-designer/managed-assets/datasets/ +``` + +#### Step 3: Use PersonSampler in Your Code +```python +import data_designer.config as dd + +config_builder.add_column( + dd.SamplerColumnConfig( + name="customer", + sampler_type=dd.SamplerType.PERSON, + params=dd.PersonSamplerParams( + locale="en_US", + sex="Female", + age_range=[25, 45], + with_synthetic_personas=True, + ), + ) +) +``` + +For more details, see the documentation for [`SamplerColumnConfig`](/api/column-configs) and [`PersonSamplerParams`](/api/sampler-params). 
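Once sampled, person attributes can be referenced from downstream columns with Jinja templating. A minimal sketch (the column name, model alias, and referenced fields are illustrative):

```python
# Illustrative follow-up column; "my-text-model" is an assumed model alias.
config_builder.add_column(
    dd.LLMTextColumnConfig(
        name="customer_bio",
        model_alias="my-text-model",
        prompt=(
            "Write a short professional bio for {{ customer.first_name }} "
            "{{ customer.last_name }}, a {{ customer.occupation }} "
            "living in {{ customer.city }}."
        ),
    )
)
```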
+ +### Available Data Fields + +**Core Fields (all locales):** + +| Field | Type | Notes | +|-------|------|-------| +| `uuid` | UUID | Unique identifier | +| `first_name` | string | | +| `middle_name` | string | | +| `last_name` | string | | +| `sex` | enum | "Male" or "Female" | +| `birth_date` | date | Derived: year, month, day | +| `street_number` | int | | +| `street_name` | string | | +| `unit` | string | Address line 2 | +| `city` | string | | +| `region` | string | Alias: state | +| `district` | string | Alias: county | +| `postcode` | string | Alias: zipcode | +| `country` | string | | +| `phone_number` | PhoneNumber | Derived: area_code, country_code, prefix, line_number | +| `marital_status` | string | Values: never_married, married_present, separated, widowed, divorced | +| `education_level` | string or None | | +| `bachelors_field` | string or None | | +| `occupation` | string or None | | +| `email_address` | string | | +| `national_id` | string | + +**Japan-Specific Fields (`ja_JP`):** + +- `area` +- `prefecture` +- `zone` + +**Brazil-Specific Fields (`pt_BR`):** + +- `race` - Census-reported race + +**Brazil and India Shared Fields (`pt_BR`, `en_IN`, `hi_Deva_IN`, `hi_Latn_IN`):** + +- `religion` - Census-reported religion + +**India-Specific Fields (`en_IN`, `hi_Deva_IN`, `hi_Latn_IN`):** + +- `district` - Census-reported district +- `education_degree` - Census-reported education degree +- `first_language` - Native language +- `second_language` - Second language (if applicable) +- `third_language` - Third language (if applicable) +- `zone` - Urban vs rural + +**With Synthetic Personas Enabled:** + +- Big Five personality traits (Openness, Conscientiousness, Extraversion, Agreeableness, Neuroticism) with t-scores and labels +- Cultural background narratives +- Skills and competencies +- Hobbies and interests +- Career goals +- Context-specific personas (professional, financial, healthcare, sports, arts & entertainment, travel, culinary, etc.) + +*Japan-specific persona fields:* + +- `aspects` +- `digital_skills` + +*Brazil and India shared persona fields (`pt_BR`, `en_IN`, `hi_Deva_IN`, `hi_Latn_IN`):* + +- `religious_persona` +- `religious_background` + +*India-specific persona fields (`en_IN`, `hi_Deva_IN`, `hi_Latn_IN`):* + +- `linguistic_persona` +- `linguistic_background` + +### Configuration Parameters + +| Parameter | Type | Description | +|-----------|------|-------------| +| `locale` | str | Language/region code - must be one of: "en_US", "en_IN", "en_SG", "hi_Deva_IN", "hi_Latn_IN", "ja_JP", "pt_BR" | +| `sex` | str (optional) | Filter by "Male" or "Female" | +| `city` | str or list[str] (optional) | Filter by specific city or cities within locale | +| `age_range` | list[int] (optional) | Two-element list [min_age, max_age] (default: [18, 114]) | +| `with_synthetic_personas` | bool (optional) | Include rich personality profiles (default: False) | +| `select_field_values` | dict (optional) | Custom field-based filtering (e.g., `{"state": ["NY", "CA"], "education_level": ["bachelors"]}`) | diff --git a/fern/v0.5.0/pages/concepts/processors.mdx b/fern/v0.5.0/pages/concepts/processors.mdx new file mode 100644 index 00000000..a1fef4ad --- /dev/null +++ b/fern/v0.5.0/pages/concepts/processors.mdx @@ -0,0 +1,165 @@ +--- +title: Processors +description: Transformations that modify your dataset before or after columns are generated. +--- + +Processors are transformations that modify your dataset before or after columns are generated. 
They run at different stages and can reshape, filter, or augment the data. + + +Processors handle transformations that don't fit the "column" model: restructuring the schema for a specific output format, dropping intermediate columns in bulk, or applying batch-wide operations. + + +## Overview + +Each processor: + +- Receives the complete batch DataFrame +- Applies its transformation +- Passes the result to the next processor (or to output) + +Processors can run at three stages, determined by which callback methods they implement: + +| Stage | When it runs | Callback method | Use cases | +|-------|--------------|-----------------|-----------| +| Pre-batch | After seed columns, before dependent columns | `process_before_batch()` | Transform seed data before other columns are generated | +| Post-batch | After each batch completes | `process_after_batch()` | Drop columns, transform schema per batch | +| After generation | Once, on final dataset after all batches | `process_after_generation()` | Deduplicate, aggregate statistics, final cleanup | + + +Each batch carries the full dataset schema during generation. Post-batch schema changes such as column dropping only alter past batches, so all columns remain accessible to generators while building follow-up batches. + + +A processor can implement any combination of these callbacks. The built-in processors use `process_after_batch()` by default. + +## Processor Types + +### 🗑️ Drop Columns Processor + +Removes specified columns from the output dataset. Dropped columns are saved separately in the `dropped-columns` directory for reference. + + +The Drop Columns Processor is different from others in the sense that it does not need to be explicitly added: setting `drop = True` when configuring a column will accomplish the same. + + +**Configuration:** + +```python +import data_designer.config as dd + +processor = dd.DropColumnsProcessorConfig( + name="remove_intermediate", + column_names=["temp_calculation", "raw_input", "debug_info"], +) +``` + +**Behavior:** + +- Columns specified in `column_names` are removed from the output +- Original values are preserved in a separate parquet file +- Missing columns produce a warning but don't fail the build +- Column configs are automatically marked with `drop=True` when this processor is added + +**Use Cases:** + +- Removing intermediate columns used only for LLM context +- Cleaning up debug or validation columns before final output +- Separating sensitive data from the main dataset + +### 🔄 Schema Transform Processor + +Creates an additional dataset with a transformed schema using Jinja2 templates. The output is written to a separate directory alongside the main dataset. 
+ +**Configuration:** + +```python +import data_designer.config as dd + +processor = dd.SchemaTransformProcessorConfig( + name="chat_format", + template={ + "messages": [ + {"role": "user", "content": "{{ question }}"}, + {"role": "assistant", "content": "{{ answer }}"}, + ], + "metadata": "{{ category | upper }}", + }, +) +``` + +**Behavior:** + +- Each key in `template` becomes a column in the transformed dataset +- Values are Jinja2 templates with access to all columns in the batch +- Complex structures (lists, nested dicts) are supported +- Output is saved to the `processors-outputs/{name}/` directory +- The original dataset passes through unchanged + +**Template Capabilities:** + +- **Variable substitution**: `{{ column_name }}` +- **Filters**: `{{ text | upper }}`, `{{ text | lower }}`, `{{ text | trim }}` +- **Nested structures**: Arbitrarily deep JSON structures +- **Lists**: `["{{ col1 }}", "{{ col2 }}"]` + +**Use Cases:** + +- Converting flat columns to chat message format +- Restructuring data for specific model training formats +- Creating derived views without modifying the source dataset + +## Using Processors + +Add processors to your configuration using the builder's `add_processor` method: + +```python +import data_designer.config as dd + +builder = dd.DataDesignerConfigBuilder() + +# ... add columns ... + +# Drop intermediate columns +builder.add_processor( + dd.DropColumnsProcessorConfig( + name="cleanup", + column_names=["scratch_work", "raw_context"], + ) +) + +# Transform to chat format +builder.add_processor( + dd.SchemaTransformProcessorConfig( + name="chat_format", + template={ + "messages": [ + {"role": "user", "content": "{{ question }}"}, + {"role": "assistant", "content": "{{ answer }}"}, + ], + }, + ) +) +``` + +### Execution Order + +Processors execute in the order they're added. Plan accordingly when one processor's output affects another. + +## Configuration Parameters + +### Common Parameters + +| Parameter | Type | Description | +|-----------|------|-------------| +| `name` | str | Identifier for the processor, used in output directory names | + +### DropColumnsProcessorConfig + +| Parameter | Type | Description | +|-----------|------|-------------| +| `column_names` | list[str] | Columns to remove from output | + +### SchemaTransformProcessorConfig + +| Parameter | Type | Description | +|-----------|------|-------------| +| `template` | dict[str, Any] | Jinja2 template defining the output schema. Must be JSON-serializable. | diff --git a/fern/v0.5.0/pages/concepts/seed-datasets.mdx b/fern/v0.5.0/pages/concepts/seed-datasets.mdx new file mode 100644 index 00000000..53aea1e2 --- /dev/null +++ b/fern/v0.5.0/pages/concepts/seed-datasets.mdx @@ -0,0 +1,265 @@ +--- +title: Seed Datasets +description: Bootstrap synthetic data generation from existing datasets. +--- + +Seed datasets let you bootstrap synthetic data generation from existing data. Instead of generating everything from scratch, you provide a dataset whose columns become available as context in your prompts and expressions—grounding your synthetic data in real-world examples. + + +Seed datasets shine when you have **real data you want to build on**: + +- Product catalogs → generate customer reviews +- Medical diagnoses → generate physician notes +- Code snippets → generate documentation +- Company profiles → generate financial reports + +The seed data provides realism and domain specificity; Data Designer adds volume and variation. 
+ + +## The Basic Pattern + +```python +import data_designer.config as dd +from data_designer.interface import DataDesigner + +# Define your model configuration +model_configs = [ + dd.ModelConfig( + alias="my-model", + model="nvidia/nemotron-3-nano-30b-a3b", + provider="nvidia", + ) +] + +config_builder = dd.DataDesignerConfigBuilder(model_configs=model_configs) + +# 1. Attach a seed dataset +seed_source = dd.LocalFileSeedSource(path="products.csv") +config_builder.with_seed_dataset(seed_source) + +# 2. Reference seed columns in your prompts +config_builder.add_column( + dd.LLMTextColumnConfig( + name="review", + model_alias="my-model", + prompt="""\ +Write a customer review for {{ product_name }}. +Category: {{ category }} +Price: ${{ price }} +""", + ) +) +``` + +Every column in your seed dataset becomes available as a Jinja2 variable in prompts and expressions. Data Designer automatically: + +- Reads rows from the seed dataset +- Injects seed column values into templates + +## Seed Sources + +Data Designer supports three ways to provide seed data: + +### LocalFileSeedSource + +Load from a local file—CSV, Parquet, or JSON. + +```python +# Single file +seed_source = dd.LocalFileSeedSource(path="data/products.csv") + +# Parquet files with wildcard +seed_source = dd.LocalFileSeedSource(path="data/products/*.parquet") +``` + + +- CSV (`.csv`) +- Parquet (`.parquet`) +- JSON (`.json`, `.jsonl`) + + +### HuggingFaceSeedSource + +Load directly from HuggingFace datasets without downloading manually. + +```python +seed_source = dd.HuggingFaceSeedSource( + path="datasets/gretelai/symptom_to_diagnosis/data/train.parquet", + token="hf_...", # Optional, for private datasets +) +``` + +### DataFrameSeedSource + +Use an in-memory pandas DataFrame—great for preprocessing or combining multiple sources. + +```python +import pandas as pd + +df = pd.read_csv("raw_data.csv") +df = df[df["quality_score"] > 0.8] # Filter to high-quality rows + +seed_source = dd.DataFrameSeedSource(df=df) +``` + + +`DataFrameSeedSource` can't be serialized to YAML/JSON configs. Use `LocalFileSeedSource` if you need to save and share configurations. + + +## Sampling Strategies + +Control how rows are read from the seed dataset. + +### Ordered (Default) + +Rows are read sequentially in their original order. Each generated record corresponds to the next row in the seed dataset. If you generate more records than exist in the seed dataset, it will cycle in order until completion. + +```python +config_builder.with_seed_dataset( + seed_source, + sampling_strategy=dd.SamplingStrategy.ORDERED, +) +``` + +### Shuffle + +Rows are randomly shuffled before sampling. Useful when your seed data has some ordering you want to break. + +```python +config_builder.with_seed_dataset( + seed_source, + sampling_strategy=dd.SamplingStrategy.SHUFFLE, +) +``` + +## Selection Strategies + +Select a subset of your seed dataset—useful for large datasets or parallel processing. + +### IndexRange + +Select a specific range of row indices. + +```python +# Use only rows 100-199 (100 rows total) +config_builder.with_seed_dataset( + seed_source, + selection_strategy=dd.IndexRange(start=100, end=199), +) +``` + +### PartitionBlock + +Split the dataset into N equal partitions and select one. Perfect for distributing work across multiple jobs. 
+ +```python +# Split into 5 partitions, use the 3rd one (index=2, zero-based) +config_builder.with_seed_dataset( + seed_source, + selection_strategy=dd.PartitionBlock(index=2, num_partitions=5), +) +``` + + +Run 5 parallel jobs, each with a different partition index, to process a large seed dataset in parallel: + +```python +# Job 0: PartitionBlock(index=0, num_partitions=5) +# Job 1: PartitionBlock(index=1, num_partitions=5) +# Job 2: PartitionBlock(index=2, num_partitions=5) +# ... +``` + + +### Combining Strategies + +Sampling and selection strategies work together. For example, shuffle rows *within* a specific partition: + +```python +config_builder.with_seed_dataset( + seed_source, + sampling_strategy=dd.SamplingStrategy.SHUFFLE, + selection_strategy=dd.PartitionBlock(index=0, num_partitions=10), +) +``` + +## Complete Example + +Here's a complete example generating physician notes from a symptom-to-diagnosis seed dataset: + +```python +import data_designer.config as dd +from data_designer.interface import DataDesigner + +data_designer = DataDesigner() + +model_configs = [ + dd.ModelConfig( + alias="medical-notes", + model="nvidia/nemotron-3-nano-30b-a3b", + provider="nvidia", + ) +] + +config_builder = dd.DataDesignerConfigBuilder(model_configs=model_configs) + +# Attach seed dataset (has 'diagnosis' and 'symptoms' columns) +seed_source = dd.LocalFileSeedSource(path="symptom_to_diagnosis.csv") +config_builder.with_seed_dataset(seed_source) + +# Generate patient info +config_builder.add_column( + dd.SamplerColumnConfig( + name="patient", + sampler_type=dd.SamplerType.PERSON_FROM_FAKER, + params=dd.PersonFromFakerSamplerParams(), + ) +) + +config_builder.add_column( + dd.ExpressionColumnConfig( + name="patient_name", + expr="{{ patient.first_name }} {{ patient.last_name }}", + ) +) + +# Generate notes grounded in seed data +config_builder.add_column( + dd.LLMTextColumnConfig( + name="physician_notes", + model_alias="medical-notes", + prompt="""\ +You are a physician writing notes after a patient visit. + +Patient: {{ patient_name }} +Diagnosis: {{ diagnosis }} +Reported Symptoms: {{ symptoms }} + +Write detailed clinical notes for this visit. +""", + ) +) + +# Preview +preview = designer.preview(config_builder, num_records=5) +preview.display_sample_record() +``` + +## Best Practices + +### Keep Seed Data Clean + +Garbage in, garbage out. Clean your seed data before using it: + +- Remove duplicates +- Fix encoding issues +- Filter out low-quality rows +- Standardize column names + +### Match Generation Volume to Seed Size + +If your seed dataset has 1,000 rows and you generate 10,000 records, each seed row will be used ~10 times. Consider whether that's appropriate for your use case. + +### Use Seed Data for Diversity Control + +Seed datasets are excellent for controlling the distribution of your synthetic data. Want 30% electronics, 50% clothing, 20% home goods? Curate your seed dataset to match. diff --git a/fern/v0.5.0/pages/concepts/tool-use-and-mcp.mdx b/fern/v0.5.0/pages/concepts/tool-use-and-mcp.mdx new file mode 100644 index 00000000..16363288 --- /dev/null +++ b/fern/v0.5.0/pages/concepts/tool-use-and-mcp.mdx @@ -0,0 +1,72 @@ +--- +title: Tool Use & MCP +description: Enable LLM columns to call external tools during generation using the Model Context Protocol. +--- + +Tool use lets LLM columns call external tools during generation (e.g., lookups, calculations, retrieval, domain services). 
Data Designer supports tool use via the **Model Context Protocol (MCP)**, which standardizes how tools are discovered and invoked. + +## Quick Start + +1. Configure an MCP provider ([Local](/docs/concepts/mcp/mcp-providers#localstdiomcpprovider-subprocess) or [Remote](/docs/concepts/mcp/mcp-providers#mcpprovider-remote-sse)) +2. Create a [ToolConfig](/docs/concepts/mcp/tool-configs) referencing your provider +3. Add `tool_alias` to your [LLM column](/docs/concepts/mcp/enabling-tools) + +```python +import data_designer.config as dd +from data_designer.interface import DataDesigner + +# 1. Configure provider + +## Local Stdio provider +mcp_provider = dd.LocalStdioMCPProvider( + name="demo-mcp", + command="python", + args=["-m", "my_mcp_server"], +) + +## Remote provider +# mcp_provider = dd.MCPProvider( +# name="remote-mcp", +# endpoint="https://mcp.example.invalid/sse", +# api_key="REMOTE_MCP_API_KEY", +# ) + +data_designer = DataDesigner(mcp_providers=[mcp_provider]) + +# 2. Create tool config +tool_config = dd.ToolConfig( + tool_alias="my-tools", + providers=["demo-mcp"], +) + +builder = dd.DataDesignerConfigBuilder(tool_configs=[tool_config]) + +# 3. Use tools in column +builder.add_column( + dd.LLMTextColumnConfig( + name="answer", + prompt="Use tools to answer: {{ question }}", + model_alias="nvidia-text", + tool_alias="my-tools", + ) +) +``` + +## Guides + +| Guide | Description | +|-------|-------------| +| **[MCP Providers](/docs/concepts/mcp/mcp-providers)** | Configure local subprocess or remote SSE providers | +| **[Tool Configs](/docs/concepts/mcp/tool-configs)** | Define tool permissions and limits | +| **[Enabling Tools on Columns](/docs/concepts/mcp/enabling-tools)** | Use tools in LLM generation | +| **[Configure via CLI](/docs/concepts/mcp/configure-mcp-cli)** | Interactive CLI configuration | +| **[Traces](/docs/concepts/traces)** | Capture full conversation history | +| **[Safety & Limits](/docs/concepts/mcp/safety-and-limits)** | Allowlists, budgets, timeouts | + +## Example + +See the [PDF Q&A Recipe](/docs/recipes/mcp-and-tooluse/pdf-qa) for a complete working example. + +## Code Reference + +For internal architecture and API documentation, see [MCP Code Reference](/api/mcp). diff --git a/fern/v0.5.0/pages/concepts/traces.mdx b/fern/v0.5.0/pages/concepts/traces.mdx new file mode 100644 index 00000000..bd3f12c2 --- /dev/null +++ b/fern/v0.5.0/pages/concepts/traces.mdx @@ -0,0 +1,218 @@ +--- +title: Message Traces +description: Capture conversation history during LLM generation including tool calls and reasoning. +--- + +Traces capture the conversation history during LLM generation, including system prompts, user prompts, model reasoning, tool calls, tool results, and the final response. This visibility is essential for understanding model behavior, debugging generation issues, and iterating on prompts. + +Traces are also useful in certain scenarios as the target output of the workflow, e.g. producing an SFT dataset for fine-tuning tool-use capability, for instance. + +## Overview + +When generating content with LLM columns, you often need to understand what happened during generation: + +- What system prompt was used? +- What did the rendered user prompt look like? +- Did the model provide any reasoning content? +- Which tools were called (if tool use is enabled)? +- What arguments were passed to tools? +- What did tools return? +- Did the model retry after failures? +- How did the model arrive at the final answer? 
+ +Traces provide this visibility by capturing the ordered message history for each generation, including any multi-turn conversations that occur during tool use or retry scenarios. + +## Trace Types + +Data Designer supports three trace modes via the `TraceType` enum: + +| TraceType | Description | +|-----------|-------------| +| `TraceType.NONE` | No trace captured (default) | +| `TraceType.LAST_MESSAGE` | Only the final assistant message is captured | +| `TraceType.ALL_MESSAGES` | Full conversation history (system/user/assistant/tool) | + +## Enabling Traces + +### Per-Column (Recommended) + +Set `with_trace` on specific LLM columns: + +```python +import data_designer.config as dd + +# Capture full conversation history +builder.add_column( + dd.LLMTextColumnConfig( + name="answer", + prompt="Answer: {{ question }}", + model_alias="nvidia-text", + with_trace=dd.TraceType.ALL_MESSAGES, # Full trace + ) +) + +# Capture only the final assistant response +builder.add_column( + dd.LLMTextColumnConfig( + name="summary", + prompt="Summarize: {{ text }}", + model_alias="nvidia-text", + with_trace=dd.TraceType.LAST_MESSAGE, # Just the final response + ) +) +``` + +## Trace Column Naming + +When enabled, LLM columns produce an additional side-effect column: + +- `{column_name}__trace` + +For example, if your column is named `"answer"`, the trace column will be `"answer__trace"`. + +## Trace Data Structure + +Each trace is a `list[dict]` where each dict represents a message in the conversation. + +### Message Fields by Role + +| Role | Fields | Description | +|------|--------|-------------| +| `system` | `role`, `content` | System prompt setting model behavior. `content` is a list of blocks in ChatML format. | +| `user` | `role`, `content` | User prompt (rendered from template). `content` is a list of blocks (text + multimodal). | +| `assistant` | `role`, `content`, `tool_calls`, `reasoning_content` | Model response; `content` may be empty if only requesting tools. | +| `tool` | `role`, `content`, `tool_call_id` | Tool execution result; `tool_call_id` links to the request. | + +### Example Trace (Simple Generation) + +A basic trace without tool use: + +```python +[ + # System message (if configured) + { + "role": "system", + "content": [{"type": "text", "text": "You are a helpful assistant that provides clear, concise answers."}] + }, + # User message (the rendered prompt) + { + "role": "user", + "content": [{"type": "text", "text": "What is the capital of France?"}] + }, + # Final assistant response + { + "role": "assistant", + "content": [{"type": "text", "text": "The capital of France is Paris."}], + "reasoning_content": None # May contain reasoning if model supports it + } +] +``` + +### Example Trace (With Tool Use) + +When tool use is enabled, traces capture the full conversation including tool calls: + +```python +[ + # System message + { + "role": "system", + "content": [{"type": "text", "text": "You must call tools before answering. 
Only use tool results."}] + }, + # User message (the rendered prompt) + { + "role": "user", + "content": [{"type": "text", "text": "What documents are in the knowledge base about machine learning?"}] + }, + # Assistant requests tool calls + { + "role": "assistant", + "content": [{"type": "text", "text": ""}], + "tool_calls": [ + { + "id": "call_abc123", + "type": "function", + "function": { + "name": "list_docs", + "arguments": "{\"query\": \"machine learning\"}" + } + } + ] + }, + # Tool response (linked by tool_call_id) + { + "role": "tool", + "content": [{"type": "text", "text": "Found 3 documents: intro_ml.pdf, neural_networks.pdf, transformers.pdf"}], + "tool_call_id": "call_abc123" + }, + # Final assistant response + { + "role": "assistant", + "content": [{"type": "text", "text": "The knowledge base contains three documents about machine learning: ..."}] + } +] +``` + +### The tool_calls Structure + +When an assistant message includes tool calls: + +```python +{ + "id": "call_abc123", # Unique ID linking to tool response + "type": "function", # Always "function" for MCP tools + "function": { + "name": "search_docs", # Tool name + "arguments": "{...}" # JSON string of tool arguments + } +} +``` + +## Extracting Reasoning Content + +Some models (particularly those with extended thinking or chain-of-thought capabilities) expose their reasoning process separately via the `reasoning_content` field in assistant messages. While this is included in full traces, you may want to capture it separately without the overhead of storing the entire conversation history. + +### Dedicated Reasoning Column + +Set `extract_reasoning_content=True` on any LLM column to create a `{column_name}__reasoning_content` side-effect column: + +```python +import data_designer.config as dd + +builder.add_column( + dd.LLMTextColumnConfig( + name="solution", + prompt="Solve this math problem step by step: {{ problem }}", + model_alias="reasoning-model", + extract_reasoning_content=True, # Creates solution__reasoning_content + ) +) +``` + +The extracted reasoning content: + +- Contains only the `reasoning_content` from the **final** assistant message in the trace +- Is stripped of leading/trailing whitespace +- Is `None` if the model didn't provide reasoning content or if it was whitespace-only + +### When to Use Each Approach + +| Need | Approach | +|------|----------| +| Full conversation history for debugging | `with_trace=True` | +| Just the model's reasoning/thinking | `extract_reasoning_content=True` | +| Both conversation history and separate reasoning | Use both options | +| Fine-tuning data with reasoning | `extract_reasoning_content=True` for clean extraction | + +### Availability + +The `extract_reasoning_content` option is available on all LLM column types: + +- `LLMTextColumnConfig` +- `LLMCodeColumnConfig` +- `LLMStructuredColumnConfig` +- `LLMJudgeColumnConfig` + +## See Also + +- **[Safety and Limits](/docs/concepts/mcp/safety-and-limits)**: Understand turn limits and timeout behavior diff --git a/fern/v0.5.0/pages/concepts/validators.mdx b/fern/v0.5.0/pages/concepts/validators.mdx new file mode 100644 index 00000000..24176174 --- /dev/null +++ b/fern/v0.5.0/pages/concepts/validators.mdx @@ -0,0 +1,341 @@ +--- +title: Validators +description: Quality assurance mechanisms that check generated content against rules. +--- + +Validators are quality assurance mechanisms in Data Designer that check generated content against rules and return structured pass/fail results. 
They enable automated verification of data for correctness, code quality, and adherence to specifications. + + +Validators act as **quality gates** in your generation pipeline. Use them to filter invalid records, score code quality, verify format compliance, or integrate with external validation services. + + +## Overview + +Validation columns execute validation logic against target columns and produce structured results indicating: + +- **`is_valid`**: Boolean pass/fail status +- **Additional metadata**: Error messages, scores, severity levels, and custom fields + +Validators currently support three execution strategies: + +1. **Code validation**: Lint and check Python or SQL code using industry-standard tools +2. **Local callable validation**: Execute custom Python functions for flexible validation logic +3. **Remote validation**: Send data to HTTP endpoints for external validation services + +## Validator Types + +### 🐍 Python Code Validator + +The Python code validator runs generated Python code through [Ruff](https://github.com/astral-sh/ruff), a fast Python linter that checks for syntax errors, undefined variables, and code quality issues. + +**Configuration:** + +```python +import data_designer.config as dd + +validator_params = dd.CodeValidatorParams(code_lang=dd.CodeLang.PYTHON) +``` + +**Validation Output:** + +Each validated record returns: + +- **`is_valid`**: `True` if no fatal or error-level issues found +- **`python_linter_score`**: Quality score from 0-10 (based on pylint formula) +- **`python_linter_severity`**: Highest severity level found (`"none"`, `"convention"`, `"refactor"`, `"warning"`, `"error"`, `"fatal"`) +- **`python_linter_messages`**: List of linter messages with line numbers, columns, and descriptions + +**Severity Levels:** + +- **Fatal**: Syntax errors preventing code execution +- **Error**: Undefined names, invalid syntax +- **Warning**: Code smells and potential issues +- **Refactor**: Simplification opportunities +- **Convention**: Style guide violations + +A record is marked valid if it has no messages or only messages at warning/convention/refactor levels. + +**Example Validation Result:** + +```python +{ + "is_valid": False, + "python_linter_score": 0, + "python_linter_severity": "error", + "python_linter_messages": [ + { + "type": "error", + "symbol": "F821", + "line": 1, + "column": 7, + "message": "Undefined name `it`" + } + ] +} +``` + +### 🗄️ SQL Code Validator + +The SQL code validator uses [SQLFluff](https://github.com/sqlfluff/sqlfluff), a dialect-aware SQL linter that checks query syntax and structure. + +**Configuration:** + +```python +import data_designer.config as dd + +validator_params = dd.CodeValidatorParams(code_lang=dd.CodeLang.SQL_POSTGRES) +``` + + +The SQL code validator supports multiple dialects: `SQL_POSTGRES`, `SQL_ANSI`, `SQL_MYSQL`, `SQL_SQLITE`, `SQL_TSQL` and `SQL_BIGQUERY`. + + +**Validation Output:** + +Each validated record returns: + +- **`is_valid`**: `True` if no parsing errors found +- **`error_messages`**: Concatenated error descriptions (empty string if valid) + +The validator focuses on parsing errors (PRS codes) that indicate malformed SQL. It also checks for common pitfalls like `DECIMAL` definitions without scale parameters. 
+ +**Example Validation Result:** + +```python +# Valid SQL +{ + "is_valid": True, + "error_messages": "" +} + +# Invalid SQL +{ + "is_valid": False, + "error_messages": "PRS: Line 1, Position 1: Found unparsable section: 'NOT SQL'" +} +``` + +### 🔧 Local Callable Validator + +The local callable validator executes custom Python functions for flexible validation logic. + +**Configuration:** + +```python +import pandas as pd + +import data_designer.config as dd + + +def my_validation_function(df: pd.DataFrame) -> pd.DataFrame: + """Validate that values are positive. + + Args: + df: DataFrame with target columns + + Returns: + DataFrame with is_valid column and optional metadata + """ + result = pd.DataFrame() + result["is_valid"] = df["price"] > 0 + result["error_message"] = result["is_valid"].apply( + lambda valid: "" if valid else "Price must be positive" + ) + return result + + +validator_params = dd.LocalCallableValidatorParams( + validation_function=my_validation_function, + output_schema={ # Optional: enforce output schema + "type": "object", + "properties": { + "data": { + "type": "array", + "items": { + "type": "object", + "properties": { + "is_valid": {"type": ["boolean", "null"]}, + "error_message": {"type": "string"} + }, + "required": ["is_valid"] + } + } + } + } +) +``` + +**Function Requirements:** + +- **Input**: DataFrame with target columns +- **Output**: DataFrame with `is_valid` column (boolean or null) +- **Extra fields**: Any additional columns become validation metadata + +The `output_schema` parameter is optional but recommended—it validates the function's output against a JSON schema, catching unexpected return formats. + +### 🌐 Remote Validator + +The remote validator sends data to HTTP endpoints for validation-as-a-service. This is useful for when you have validation software that needs to run on external compute and you can expose it through a service. Some examples are: + +- External linting services +- Security scanners +- Domain-specific validators +- Proprietary validation systems + + +Currently, the remote validator is only able to perform unauthenticated API calls. When implementing your own service, you can rely on network isolation for security. If you need to reach a service that requires authentication, you should implement a local proxy. 
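For example, a thin local proxy can inject credentials before forwarding requests to an authenticated upstream service. The sketch below uses FastAPI and httpx purely as an illustration; the upstream URL and `AUTH_TOKEN` environment variable are assumptions.

```python
# Hypothetical proxy: Data Designer calls this unauthenticated local endpoint,
# which forwards the payload to a secured upstream validation service.
import os

import httpx
from fastapi import FastAPI, Request

app = FastAPI()
UPSTREAM_URL = "https://secure-validator.example.com/validate"  # assumed endpoint


@app.post("/validate")
async def validate(request: Request) -> dict:
    payload = await request.json()  # the {"data": [...]} body sent by Data Designer
    async with httpx.AsyncClient(timeout=30.0) as client:
        response = await client.post(
            UPSTREAM_URL,
            json=payload,
            headers={"Authorization": f"Bearer {os.environ['AUTH_TOKEN']}"},
        )
    # Return the upstream body unchanged so it still matches the expected
    # {"data": [{"is_valid": ...}, ...]} response format.
    return response.json()
```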
+ + +**Configuration:** + +```python +import data_designer.config as dd + +validator_params = dd.RemoteValidatorParams( + endpoint_url="https://api.example.com/validate", + timeout=30.0, # Request timeout in seconds + max_retries=3, # Retry attempts on failure + retry_backoff=2.0, # Exponential backoff factor + max_parallel_requests=4, # Concurrent request limit + output_schema={ # Optional: enforce response schema + "type": "object", + "properties": { + "data": { + "type": "array", + "items": { + "type": "object", + "properties": { + "is_valid": {"type": ["boolean", "null"]}, + "confidence": {"type": "string"} + } + } + } + } + } +) +``` + +**Request Format:** + +The validator sends POST requests with this structure: + +```json +{ + "data": [ + {"column1": "value1", "column2": "value2"}, + {"column1": "value3", "column2": "value4"} + ] +} +``` + +**Expected Response Format:** + +The endpoint must return: + +```json +{ + "data": [ + { + "is_valid": true, + "custom_field": "any additional metadata" + }, + { + "is_valid": false, + "custom_field": "more metadata" + } + ] +} +``` + +**Retry Behavior:** + +The validator automatically retries on: + +- Network errors +- HTTP status codes: 429 (rate limit), 500, 502, 503, 504 + +Failed requests use exponential backoff: `delay = retry_backoff^attempt`. + +**Parallelization:** + +Set `max_parallel_requests` to control concurrency. Higher values improve throughput but increase server load. The validator batches requests according to the `batch_size` parameter in the validation column configuration. + +## Using Validators in Columns + +Add validation columns to your configuration using the builder's `add_column` method: + +```python +import data_designer.config as dd + +builder = dd.DataDesignerConfigBuilder() + +# Generate Python code +builder.add_column( + dd.LLMCodeColumnConfig( + name="sorting_algorithm", + prompt="Write a Python function to sort a list using bubble sort.", + code_lang=dd.CodeLang.PYTHON, + model_alias="my-model" + ) +) + +# Validate the generated code +builder.add_column( + dd.ValidationColumnConfig( + name="code_validation", + target_columns=["sorting_algorithm"], + validator_type="code", + validator_params=dd.CodeValidatorParams(code_lang=dd.CodeLang.PYTHON), + batch_size=10, + drop=False, + ) +) +``` + +The `target_columns` parameter specifies which columns to validate. All target columns are passed to the validator together (except for code validators, which process each column separately). + +### Configuration Parameters + +See more about parameters used to instantiate `ValidationColumnConfig` in the [API reference](/api/column-configs). + +### Batch Size Considerations + +Larger batch sizes improve efficiency but consume more memory: + +- **Code validators**: 5-20 records (file I/O overhead) +- **Local callable**: 10-50 records (depends on function complexity) +- **Remote validators**: 1-10 records (network latency, server capacity) + +Adjust based on: + +- Validator computational cost +- Available memory +- Network bandwidth (for remote validators) +- Server rate limits + +If the validation logic uses information from other samples, only samples in the batch will be considered. 
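For instance, a duplicate check written as a local callable only sees the rows in its current batch, so duplicates that span batches would go undetected. A minimal sketch (the function name and `question` column are illustrative):

```python
import pandas as pd


def flag_batch_duplicates(df: pd.DataFrame) -> pd.DataFrame:
    """Mark rows whose 'question' value repeats within the current batch."""
    result = pd.DataFrame()
    # duplicated() only compares rows inside this batch, not the full dataset
    result["is_valid"] = ~df["question"].duplicated(keep="first")
    result["error_message"] = result["is_valid"].map(
        lambda ok: "" if ok else "Duplicate question within this batch"
    )
    return result
```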
+ +### Multiple Column Validation + +Validate multiple columns simultaneously: + +```python +import data_designer.config as dd + +builder.add_column( + dd.ValidationColumnConfig( + name="multi_column_validation", + target_columns=["column_a", "column_b", "column_c"], + validator_type="remote", + validator_params=dd.RemoteValidatorParams( + endpoint_url="https://api.example.com/validate" + ) + ) +) +``` + +**Note**: Code validators always process each target column separately, even when multiple columns are specified. Local callable and remote validators receive all target columns together. + +## See Also + +- [Validator Parameters Reference](/api/validator-params): Configuration object schemas diff --git a/fern/v0.5.0/pages/contributing.mdx b/fern/v0.5.0/pages/contributing.mdx new file mode 100644 index 00000000..b317a75a --- /dev/null +++ b/fern/v0.5.0/pages/contributing.mdx @@ -0,0 +1,239 @@ +--- +title: 🎨✨ Contributing to NeMo Data Designer 🎨✨ +description: How to contribute to NeMo Data Designer +--- + +Thank you for your interest in contributing to Data Designer! + +We welcome contributions from the community and sincerely appreciate your efforts to improve the project. Whether you're fixing a typo, reporting a bug, proposing a new feature, or implementing a major enhancement, your work helps make Data Designer better for everyone 🎉. + +This guide will help you get started with the contribution process. + +## Table of Contents + +- [Getting Started](#getting-started) +- [Ways to Contribute](#ways-to-contribute) +- [Feature Requests](#feature-requests) +- [Development Guide](#development-guide) +- [Submitting Changes](#submitting-changes) +- [Code of Conduct](#code-of-conduct) +- [Signing off on your work](#signing-off-on-your-work) + + +## Getting Started +👋 Welcome to the Data Designer community! We're excited to have you here. + +Whether you're new to the project or ready to dive in, the resources below will help you get oriented and productive quickly: + +1. **[README.md](https://github.com/NVIDIA-NeMo/DataDesigner/blob/main/README.md)** – best place to start to learn the basics of the project + +2. **[AGENTS.md](https://github.com/NVIDIA-NeMo/DataDesigner/blob/main/AGENTS.md)** – context and instructions to help AI coding agents work on Data Designer (it's also useful for human developers!) + +3. **[Documentation](https://nvidia-nemo.github.io/DataDesigner/)** – detailed documentation on Data Designer's capabilities and usage + +## Ways to Contribute + +There are many ways to contribute to Data Designer: + +### 🐛 Bug Fixes + +Found a bug? Before reporting, please +1. Verify you're using the latest version: `uv pip install --upgrade data-designer` +2. Search for duplicates in the [issue tracker](https://github.com/NVIDIA-NeMo/DataDesigner/issues) + +When [creating a bug report](https://github.com/NVIDIA-NeMo/DataDesigner/issues/new), please include: +- Data Designer version +- Python version and operating system +- Minimal reproducible example +- Expected vs. actual behavior +- Full error messages and stack traces + +If you are interested in fixing the bug yourself, that's AWESOME! Please follow the [development guide](#development-guide) to get started. + +### ✨ Feature Implementation +Want to add new functionality? Great! Please review [our development approach](#feature-requests) and open a feature request to discuss the idea and get feedback before investing significant time on the implementation. + +### 📖 Documentation Improvements +Documentation is crucial for user adoption. 
Contributions that clarify usage, add examples, or fix typos are highly valued. + +### 💡 Examples and Tutorials +Share your use cases! Example notebooks and tutorials help others understand how to leverage Data Designer effectively. + +### 🧪 Test Coverage +Help us improve test coverage by adding tests for untested code paths or edge cases. + +## Feature Requests +Data Designer is designed to be as flexible and extensible as possible, and we welcome your ideas for pushing its capabilities even further! To keep the core library maintainable, while also supporting innovation, we take an incremental approach when adding new features – we explore what's already possible, extend through plugins when needed, and integrate the most broadly useful features into the core library: + +### How We Grow Data Designer +1. 🧗 **Explore what's possible**: Can your use case be achieved with current features? We've designed Data Designer to be composable – sometimes creative combinations of existing tools can accomplish what you need. Check out our examples or open an issue if you'd like help exploring this! + +2. 🔌 **Extend through plugins**: If existing features aren't quite enough, consider implementing your idea as a plugin that extends the core library. Plugins let you experiment and share functionality while keeping the core library focused. + +3. ⚙️ **Integrate into the core library**: If your feature or plugin proves broadly useful and aligns with Data Designer's goals, we'd love to integrate it into the core library! We're happy to discuss whether it's a good fit and how to move forward together. + +This approach helps us grow thoughtfully while keeping Data Designer focused and maintainable. + +### Submitting a Feature Request +Open a [new issue](https://github.com/NVIDIA-NeMo/DataDesigner/issues/new) with: + +- **Clear title**: Concise description of the feature +- **Use case**: Explain what problem this solves and why it's important +- **Proposed solution**: Describe how you envision the feature working +- **Alternatives considered**: Other approaches you've thought about +- **Examples**: Code examples or mockups of how users would interact with the feature +- **Willingness to implement**: Are you interested in implementing this yourself? + +## Development Guide +Data Designer uses [`uv`](https://github.com/astral-sh/uv) for dependency management. If you don't have uv installed, follow their [installation instructions](https://docs.astral.sh/uv/getting-started/installation/). + +### Initial Setup +0. **Create or find an issue** + + Before starting work, ensure there's an issue tracking your contribution: + + - For bug fixes: Search [existing issues](https://github.com/NVIDIA-NeMo/DataDesigner/issues) or [create a new one](https://github.com/NVIDIA-NeMo/DataDesigner/issues/new) + - For new features: Open a [feature request](#feature-requests) to discuss the approach first + - Comment on the issue to let maintainers know you're working on it + +1. **Fork and clone the repository** + + Start by [forking the Data Designer repository](https://github.com/NVIDIA-NeMo/DataDesigner/fork), then clone your fork and add the upstream remote: + + ```bash + git clone https://github.com/YOUR_GITHUB_USERNAME/DataDesigner.git + + cd DataDesigner + + git remote add upstream https://github.com/NVIDIA-NeMo/DataDesigner.git + ``` + +2. 
**Install dependencies**
+
+    ```bash
+    # Install project with dev dependencies
+    make install-dev
+
+    # Or, if you use Jupyter / IPython for development
+    make install-dev-notebooks
+    ```
+
+3. **Verify your setup**
+
+    ```bash
+    make test && make check-all
+    ```
+
+    If no errors are reported, you're ready to develop 🚀
+
+### Making Changes
+
+1. **Create a feature branch**
+
+    ```bash
+    git checkout main
+    git pull upstream main
+    git checkout -b <username>/<type>/<issue-number>-<short-description>
+    ```
+
+    Example types of change:
+
+    - `feat` for new features
+    - `fix` for bug fixes
+    - `docs` for documentation updates
+    - `test` for testing changes
+    - `refactor` for code refactoring
+    - `chore` for chore tasks
+    - `style` for style changes
+    - `perf` for performance improvements
+
+    Example branch name:
+
+    - `johnnygreco/feat/123-add-xyz-generator` for a new feature by @johnnygreco, addressing issue #123
+
+2. **Develop your changes**
+
+    Please follow the patterns and conventions used throughout the codebase, as well as those outlined in [AGENTS.md](https://github.com/NVIDIA-NeMo/DataDesigner/blob/main/AGENTS.md).
+
+3. **Test and validate**
+
+    ```bash
+    make check-all-fix  # Format code and fix linting issues
+    make test           # Run all tests
+    make coverage       # Check test coverage (must be >90%)
+    ```
+
+    **Writing tests**: Place tests in [tests/](https://github.com/NVIDIA-NeMo/DataDesigner/blob/main/tests/) mirroring the source structure. Use fixtures from [tests/conftest.py](https://github.com/NVIDIA-NeMo/DataDesigner/blob/main/tests/conftest.py), mock external services with `unittest.mock` or `pytest-httpx`, and test both success and failure cases. See [AGENTS.md](https://github.com/NVIDIA-NeMo/DataDesigner/blob/main/AGENTS.md) for patterns and examples.
+
+4. **Commit your work**
+
+    Write clear, descriptive commit messages with a brief summary line (50 characters or less when possible), and reference issue numbers when applicable (e.g., "Fixes #123").
+
+    ```bash
+    git commit -m "Add XYZ generator for synthetic data" -m "Fixes #123"
+    ```
+
+5. **Stay up to date**
+
+    Regularly sync your branch with upstream changes:
+
+    ```bash
+    git fetch upstream
+    git merge upstream/main
+    ```
+
+## Submitting Changes
+
+### Before Submitting
+
+Ensure your changes meet the following criteria:
+
+- All tests pass (`make test`)
+- Code is formatted and linted (`make check-all-fix`)
+- New functionality includes tests
+- Documentation is updated (README, docstrings, examples)
+- License headers are present on all new files
+- Commit messages are clear and descriptive
+
+### Creating a Pull Request
+
+1. **Push your changes** to your fork:
+
+    ```bash
+    git push origin <username>/<type>/<issue-number>-<short-description>
+    ```
+
+2. **Open a pull request** on GitHub from your fork to the main repository
+
+3. **Respond to review feedback** and update your PR as needed
+
+### Pull Request Review Process
+
+- Maintainers will review your PR and may request changes
+- Address feedback by pushing additional commits to your branch
+- Reply to the feedback comment with a link to the commit that addresses it
+- Once approved, a maintainer will merge your PR
+- Your contribution will be included in the next release!
+
+## Code of Conduct
+Data Designer follows the Contributor Covenant Code of Conduct. We are committed to providing a welcoming and inclusive environment for all contributors.
+
+**Please read our complete [Code of Conduct](https://github.com/NVIDIA-NeMo/DataDesigner/blob/main/CODE_OF_CONDUCT.md)** for full details on our standards and expectations.
+ +### License File Headers +All code files that are added to this repository must include the appropriate NVIDIA copyright header: + +```python +# SPDX-FileCopyrightText: Copyright (c) {YEAR} NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +``` + +Use `make update-license-headers` to add headers automatically. + +## Signing off on your work + +When contributing to this project, you must agree that you have authored 100% of the content, that you have the necessary rights to the content and that the content you contribute may be provided under the project license. All contributors are asked to sign the Data Designer [Developer Certificate of Origin (DCO)](https://github.com/NVIDIA-NeMo/DataDesigner/blob/main/DCO) when submitting their first pull request. The process is automated by a bot that will comment on the pull request. Our DCO is the same as the Linux Foundation requires its contributors to sign. + +--- + +Thank you for contributing to NeMo Data Designer! Your efforts help make synthetic data generation more accessible and powerful for everyone. 🎨✨ diff --git a/fern/v0.5.0/pages/devnotes/deep-research-trajectories.mdx b/fern/v0.5.0/pages/devnotes/deep-research-trajectories.mdx new file mode 100644 index 00000000..0b0d2df6 --- /dev/null +++ b/fern/v0.5.0/pages/devnotes/deep-research-trajectories.mdx @@ -0,0 +1,910 @@ +--- +title: "Deep Research Trajectories with NeMo Data Designer and MCP Tool Use" +description: Using MCP tool-use support to generate multi-turn research trajectories for training deep research agents. +--- + +Data Designer v0.5.0's MCP [tool-use support](/docs/concepts/tool-use-and-mcp) lets you generate multi-turn research trajectories, the kind of data needed to train deep research agents that iteratively search, read, and synthesize evidence before answering a question. + +--- + +Deep research agents like [OpenResearcher](https://github.com/TIGER-AI-Lab/OpenResearcher) (Li, Jiang, Ma et al., 2026) and [Universal Deep Research](https://arxiv.org/abs/2509.00244) (Belcak & Molchanov, 2025) generate long reasoning chains interleaved with tool calls: formulating queries, retrieving documents, reading passages, refining hypotheses, and eventually synthesizing an answer. Training these agents requires trajectory data capturing the full multi-turn interaction between a model and its tools: every search, every document opened, every dead end explored. + +OpenResearcher demonstrated something worth paying attention to: synthetic trajectories generated against a *local* retriever ([BM25](https://dl.acm.org/doi/abs/10.1561/1500000019) over a static corpus, no web APIs) are sufficient to train [Nemotron Nano 3](https://huggingface.co/nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-BF16) to outperform GPT-4.1 on deep research benchmarks. The data format (complete tool-use traces showing how a model moves through an information space) matters more than model scale. Nemotron Nano 3, with only 3B active parameters, beats models orders of magnitude larger on multi-hop research tasks. + +This post shows how to generate that same kind of training data using Data Designer's MCP tool-use capabilities. We build a retriever as an MCP server, construct a corpus with known-good evidence, run a teacher model through the full research process, and use an LLM judge for rejection sampling. The result is a pipeline that produces high-quality research trajectories you can use for supervised fine-tuning or as a starting point for RL. 
+ + + +Here's what one of those trajectories looks like, a 4-hop question answered correctly by Claude Opus 4.5 using the pipeline described below. Each line is a tool call; parallel calls within the same turn are grouped. + +**Q:** Who was the mother of the person under whom the colonizer of the winner of a border war with Libya in the 1st century BC reached its greatest extent? + +**Reference:** Marcia + +**Trajectory (simplified):** + +| Turn | Tool Calls | +|------|------------| +| T1 | `search("border war Libya 1st century BC")` | +| T2 | `search("ancient Egypt Libya war conflict 1st century BC")`, `search("Cyrene Libya ancient war border")` | +| T3 | `search("Ptolemaic Egypt Cyrenaica war conflict")`, `search("Egypt border war ancient 1st century BC winner")` | +| T4 | `open(md_001400)` | +| T5 | `search("Egypt Marmarica border war ancient Roman")`, `search("Marmaric War Egypt Libya ancient")` | +| T6 | `search("Greek colony Libya border war Cyrene colonization")`, `search("Thera Cyrene colony Greek colonization ancient")` | +| T7 | `open(md_000167)` | +| T8 | `search("1st century BC war border Libya Egypt conflict")`, `search("Chad Libya border war Aouzou Strip conflict")` | +| T9 | `open(md_001676)` ⭐ (golden passage) | +| T10 | `search("Roman Empire war Libya Numidia Africa ancient")`, `search("Jugurthine War Numidia Rome Libya")` | +| T11 | `open(fw_021746)` | +| T12 | `search("Egypt ancient war tribes Libya Marmaridae border conflict")`, `search("Cornelius Gallus Egypt prefect war border")` | +| T13 | `open(md_001054)` ⭐ (golden passage) | +| T14 | `search("Roman Empire greatest extent Trajan mother")`, `search("Trajan Roman Empire greatest territorial extent")` | +| T15 | `open(md_002075)` ⭐, `open(md_002510)` ⭐ (golden passages) | +| T16 | `search("Egypt Roman province colony Augustus annexed")`, `search("Trajan mother Marcia family")` | +| ... | *(additional refinement searches)* | +| T31 | **Answer:** Based on my extensive research, I have gathered the following evidence: 1. The Roman Empire reached its greatest territorial extent under Trajan (117 CE). 2. Trajan's mother was from the Gens Marcia. 3. Egypt was colonized/annexed by Rome after Cleopatra's death (30 BC). 4. Egypt had border conflicts with Libya in the 1st century BC. **The answer is Marcia.** | + +⭐ = golden passage (contains evidence for the answer) + + + +--- + +## Step 1: Building the Retrieval MCP Server + +OpenResearcher's key design choice is a three-tool browser interface rather than a single retrieval call. The paper argues (and their ablations confirm) that separating search, document opening, and in-document search forces the model to develop genuine research strategies: skimming results, diving into promising documents, hunting for specific evidence within them. A single monolithic "retrieve" tool collapses this entire workflow into one step, which produces shorter and less useful training trajectories. + +We implement the same three tools as an MCP server that Data Designer can invoke during generation. Our retriever uses [BM25S](https://github.com/xhluca/bm25s) for fast lexical search over the corpus: + +```python +from mcp.server.fastmcp import FastMCP + +mcp_server = FastMCP("corpus-retriever") + +@mcp_server.tool() +def search(query: str, top_k: int = 10) -> dict: + """Search for candidate documents to explore.""" + # BM25S search over the corpus, returns ranked results with snippets + ... 
+ +@mcp_server.tool(name="open") +def open_document(doc_id: str) -> dict: + """Open a document for detailed inspection with cursor-numbered chunks.""" + # Returns content formatted as [1] paragraph... [2] paragraph... + ... + +@mcp_server.tool() +def find(doc_id: str, query: str) -> dict: + """Find matching passages inside a document by keyword.""" + # Returns matching chunks with cursor positions + ... + +if __name__ == "__main__": + mcp_server.run() +``` + +- **`search`** returns a ranked list of document IDs with short snippets, enough for the model to decide which documents look promising. +- **`open`** returns the full document content, split into cursor-numbered chunks so the model can reference specific passages. +- **`find`** does targeted keyword search *within* a single document, letting the model locate specific evidence without reading the entire thing. + +The cursor-based chunking across `open` and `find` gives the model a way to scan long documents incrementally, the way a human researcher would scan a paper for the relevant section rather than reading it cover to cover. + +The server runs as a local stdio process, which means Data Designer launches and manages it automatically. No external services, no API keys for retrieval, no rate limits. + +--- + +## Step 2: Building the Corpus + +The corpus design follows directly from OpenResearcher's most striking ablation result. They tested what happens when you vary the retrieval corpus while keeping the reasoning model fixed (GPT-OSS-120B). The results, from the [OpenResearcher Appendix](https://boiled-honeycup-4c7.notion.site/Appendix-301e290627b58082abffd1ea2c262eb2): + +| Corpus | BrowseComp-Plus Accuracy | +| :---- | :----: | +| Golden passages only (BrowseComp-Plus corpus) | 56.0% | +| 15M FineWeb + golden passages | 31.2% | +| 15M FineWeb only | 0.71% | + +Without golden passages (documents known to contain evidence for the question), accuracy drops to nearly zero. The model can't learn research strategies from trajectories where every search is a dead end. + +The original OpenResearcher corpus uses 15M documents from [FineWeb](https://huggingface.co/datasets/HuggingFaceFW/fineweb) as distractors alongside 10K golden passages. For this demonstration, we use a lighter-weight approach: we construct the corpus from multi-hop QA datasets: [HotpotQA](https://arxiv.org/abs/1809.09600) (2-hop questions requiring two pieces of linked evidence) and [MuSiQue](https://arxiv.org/abs/2108.00573) (2-4 hop questions composed from single-hop sub-questions). Each question comes with annotated supporting passages, the specific paragraphs that contain the evidence needed to answer it. Golden passages go into the corpus alongside non-supporting passages from the same datasets as distractors, at roughly a 1:9 ratio. The model has to search through noise to find the signal, which is exactly the skill we want the training data to teach. + +The key constraint is that golden passages must be *findable but not obvious*. If the corpus is too small or the golden passages are too easy to identify, the trajectories won't transfer to real-world research where evidence is sparse. The distractor ratio controls this difficulty, and the paper's ablations give us a good starting point for tuning it. + +--- + +## Step 3: The Data Designer Pipeline + +With the retriever server and corpus ready, the Data Designer pipeline ties everything together. We configure a teacher model, point it at the MCP retriever, and let it research each question from scratch. 
For this demo we hosted our own inference server, but anyone can try this pipeline using [Nemotron Nano 3 on build.nvidia.com](https://build.nvidia.com/nvidia/nemotron-3-nano-30b-a3b) with a free API key using the model configuration shown below. + +```python +import data_designer.config as dd +from data_designer.interface import DataDesigner + +# Search rollout model for trajectory generation +config = dd.DataDesignerConfigBuilder() +config.add_model_config( + dd.ModelConfig( + alias="search_rollout_model", + model="nvidia/nemotron-3-nano-30b-a3b", + provider="nvidia", + inference_parameters=dd.ChatCompletionInferenceParams( + temperature=1.0, + top_p=0.95, + max_tokens=16384, + ), + ) +) +``` + +The temperature and top_p settings matter here. We want diverse research strategies across seeds (different query formulations, different document exploration orders) so that rejection sampling has a rich pool to select from. Setting temperature to 1.0 with top_p at 0.95 gives enough variation that the same question can produce meaningfully different trajectories across seeds. + +The MCP tool configuration tells Data Designer which server to use and how many tool-call turns to allow: + +```python +# MCP retriever tool configuration +tool_config = dd.ToolConfig( + tool_alias="knowledge-base", + providers=["corpus-retriever"], + max_tool_call_turns=150, +) +config.add_tool_config(tool_config) +``` + +We set `max_tool_call_turns` high (150) because deep research trajectories can be long. Our longest observed trajectory used 25 tool calls across 53 messages. Capping too low would truncate the most interesting research chains. + +The seed dataset contains the research questions alongside reference answers (which we'll use for rejection sampling in Step 4): + +```python +config.with_seed_dataset( + dd.LocalFileSeedSource(path="questions.jsonl"), +) + +config.add_column( + dd.ExpressionColumnConfig( + name="research_question", + expr="{{ question }}", + ) +) +``` + +The core of the pipeline is the research column, where the teacher model receives a question and a system prompt instructing it to use the retriever tools: + +```python +SYSTEM_PROMPT = """You are a thorough research assistant. You have access to three tools \ +for navigating a knowledge base: +- search(query, top_k): Find candidate documents relevant to your query +- open(doc_id): Open a document to read its full content in numbered chunks +- find(doc_id, query): Locate specific passages within a document by keyword + +Your task is to research the given question by searching for relevant documents, \ +reading their content, and synthesizing an answer from the evidence you find. \ +Be systematic: formulate search queries, explore promising results, and gather \ +evidence before answering. Cite specific passages when possible.""" + +config.add_column( + dd.LLMTextColumnConfig( + name="research_answer", + prompt="Research and answer thoroughly:\n\n{{ research_question }}", + model_alias="search_rollout_model", + system_prompt=SYSTEM_PROMPT, + tool_alias="knowledge-base", + with_trace=dd.TraceType.ALL_MESSAGES, + extract_reasoning_content=True, + ) +) +``` + +Two settings are doing the important work here: + +- **`with_trace=dd.TraceType.ALL_MESSAGES`** captures the *entire* interaction (every tool call, every tool response, every intermediate reasoning step) into a separate trace column in ChatML format. This is the training data: the full trajectory of how the model moved through the information space. 
+- **`extract_reasoning_content=True`** pulls out the model's internal chain-of-thought separately, so you can include or exclude it depending on your training setup. + +--- + +## Step 4: Rejection Sampling with an LLM Judge + +Not every trajectory leads to a correct answer. OpenResearcher's approach is straightforward. Generate multiple trajectories per question, score them for correctness, and keep only the ones that got the right answer. We implement this with Data Designer's `LLMJudgeColumnConfig`, using a separate (smaller) model as the judge: + +```python +# Judge model for rejection sampling +config.add_model_config( + dd.ModelConfig( + alias="judge", + model="nvidia/nemotron-3-nano-30b-a3b", + provider="nvidia", + ) +) + +config.add_column( + dd.LLMJudgeColumnConfig( + name="correctness", + model_alias="judge", + prompt=( + "Question: {{ research_question }}\n" + "Reference answer: {{ answer }}\n" + "Generated answer: {{ research_answer }}\n" + "Does the generated answer correctly address the question?" + ), + scores=[ + dd.Score( + name="correct", + description="Is the answer factually correct?", + options={ + 1: "Correct", + 0: "Incorrect", + }, + ), + ], + ) +) +``` + +The judge compares the generated answer against the reference answer from the seed dataset. Using a smaller model as judge is deliberate. We don't need the judge to *reason* about the question, just to compare two answers for factual agreement. This keeps costs down when scoring thousands of trajectories. + +In practice, you'd generate multiple trajectories per question (varying the random seed) and filter to `correctness.correct == 1`. The incorrect trajectories aren't wasted; they can serve as negative examples for preference-based training methods like DPO. + +--- + +## Multi-Turn Tool Calling: Rough Edges in the Open Model Ecosystem + +The pipeline described above is straightforward in principle. In practice, getting multi-turn tool calling to work reliably with open-weight models served through vLLM turned out to be the hardest part of this project. + +We tested two open-weight models on a self-hosted [vLLM (v0.15.1)](https://github.com/vllm-project/vllm/releases/tag/v0.15.1) instance: [GPT-OSS-120B](https://huggingface.co/openai/gpt-oss-120b) and [Kimi K2.5](https://huggingface.co/moonshotai/Kimi-K2.5). Both failed to produce usable research trajectories, for related but distinct reasons. + + +**GPT-OSS-120B** uses a "Harmony" output format that routes text through named channels (reasoning, final answer, tool calls). When tools are involved, vLLM's parser consistently routes the model's output to the wrong channel: everything lands in `reasoning_content` while the `content` field stays empty. This happens at all `reasoning_effort` levels. + +**Kimi K2.5** exhibits a different failure mode. With thinking mode enabled, it has the same channel-routing problem. With thinking mode disabled, the model produces content text, but after the first tool result, it *narrates* what it plans to do next rather than emitting another tool call. + + +The original OpenResearcher codebase handles this by bypassing vLLM's tool call parser entirely. They hit the raw `/completions` endpoint, parse `` XML tags from the output with regex, and continue looping until the model emits an explicit answer marker. + +The open-source tool-calling stack is growing and maturing quickly, but multi-turn tool use with reasoning models is still a rough edge. 
For now, the practical path is to use models with battle-tested tool-calling support through their native APIs, which is what we do in the results below.
+
+---
+
+## Results
+
+We ran 64 questions uniformly sampled across 2, 3, and 4-hop difficulty levels from MuSiQue, with 50K FineWeb web documents as distractors (a 1:100 golden-to-distractor ratio). We tested two models: Claude Opus 4.5 (via API) and Nemotron Nano 3 (30B total / 3B active params, self-hosted via vLLM with reasoning disabled).
+
+| | Claude Opus 4.5 | Nemotron Nano 3 |
+| :---- | :----: | :----: |
+| **Samples** | 64 (55 completed) | 64 (61 completed) |
+| **Overall accuracy** | 41/55 (75%) | 32/61 (52%) |
+| **2-hop accuracy** | 18/23 (78%) | 13/23 (57%) |
+| **3-hop accuracy** | 15/18 (83%) | 11/22 (50%) |
+| **4-hop accuracy** | 8/14 (57%) | 8/16 (50%) |
+| **Avg tool calls** | 16.8 | 11.8 |
+| **Max tool calls** | 57 | 63 |
+| **Avg messages per trajectory** | 40.4 | 26.5 |
+| **Max messages per trajectory** | 117 | 129 |
+
+Opus is 22 points more accurate, but Nano runs roughly 5x faster on self-hosted hardware. Tool usage scales with hop count for Opus; for Nano the trend is less consistent. Nano uses fewer tools but achieves lower accuracy, with the largest gap on 3-hop questions (83% vs 50%). Splitting by correctness reveals the same pattern in both models: incorrect trajectories are longer.
+
+
+**Claude Opus 4.5:**
+
+| Outcome | Hops | Count | Avg Tool Calls | Avg Messages | Avg Answer Length |
+| :---- | :----: | :----: | :----: | :----: | :----: |
+| **Correct** | 2 | 18 | 7.3 | 18.9 | 1,072 chars |
+| | 3 | 15 | 14.9 | 35.7 | 1,372 chars |
+| | 4 | 8 | 21.0 | 50.6 | 1,705 chars |
+| | **All** | **41** | **12.8** | **31.2** | **1,305 chars** |
+| **Incorrect** | 2 | 5 | 21.0 | 48.6 | 1,534 chars |
+| | 3 | 3 | 25.7 | 63.0 | 1,795 chars |
+| | 4 | 6 | 36.0 | 85.2 | 1,903 chars |
+| | **All** | **14** | **28.4** | **67.4** | **1,748 chars** |
+
+**Nemotron Nano 3:**
+
+| Outcome | Hops | Count | Avg Tool Calls | Avg Messages | Avg Answer Length |
+| :---- | :----: | :----: | :----: | :----: | :----: |
+| **Correct** | 2 | 13 | 6.5 | 16.1 | 773 chars |
+| | 3 | 11 | 12.7 | 28.5 | 708 chars |
+| | 4 | 8 | 8.0 | 19.0 | 1,600 chars |
+| | **All** | **32** | **9.0** | **21.1** | **957 chars** |
+| **Incorrect** | 2 | 10 | 10.1 | 23.2 | 799 chars |
+| | 3 | 11 | 18.0 | 39.0 | 1,163 chars |
+| | 4 | 8 | 16.2 | 35.5 | 848 chars |
+| | **All** | **29** | **14.8** | **32.6** | **951 chars** |
+
+
+Correct trajectories are shorter at every hop level for both models. Incorrect trajectories are roughly twice as long because the model keeps searching when it can't find evidence, then writes a longer answer to compensate. This anti-correlation between trajectory length and correctness is consistent across model scales, which means trajectory length alone could serve as a lightweight filter during rejection sampling; a minimal sketch of such a filter is shown below.
+
+---
+
+## Closing Remarks
+
+Thanks to the [OpenResearcher](https://github.com/TIGER-AI-Lab/OpenResearcher) team for their work showing that synthetic research trajectories over local retrieval can train small models to compete with much larger ones. Their results suggest we're only beginning to understand how LLMs interact with search tools and how the structure of those interactions shapes what models learn. We're excited to see where the community takes synthetic data research using [NeMo Data Designer](https://github.com/NVIDIA-NeMo/DataDesigner) as both the models and the tooling continue to improve.
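+
+As a concrete companion to the results above, here is a minimal post-generation filter for rejection sampling. It assumes the generated dataset has been exported to a JSONL file and that the judge score and message trace are available under the column names shown; those names (and the length threshold) are illustrative assumptions, so adjust them to match your actual output schema.
+
+```python
+import json
+
+kept, rejected = [], []
+with open("research-trajectories.jsonl") as f:  # hypothetical export path
+    for line in f:
+        record = json.loads(line)
+        # "correctness" holds the judge's score; "research_answer__trace" holds the
+        # ChatML message trace. Both column names are assumptions for this sketch.
+        is_correct = record["correctness"]["correct"] == 1
+        num_messages = len(record["research_answer__trace"])
+        # Keep correct trajectories; optionally drop extremely long ones, since
+        # incorrect trajectories tend to be roughly twice as long as correct ones.
+        if is_correct and num_messages <= 80:
+            kept.append(record)
+        else:
+            rejected.append(record)  # candidates for DPO-style negative examples
+
+print(f"Kept {len(kept)} trajectories, rejected {len(rejected)}.")
+```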
+ +--- + +## Try For Yourself + + + +```python +import data_designer.config as dd +from data_designer.interface import DataDesigner + +# Models +config = dd.DataDesignerConfigBuilder() +config.add_model_config( + dd.ModelConfig( + alias="search_rollout_model", + model="nvidia/nemotron-3-nano-30b-a3b", + provider="nvidia", + inference_parameters=dd.ChatCompletionInferenceParams( + temperature=1.0, + top_p=0.95, + max_tokens=16384, + ), + ) +) +config.add_model_config( + dd.ModelConfig( + alias="judge", + model="nvidia/nemotron-3-nano-30b-a3b", + provider="nvidia", + ) +) + +# MCP retriever +tool_config = dd.ToolConfig( + tool_alias="knowledge-base", + providers=["corpus-retriever"], + max_tool_call_turns=150, +) +config.add_tool_config(tool_config) + +# Seed questions with reference answers +config.with_seed_dataset( + dd.LocalFileSeedSource(path="questions.jsonl"), +) + +config.add_column( + dd.ExpressionColumnConfig( + name="research_question", + expr="{{ question }}", + ) +) + +# Research trajectory generation +config.add_column( + dd.LLMTextColumnConfig( + name="research_answer", + prompt="Research and answer thoroughly:\n\n{{ research_question }}", + model_alias="search_rollout_model", + system_prompt=SYSTEM_PROMPT, + tool_alias="knowledge-base", + with_trace=dd.TraceType.ALL_MESSAGES, + extract_reasoning_content=True, + ) +) + +# Rejection sampling judge +config.add_column( + dd.LLMJudgeColumnConfig( + name="correctness", + model_alias="judge", + prompt=( + "Question: {{ research_question }}\n" + "Reference answer: {{ answer }}\n" + "Generated answer: {{ research_answer }}\n" + "Does the generated answer correctly address the question?" + ), + scores=[ + dd.Score( + name="correct", + description="Is the answer factually correct?", + options={ + 1: "Correct", + 0: "Incorrect", + }, + ), + ], + ) +) + +# Run +mcp_provider = dd.LocalStdioMCPProvider( + name="corpus-retriever", + command="uv", + args=["run", "retriever_mcp.py", "serve"], + env={"CORPUS_PATH": "corpus.jsonl"}, +) +data_designer = DataDesigner(mcp_providers=[mcp_provider]) +results = data_designer.create( + config_builder=config, + num_records=1000, + dataset_name="research-trajectories", +) +``` + + + + + +```python +# /// script +# requires-python = ">=3.10" +# dependencies = ["datasets", "huggingface_hub", "pyarrow"] +# /// + +"""Prepare a retrieval corpus and question set for the OpenResearcher demo. + +Builds corpus.jsonl and questions.jsonl from two sources: + + 1. MuSiQue — multi-hop QA dataset (2/3/4-hop) with golden passages + 2. FineWeb — web documents as distractors (matches the OpenResearcher paper) + +Golden passages (documents containing evidence for the answer) are mixed with +FineWeb distractors at roughly 1:100 ratio, so the model must search through +noise to find the signal. 
+ +Usage: + uv run prepare_corpus.py +""" + +from __future__ import annotations + +import json +import random +from pathlib import Path +from urllib.parse import urlparse + + +# --------------------------------------------------------------------------- +# Configuration +# --------------------------------------------------------------------------- + +NUM_QUESTIONS = 192 # 64 per hop level (2, 3, 4) +NUM_FINEWEB_DISTRACTORS = 50_000 +FINEWEB_SHARD = 0 +OUTPUT_DIR = "data" + + +# --------------------------------------------------------------------------- +# MuSiQue extraction +# --------------------------------------------------------------------------- + +def prepare_musique(num_questions: int) -> tuple[list[dict], list[dict]]: + """Load MuSiQue and extract multi-hop questions with golden passages. + + Samples uniformly across hop counts (2, 3, 4) so the dataset has balanced + difficulty. Golden passages (is_supporting=True) go into the corpus; + non-golden passages from the same examples serve as additional distractors. + + Returns: + (questions, corpus_docs) where corpus_docs have is_golden=True/False. + """ + from datasets import load_dataset + + print("Loading MuSiQue (train split)...") + dataset = load_dataset("bdsaglam/musique", split="train") + + # Bucket answerable examples by hop count + hop_buckets: dict[int, list[dict]] = {} + for example in dataset: + if not example.get("answerable", False): + continue + num_hops = len(example.get("question_decomposition", [])) + if num_hops < 2: + continue + hop_buckets.setdefault(num_hops, []).append(example) + + # Sample uniformly: equal questions per hop level + available_hops = sorted(hop_buckets.keys()) + per_hop = num_questions // len(available_hops) + selected_examples = [] + for h in available_hops: + bucket = hop_buckets[h] + n = min(per_hop, len(bucket)) + selected_examples.extend(random.sample(bucket, n)) + + print(f" Selected {len(selected_examples)} questions across hops {available_hops}") + + # Build questions and corpus docs + questions: list[dict] = [] + golden_titles: dict[str, str] = {} + nongolden_titles: dict[str, str] = {} + + for example in selected_examples: + num_hops = len(example["question_decomposition"]) + questions.append({ + "id": f"mq_{len(questions):06d}", + "question": example["question"], + "answer": example["answer"], + "source": "musique", + "num_hops": num_hops, + "seed_id": 0, + }) + + for para in example.get("paragraphs", []): + title = para.get("title", "").strip() + content = para.get("paragraph_text", "").strip() + if not title or not content: + continue + if para.get("is_supporting", False): + if len(content) > len(golden_titles.get(title, "")): + golden_titles[title] = content + else: + if len(content) > len(nongolden_titles.get(title, "")): + nongolden_titles[title] = content + + # Golden passages + corpus_docs = [ + {"title": t, "content": c, "source": "musique", "is_golden": True} + for t, c in sorted(golden_titles.items()) + ] + # Non-golden passages (skip titles already in golden set) + corpus_docs.extend( + {"title": t, "content": c, "source": "musique", "is_golden": False} + for t, c in sorted(nongolden_titles.items()) + if t not in golden_titles + ) + + print(f" Golden passages: {len(golden_titles)}") + print(f" Non-golden passages: {len(corpus_docs) - len(golden_titles)}") + return questions, corpus_docs + + +# --------------------------------------------------------------------------- +# FineWeb distractor caching +# --------------------------------------------------------------------------- 
+ +def cache_fineweb(shard_index: int, max_docs: int) -> list[dict]: + """Download a FineWeb parquet shard and extract English documents. + + Uses huggingface_hub for direct shard download (faster than load_dataset) + and pyarrow for memory-efficient row-group-at-a-time reading. + + Returns: + List of distractor documents with title (domain) and content (text). + """ + from huggingface_hub import hf_hub_download + import pyarrow.parquet as pq + + filename = f"sample/10BT/{shard_index:03d}_00000.parquet" + print(f"Downloading FineWeb shard: {filename}") + parquet_path = hf_hub_download( + repo_id="HuggingFaceFW/fineweb", + repo_type="dataset", + filename=filename, + ) + + pf = pq.ParquetFile(parquet_path) + print(f" {pf.metadata.num_rows:,} rows in shard") + + docs: list[dict] = [] + for rg_idx in range(pf.metadata.num_row_groups): + table = pf.read_row_group(rg_idx, columns=["text", "url", "language", "token_count"]) + batch = table.to_pydict() + + for text, url, lang, tok_count in zip( + batch["text"], batch["url"], batch["language"], batch["token_count"] + ): + if lang != "en" or tok_count < 50: + continue + text = text.strip() + if not text: + continue + + # Use domain as title + try: + domain = urlparse(url).netloc.removeprefix("www.") + except Exception: + domain = "unknown" + + docs.append({ + "title": domain, + "content": text, + "source": "fineweb", + "is_golden": False, + }) + if len(docs) >= max_docs: + break + + if len(docs) >= max_docs: + break + + print(f" Extracted {len(docs):,} English documents (min 50 tokens)") + return docs + + +# --------------------------------------------------------------------------- +# Main +# --------------------------------------------------------------------------- + +def main() -> None: + output_dir = Path(OUTPUT_DIR) + output_dir.mkdir(parents=True, exist_ok=True) + + # Extract MuSiQue questions and golden passages + questions, corpus_docs = prepare_musique(NUM_QUESTIONS) + + # Download FineWeb distractors + fineweb_docs = cache_fineweb(FINEWEB_SHARD, NUM_FINEWEB_DISTRACTORS) + corpus_docs.extend(fineweb_docs) + + # Deduplicate by title (keep longest content) + title_to_best: dict[str, dict] = {} + for doc in corpus_docs: + title = doc["title"] + if title not in title_to_best or len(doc["content"]) > len(title_to_best[title]["content"]): + title_to_best[title] = doc + + corpus = list(title_to_best.values()) + random.shuffle(corpus) + + # Assign stable IDs + prefix_map = {"musique": "md", "fineweb": "fw"} + source_counters: dict[str, int] = {} + for doc in corpus: + prefix = prefix_map.get(doc["source"], "xx") + idx = source_counters.get(doc["source"], 0) + doc["id"] = f"{prefix}_{idx:06d}" + source_counters[doc["source"]] = idx + 1 + + # Write corpus.jsonl + corpus_path = output_dir / "corpus.jsonl" + with open(corpus_path, "w") as f: + for doc in corpus: + f.write(json.dumps(doc, ensure_ascii=False) + "\n") + + # Write questions.jsonl + random.shuffle(questions) + questions_path = output_dir / "questions.jsonl" + with open(questions_path, "w") as f: + for q in questions: + f.write(json.dumps(q, ensure_ascii=False) + "\n") + + # Summary + golden = sum(1 for d in corpus if d["is_golden"]) + nongolden = len(corpus) - golden + print(f"\nCorpus: {len(corpus):,} docs ({golden} golden, {nongolden} distractors)") + print(f"Questions: {len(questions)}") + print(f"Output: {corpus_path.resolve()}") + print(f" {questions_path.resolve()}") + + +if __name__ == "__main__": + main() +``` + + + + + +```python +# /// script +# requires-python = ">=3.10" +# 
dependencies = ["mcp", "bm25s", "PyStemmer"] +# /// + +"""MCP Server: BM25S Corpus Retriever for OpenResearcher-style Deep Research + +A single-file MCP server that indexes a JSONL corpus and exposes BM25S +lexical search via three browser tools: + + - search(query, top_k): ranked document discovery + - open(doc_id): full document inspection with cursor-numbered chunks + - find(doc_id, query): in-document evidence lookup + +Corpus format (JSONL, one document per line): + {"id": "wiki_123", "title": "Christopher Nolan", "content": "Christopher Edward Nolan is a..."} + +Server mode (used by Data Designer): + CORPUS_PATH=corpus.jsonl uv run retriever_mcp.py serve +""" + +from __future__ import annotations + +import argparse +import json +import os +import re +import sys + +import bm25s +from mcp.server.fastmcp import FastMCP + +MCP_SERVER_NAME = "corpus-retriever" + +# Global state — populated at server startup +_bm25_retriever: bm25s.BM25 | None = None +_corpus: list[dict[str, str]] = [] +_id_to_index: dict[str, int] = {} + +mcp_server = FastMCP(MCP_SERVER_NAME) + + +def load_corpus(corpus_path: str) -> list[dict[str, str]]: + """Load a JSONL corpus file into a list of document dicts.""" + docs: list[dict[str, str]] = [] + with open(corpus_path, "r", encoding="utf-8") as f: + for line_num, line in enumerate(f, 1): + line = line.strip() + if not line: + continue + try: + doc = json.loads(line) + except json.JSONDecodeError as e: + print(f"Warning: skipping malformed JSON at line {line_num}: {e}", file=sys.stderr) + continue + if "id" not in doc or "content" not in doc: + print(f"Warning: skipping line {line_num}, missing 'id' or 'content'", file=sys.stderr) + continue + docs.append({ + "id": str(doc["id"]), + "title": str(doc.get("title", "")), + "content": str(doc["content"]), + }) + return docs + + +def build_index(docs: list[dict[str, str]]) -> bm25s.BM25: + """Build a BM25S index over title + content for each document.""" + corpus_texts = [f"{d['title']} {d['content']}" for d in docs] + corpus_tokens = bm25s.tokenize(corpus_texts, stopwords="en") + retriever = bm25s.BM25() + retriever.index(corpus_tokens) + return retriever + + +def initialize(corpus_path: str) -> None: + """Load corpus and build index into global state.""" + global _bm25_retriever, _corpus, _id_to_index + print(f"Loading corpus from {corpus_path}...", file=sys.stderr) + _corpus = load_corpus(corpus_path) + if not _corpus: + print("Warning: corpus is empty", file=sys.stderr) + return + _id_to_index = {doc["id"]: idx for idx, doc in enumerate(_corpus)} + print(f"Building BM25S index over {len(_corpus)} documents...", file=sys.stderr) + _bm25_retriever = build_index(_corpus) + print(f"Index ready. {len(_corpus)} documents indexed.", file=sys.stderr) + + +def _chunk_content(content: str) -> list[str]: + """Split document content into cursor-addressable chunks.""" + paragraph_chunks = [c.strip() for c in re.split(r"\n\s*\n+", content) if c.strip()] + if len(paragraph_chunks) > 1: + return paragraph_chunks + line_chunks = [line.strip() for line in content.splitlines() if line.strip()] + if line_chunks: + return line_chunks + stripped = content.strip() + return [stripped] if stripped else [] + + +@mcp_server.tool() +def search(query: str, top_k: int = 10) -> dict: + """Search for candidate documents to explore. + + Args: + query: Search query string. + top_k: Maximum number of ranked results (default: 10). 
+ """ + global _bm25_retriever, _corpus + if _bm25_retriever is None or not _corpus: + return {"error": "Search index not initialized", "results": []} + query_tokens = bm25s.tokenize([query], stopwords="en") + k = max(1, min(top_k, len(_corpus))) + results, scores = _bm25_retriever.retrieve(query_tokens, k=k) + search_results: list[dict] = [] + for i in range(results.shape[1]): + doc_idx = results[0, i] + score = float(scores[0, i]) + if score <= 0: + continue + doc = _corpus[doc_idx] + snippet = doc["content"][:500] + if len(doc["content"]) > 500: + snippet += "..." + search_results.append({ + "id": doc["id"], + "title": doc["title"], + "snippet": snippet, + "score": round(score, 4), + }) + return {"results": search_results, "query": query, "total": len(search_results)} + + +@mcp_server.tool(name="open") +def open_document(doc_id: str) -> dict: + """Open a document for detailed inspection with cursor-numbered chunks. + + Args: + doc_id: The document ID (from search results). + """ + global _corpus, _id_to_index + if not _corpus: + return {"error": "Corpus not loaded"} + idx = _id_to_index.get(doc_id) + if idx is None: + return {"error": f"Document not found: {doc_id}"} + doc = _corpus[idx] + chunks = _chunk_content(doc["content"]) + numbered_chunks = [{"cursor": i + 1, "text": chunk} for i, chunk in enumerate(chunks)] + formatted = "\n".join(f"[{e['cursor']}] {e['text']}" for e in numbered_chunks) + return { + "id": doc["id"], + "title": doc["title"], + "content": formatted, + "chunks": numbered_chunks, + "total_chunks": len(numbered_chunks), + } + + +@mcp_server.tool() +def find(doc_id: str, query: str) -> dict: + """Find matching passages inside a document by keyword. + + Args: + doc_id: Document ID to search within. + query: Text to find (case-insensitive substring and keyword matching). 
+ """ + global _corpus, _id_to_index + if not _corpus: + return {"error": "Corpus not loaded", "matches": []} + idx = _id_to_index.get(doc_id) + if idx is None: + return {"error": f"Document not found: {doc_id}", "matches": []} + query_text = query.strip().lower() + if not query_text: + return {"error": "Query must be non-empty", "matches": []} + doc = _corpus[idx] + chunks = _chunk_content(doc["content"]) + query_terms = [term for term in re.findall(r"\w+", query_text) if term] + matches: list[dict] = [] + for i, chunk in enumerate(chunks, start=1): + haystack = chunk.lower() + if query_text in haystack or (query_terms and all(t in haystack for t in query_terms)): + matches.append({"cursor": i, "text": chunk}) + return { + "doc_id": doc["id"], + "title": doc["title"], + "query": query, + "matches": matches, + "total_matches": len(matches), + } + + +def serve() -> None: + """Run as MCP server subprocess (called by Data Designer).""" + corpus_path = os.environ.get("CORPUS_PATH", "corpus.jsonl") + initialize(corpus_path) + mcp_server.run() + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="BM25S corpus retriever MCP server") + subparsers = parser.add_subparsers(dest="command") + subparsers.add_parser("serve", help="Run the MCP server (reads CORPUS_PATH from env)") + stats_parser = subparsers.add_parser("stats", help="Print corpus statistics") + stats_parser.add_argument("--corpus-path", default="corpus.jsonl") + args = parser.parse_args() + if args.command == "serve": + serve() + elif args.command == "stats": + docs = load_corpus(args.corpus_path) + total_chars = sum(len(d["content"]) for d in docs) + print(f"Corpus: {args.corpus_path}") + print(f"Documents: {len(docs)}") + print(f"Total content: {total_chars:,} chars (~{total_chars // 4:,} tokens)") + else: + parser.print_help() +``` + + + +## See Also + +- [NeMo Data Designer on GitHub](https://github.com/NVIDIA-NeMo/DataDesigner) +- [OpenResearcher on GitHub](https://github.com/TIGER-AI-Lab/OpenResearcher) +- [OpenResearcher blog post](https://boiled-honeycup-4c7.notion.site/OpenResearcher-A-Fully-Open-Pipeline-for-Long-Horizon-Deep-Research-Trajectory-Synthesis-2f7e290627b5800cb3a0cd7e8d6ec0ea) +- [HotpotQA: A Dataset for Diverse, Explainable Multi-hop Question Answering](https://arxiv.org/abs/1809.09600) +- [MuSiQue: Multi-hop Questions via Single-hop Question Composition](https://arxiv.org/abs/2108.00573) +- [BM25S: Fast lexical search in Python](https://github.com/xhluca/bm25s) diff --git a/fern/v0.5.0/pages/devnotes/design-principles.mdx b/fern/v0.5.0/pages/devnotes/design-principles.mdx new file mode 100644 index 00000000..d6b4cf6a --- /dev/null +++ b/fern/v0.5.0/pages/devnotes/design-principles.mdx @@ -0,0 +1,149 @@ +--- +title: "Designing Data Designer: Why SDG Is a Systems Problem" +description: The design principles behind NeMo Data Designer and why we built it as a composable orchestration framework. +--- + +Synthetic data generation is more than a single prompt to a large language model. In this post, we walk through the design principles behind NeMo Data Designer and explain why we built it as a composable orchestration framework - treating SDG as a system of specialized stages rather than a monolithic generation task. + +![Designing Data Designer: from monolithic prompts to composable pipelines](/assets/images/design-principles-hero.png) + +--- + +When people first encounter synthetic data generation, the instinct is natural: write a detailed prompt, call a powerful LLM, and collect the output. 
For quick experiments and small datasets, this works fine. But as you push toward production-quality data at scale - millions of records, multiple interrelated fields, strict quality requirements - the cracks start to show. We built Data Designer around the conviction that SDG is fundamentally a *systems* problem, and the framework's architecture reflects that belief at every level. + +This post isn't about a specific dataset or benchmark result. Instead, we want to pull back the curtain on the design principles that shaped Data Designer itself, and share the reasoning behind the decisions we made. + +--- + +## A Prompt Is Not All You Need + +The most common approach to synthetic data generation looks something like this: pack everything you need into one prompt - the schema, the constraints, the style guidelines, the quality criteria - and send it to the biggest model you have access to. Crank up `max_tokens`, parse the output, and repeat. + +This approach has a ceiling, and you hit it faster than you'd expect. + +**Quality degrades when you overload a single call.** Asking one model to simultaneously generate content, maintain diversity across a dataset, follow a complex schema, and self-assess quality is asking it to solve several distinct problems at once. The model has to allocate its "attention budget" across all of these competing objectives, and something always gives. Usually it's the subtler requirements - diversity narrows, edge cases get dropped, and the data starts looking suspiciously uniform. + +**Prompts become unmaintainable.** As requirements grow, the prompt balloons. What started as a clean paragraph becomes a multi-page document with nested instructions, conditional logic described in natural language, and examples that compete for context window space. At this point, iterating on one aspect of generation (say, adjusting the complexity distribution or tweaking the output schema) means editing a fragile mega-prompt and hoping nothing else breaks. + +**There are no quality gates.** In a single-call setup, validation happens *after* you've already spent the compute. If 30% of your records are malformed or low-quality, you find out at the end and either filter them out (wasting the tokens) or re-generate (wasting even more). There's no mechanism to catch problems between stages, because there *are* no stages. + +**Scaling is limited.** A single model call is a single point of failure. You can parallelize across records, but you can't parallelize across *stages* of generation, and you can't route different parts of the task to models that are better suited for them. + +None of these are problems with LLMs themselves - they're problems with treating SDG as a single-step task. The fix isn't a better model. It's a better architecture. + +--- + +## SDG as a System of Specialized Stages + +The shift in thinking is straightforward: instead of asking one model to do everything, decompose the generation task into a pipeline of focused stages, each responsible for one well-defined job. + +Regardless of what you're generating - QA pairs for retrieval training, reasoning traces for pretraining, multi-turn conversations for alignment, product reviews for testing, or labeled examples for classification - a well-decomposed SDG pipeline typically has four kinds of stages: + +1. **Seed curation.** Control what goes in. 
Whether you're sampling from an existing corpus, selecting subsets of your data, or generating realistic persona profiles with demographic and personality attributes, the seed data defines the distribution your synthetic data will cover. This is where you control diversity and domain coverage - before any LLM is involved - so that downstream generation stages inherit that diversity naturally through their prompts. + +2. **Staged generation.** Each generation step has a focused job. One stage might extract structured metadata from a document. Another might generate content grounded in that metadata. A third might transform or enrich that content further. Because each stage has a narrow scope, its prompt is simple, its output is predictable, and it's easy to iterate on independently. + +3. **Dependency management.** Later stages build on earlier outputs. A content generation stage needs access to extracted metadata. A formatting stage needs the generated content. These dependencies form a directed acyclic graph (DAG), and the system needs to resolve that graph automatically - so you can focus on defining the stages, not orchestrating them. + +4. **Quality control.** Validation and scoring aren't afterthoughts - they're explicit stages in the pipeline. An LLM judge can evaluate the output of a generation stage and a validator can check structural constraints. Because these run as part of the generation pipeline, you can identify quality issues early and make informed decisions about which records to keep before investing in further downstream processing. + +This decomposition buys you something that a single prompt never can: the ability to reason about, test, and improve each stage independently. + +--- + +## Design Principles Behind Data Designer + +With that framing in mind, here are the principles that guided Data Designer's architecture. + +### Declarative over imperative + +When you define a Data Designer workflow, you describe the structure of the dataset you want - not the execution plan for how to generate it. You declare columns, their types, their prompts or schemas, and the models they should use. The framework handles the rest: resolving dependencies, scheduling execution, managing parallelism, batching requests, and retrying failures. + +This is a deliberate choice. We wanted the configuration to read like a description of the desired *output*, not a script full of API calls and error handling. It makes workflows easier to read, easier to share, and easier to modify - you can swap a model, adjust a prompt, or add a validation stage without rewriting control flow. + +### Columns as composable units + +The core abstraction in Data Designer is the *column*. Each column represents a single field in your dataset, and each column has a well-defined generation strategy: it might be an LLM text generation call, a structured output with a Pydantic schema, an embedding computation, a sampler, a Jinja2 expression that combines other columns, or a quality evaluation from an LLM judge. + +Columns reference each other through Jinja2 templates. When one column's prompt includes `{{ document_artifacts }}`, the framework knows that column depends on the `document_artifacts` column and must run after it. These references are automatically extracted to build a dependency graph, and the framework topologically sorts the graph to determine execution order. You don't write orchestration code - you just write columns, and the DAG emerges from the references between them. 
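+
+As a minimal sketch of this pattern (the column names, prompts, and model aliases below are illustrative, not taken from a shipped example), one column's prompt simply references another column, and the dependency follows from that reference:
+
+```python
+import data_designer.config as dd
+
+config = dd.DataDesignerConfigBuilder()
+
+# First stage: extract key facts from a seed column named "document".
+config.add_column(
+    dd.LLMTextColumnConfig(
+        name="document_artifacts",
+        prompt="List the key entities, concepts, and facts in this document:\n\n{{ document }}",
+        model_alias="extraction_model",
+    )
+)
+
+# Second stage: generate a question grounded in the extracted artifacts.
+# The {{ document_artifacts }} reference makes this column depend on the one
+# above, so the framework runs it afterwards -- no orchestration code needed.
+config.add_column(
+    dd.LLMTextColumnConfig(
+        name="question",
+        prompt="Write one challenging question answerable from these facts:\n\n{{ document_artifacts }}",
+        model_alias="generation_model",
+    )
+)
+```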
+
+This composability is what makes it possible to go from a simple two-column workflow to a complex multi-stage pipeline without changing the underlying execution model.
+
+### Multi-model by design
+
+Not every stage in a pipeline needs the same model. Extracting structured metadata from a document is a different task than generating creative long-form content, which is a different task than scoring quality, which is a different task than computing embeddings.
+
+Data Designer treats multi-model orchestration as a first-class concern. Each column can specify its own model alias, and the framework manages model routing, per-model parallelism limits, and usage tracking independently. In practice, this means you can use a large reasoning model for your hardest generation stage, a smaller and faster model for evaluation and scoring, and a dedicated embedding model for semantic representations - all within the same workflow, without writing any routing logic yourself.
+
+### Quality as a first-class stage
+
+In Data Designer, quality control isn't a post-processing step you bolt on after generation. Validators and LLM-as-judge evaluations are column types, just like generation columns. They participate in the same dependency graph, run in the same execution engine, and their outputs are available to downstream stages.
+
+This means you can define a pipeline where a judge evaluates generated records immediately after they're created, and a downstream expression column flags records below a quality threshold - all within a single workflow definition. Quality scores are part of the pipeline, not something you remember to compute afterwards.
+
+### Extensibility via plugins
+
+No framework can anticipate every use case. Data Designer's plugin system lets you define custom column generators that work alongside the built-in types. A plugin is a Python class that inherits from the base column generator, ships with a configuration schema, and registers itself through a standard entry point. Once installed, it's indistinguishable from a built-in column type - it participates in dependency resolution, batching, and parallel execution like everything else.
+
+This is how domain-specific functionality gets added without forking the framework. If your use case requires embedding-based deduplication with FAISS indices and cosine similarity thresholds, for instance, you can build it as a plugin and drop it into any pipeline that needs it.
+
+---
+
+## What This Looks Like in Practice
+
+These principles apply to any SDG use case. Whether you're generating reasoning traces for pretraining (as in our [RQA dev note](/docs/devnotes/rqa)), multi-turn conversations for alignment tuning, labeled examples for text classification, product reviews for testing a recommendation system, or code-repair pairs for training a coding assistant - the same decomposition applies. You identify the stages, define the columns, declare the dependencies, and let the framework handle execution.
+
+To make one example concrete, consider a pipeline for generating training data for a retrieval model. The goal is to produce high-quality question-answer pairs grounded in a corpus of documents, with quality scoring. We choose this example because it exercises several stages and model types in a single workflow, but the pattern generalizes to any domain.
+
+In a single-prompt approach, you'd try to pack all of this into one call: "Given this document, generate diverse QA pairs of varying complexity and only include high-quality ones."
The model would do its best, but you'd have limited control over any individual aspect. + +With Data Designer, the same task decomposes into a pipeline of focused stages: + +``` + Seed Documents Seed dataset column ingests documents + │ from local files or HuggingFace + ▼ +┌─────────────────────────┐ +│ Artifact Extraction │ LLM extracts key concepts, entities, +│ │ relationships from each document +└───────────┬─────────────┘ + │ + ▼ +┌─────────────────────────┐ +│ QA Generation │ LLM generates questions & answers grounded +│ │ in the extracted artifacts +└───────────┬─────────────┘ + │ + ▼ +┌─────────────────────────┐ +│ Quality Evaluation │ LLM judge scores each QA pair +│ │ on relevance, accuracy, clarity +└───────────┬─────────────┘ + │ + ▼ + Final Dataset +``` + +Each box is a column. Each one can use a different model. Each one has a focused prompt or algorithm. And because they're declared as columns with explicit dependencies, the framework handles the execution order, the batching, and the parallelism. + +The critical insight - and the one that applies regardless of your use case - is that every stage is independently *configurable*, *testable*, and *replaceable*. Want to try a different model for quality evaluation? Swap the model alias on that column. Want to tighten quality thresholds? Adjust the judge column's scoring rubric. Want to add a new stage that generates hard negatives for contrastive learning? Add a column and declare its dependencies. The rest of the pipeline doesn't change. + +--- + +## Summary + +Synthetic data generation at scale is a systems problem, not just a prompting problem. The design principles behind Data Designer reflect this: + +1. **Declarative over imperative** - describe the dataset you want, not the execution plan +2. **Columns as composable units** - each stage is self-contained, with dependencies resolved automatically via a DAG +3. **Multi-model by design** - match the model to the task, with per-column model routing +4. **Quality as a first-class stage** - validators and judges are part of the pipeline, not afterthoughts +5. **Extensibility via plugins** - add domain-specific logic without forking the framework + +The result is a general-purpose framework where complex, multi-stage generation workflows - whether you're building retrieval training data, reasoning datasets, conversational corpora, or something we haven't imagined yet - are expressed as simple column declarations. The hard problems of orchestration, dependency resolution, batching, and error handling are solved once, in the framework, rather than reimplemented in every project. + +## See Also + +- [NeMo Data Designer on GitHub](https://github.com/NVIDIA-NeMo/DataDesigner) +- [RQA Dev Note: Graduate-Level Science Reasoning Data](/docs/devnotes/rqa) +- [Deep Research Trajectories](/docs/devnotes/deep-research-trajectories) diff --git a/fern/v0.5.0/pages/devnotes/index.mdx b/fern/v0.5.0/pages/devnotes/index.mdx new file mode 100644 index 00000000..cb88a190 --- /dev/null +++ b/fern/v0.5.0/pages/devnotes/index.mdx @@ -0,0 +1,30 @@ +--- +title: Dev Notes +description: In-depth guides, tutorials, and insights about synthetic data generation. +--- + +Welcome to NeMo Data Designer Dev Notes! Here you'll find in-depth technical articles, case studies, and insights about synthetic data generation. + + + + Why SDG is a systems problem and the design principles behind Data Designer's architecture. + + + How we created the RQA dataset to push the boundaries of model reasoning capabilities. 
+ + + Using MCP tool use to generate multi-turn research trajectories for training deep research agents. + + diff --git a/fern/v0.5.0/pages/devnotes/rqa.mdx b/fern/v0.5.0/pages/devnotes/rqa.mdx new file mode 100644 index 00000000..acaf3851 --- /dev/null +++ b/fern/v0.5.0/pages/devnotes/rqa.mdx @@ -0,0 +1,240 @@ +--- +title: "Graduate-Level Science Reasoning Data with NeMo Data Designer" +description: How we created the RQA dataset using Data Designer to push the boundaries of model performance. +--- + +Using NeMo Data Designer, we created the RQA (Reasoning Question-Answer) dataset: a massive collection of graduate-level, reasoning-heavy science samples designed to push the boundaries of model performance. + +![RQA Blog](/assets/images/rqa-blog.png) + +--- + +Inference-time reasoning has transformed LLM capabilities, boosting performance in difficult domains like math and science. While reasoning is introduced in the post-training phase using Reinforcement Learning (RL), it builds on patterns that the model has seen throughout pretraining. In fact, research from NVIDIA has shown that [front-loading examples of reasoning into the pretraining phase](https://research.nvidia.com/labs/adlr/Synergy/) can have a positive, compounding impact on the quality of the final model. When training Nemotron 3 Nano, our goal was to introduce rich and diverse examples of reasoning directly into pretraining, laying the groundwork for reasoning RL in post-training. + +Using NeMo Data Designer, we created the RQA (Reasoning Question-Answer) dataset: a massive collection of graduate-level, reasoning-heavy science samples designed to push the boundaries of model performance. Each sample contains a question, a trace from a reasoning LLM attempting to answer that question, and the final resulting answer. As we'll show in the results, introducing RQA into pretraining **didn't just result in stronger scientific reasoning - it improved math and coding performance as well**. + +This blog post walks you through how we built it, and how you can adapt our approach for your own reasoning-intensive datasets. + +--- + +## Step 1: Curating High-Quality Science Seeds from Essential-Web + +For our reasoning dataset, we knew that both quality and diversity were critical. We wanted to show the model examples of reasoning through difficult scientific problems, and we wanted to make sure that those problems covered as wide a range of scientific domains as possible. Using seed passages from web text was an obvious choice, because it allowed us to use the seed data to control both quality and diversity. + +We started with [Essential-Web](https://arxiv.org/abs/2506.14111), a Common Crawl (web text) dataset where each document has been labelled with respect to both quality and subject. For instance, documents are labelled with an estimated *Education Level*, where *Graduate Level* indicates that the text "requires graduate-level education or domain expertise. Assumes deep background knowledge and specialized training to comprehend". These labels let us rapidly filter down the documents to the highest-quality seeds for our scientific reasoning dataset. + +Starting from the [STEM subset of Essential-Web](https://huggingface.co/datasets/EssentialAI/eai-taxonomy-stem-w-dclm), we filtered to documents that were: + +1. Undergraduate-to-graduate education level +2. Advanced reasoning depth +3. High technical correctness +4. 
Advanced [Bloom taxonomy levels](https://en.wikipedia.org/wiki/Bloom's_taxonomy) for both cognitive processes (Analyze, Evaluate or Create) and knowledge domains (Conceptual, Procedural or Metacognitive) +5. In the English language and over 1000 characters. + +The resulting subset consisted of roughly 14 million documents, mostly academic. Since many of the documents were very long, we extracted random chunks of fewer than 4096 characters in length. + +Essential-AI also labelled the documents according to the [Free Decimal Correspondence (FDC) code](https://everybodyslibraries.com/about-the-free-decimal-correspondence/#:~:text=What%20is%20the%20Free%20Decimal,group%20of%20subjects%20and%20disciplines.), a public-domain analogue of the Dewey Decimal system. Using the FDC code, we could see that the topics weren't equally balanced across scientific domains; for instance, Medicine & Health was heavily over-represented. Since we planned to generate fewer than 14 million samples in total, we aimed to capture as broad a range of topics as possible in the subset of seeds we used. + +To arrive at a smaller set of seed documents balanced by topic, we used a hierarchical round-robin approach. First, we rotated between selecting seed documents across 8 major domains (Biology, Chemistry, Computer Science, Engineering, Math, Medicine/Health, Physics, and Other). Within each high-level domain, we further rotated between seed documents based on their 3-digit FDC codes; for instance, given a Physics sample with code 535 (*Light*), the next Physics sample might be from code 536 (*Heat*), then 537 (*Electricity*) and so on, ensuring that no single subdomain dominates. We continued the round robin selection at the first and second decimal place of the FDC code, where they existed. + +We tested approaches using both the first 4.5 million and the first 9 million seeds according to the round-robin approach described above. + +--- + +## Step 2: Generating Challenging Questions + +With our seed documents ready, we moved to NeMo Data Designer to design the actual dataset. While the seed documents ground our dataset in the types of advanced scientific topics we're interested in, they don't typically show the *active process* of thinking through a difficult scientific problem; instead, scientific papers usually show the polished end result of advanced reasoning. This is where LLMs come in. + +We first needed examples of the type of tough questions that Nemotron might be asked by a user in the real world. To do this, we used Data Designer to prompt a reasoning-enabled LLM to generate a graduate-level question *inspired by* each seed passage: + +```python +import data_designer.config as dd +from data_designer.interface import DataDesigner + +QUESTION_PROMPT = """ +Task: Generate a highly challenging, graduate-level reasoning question +inspired by the following passage. + +Follow these instructions: +1. The text serves only as inspiration for the question. You *must not* + reference the text directly in any way. +2. The question should be appropriate for an advanced graduate-level exam + in a course specialized in this topic. +3. Ensure that the question requires higher-order reasoning beyond simple + recall, such as mathematical reasoning, quantitative analysis, or synthesis. +4. Tag the question with "Question:". 
+ +Text: +{{ seed_passage }} + +Question: [question] +""" + +# Configure the workflow with a reasoning-enabled model +config = dd.DataDesignerConfigBuilder(model_configs=[ + dd.ModelConfig( + alias="reasoning-model", + model="qwen/qwen3-235b-a22b", + provider="nvidia", + ), +]) + +config.with_seed_dataset( + dd.LocalFileSeedSource(path="path/to/seed_data.parquet"), + sampling_strategy=dd.SamplingStrategy.SHUFFLE, +) + +config.add_column( + dd.LLMTextColumnConfig( + name="question", + prompt=QUESTION_PROMPT, + model_alias="reasoning-model", + ) +) +``` + + +Our prompt emphasizes that the question shouldn't reference the source passage. We want questions that stand on their own, without including the source passage itself; since these are passages from Common Crawl, we can expect that they appear in the pretraining data already, and our focus here is on generating new tokens. + + +--- + +## Step 3: Generating High-Quality Answers with Reasoning Traces + +If you've ever tried to read a teacher's answer key before, you know that sometimes the person who *wrote* the question isn't always the best at explaining how to *answer* it. In the real world, reasoning involves a lot of what-ifs, dead ends and backtracking - the types of behavior we can only get from a model when it has never seen the question before. This is why we chose to decouple answer generation from question generation, ensuring that the model doesn't have any context about how the question was generated or the source passage itself when it attempts to answer it. + +Below, we prompt the LLM directly with the questions we generated above, then capture the resulting reasoning trace and final answer for our RQA samples. + +```python +config.add_column( + dd.LLMTextColumnConfig( + name="answer", + prompt="{{ question }}", # Present just the question + model_alias="reasoning-model", + extract_reasoning_content=True, # Extract reasoning into separate column + ) +) + +# Combine question, reasoning trace, and answer into final sample +config.add_column( + dd.ExpressionColumnConfig( + name="rqa_sample", + expr="{{ question }}\n\n{{ answer__reasoning_content }}\n\n{{ answer }}", + ) +) +``` + +In the resulting dataset, we see the following new columns concatenated to the seed data columns: + +- `question` +- `answer` +- `answer__reasoning_content` +- `rqa_sample` + +The `question` and `answer` columns are the final result of the calls to our reasoning LLM, while `answer__reasoning_content` is the reasoning trace generated by the LLM when generating the answer. Typically we discard the reasoning trace, but here it's critical: we want to include the model's chain-of-thought in the final training data, distilling strong priors in Nemotron Nano 3 about *how* to work through a challenging problem. The final column, `rqa_sample`, uses Jinja2 syntax to combine all three fields into the final sample for training. + +We chose to use the same frontier reasoning model to answer the questions as we used to generate them - leveraging the model's advanced capabilities both for formulating a tough, well-formed question and for puzzling through the answer. But with Data Designer, this choice is up to you - you can mix-and-match models any way you like. + +--- + +## Results: Measurable Improvements in STEM Reasoning + +To evaluate the impact of the RQA data, we ran continued pretraining experiments on an internal checkpoint of [Nemotron-H 8B](https://research.nvidia.com/labs/adlr/nemotronh/). 
Nemotron-H used a two-phase pretraining approach (you can read more about it in our white paper [here](https://arxiv.org/pdf/2504.03624)). We intervened at the Phase 2 training stage, comparing the result of replacing either 4% or 8% of the existing data blend with RQA samples (taking weight from high-quality Common Crawl data). We ran the intervention for 18k steps, between a checkpoint at 140k steps and a checkpoint at 158k steps. + +| Data Blend | Validation Loss (↓) | MMLU-Pro (with CoT, ↑) | Math 500 (with CoT, ↑) | GSM8K (with CoT, ↑) | Humaneval+ (↑) | MBPP+ (↑) | +| :---- | :---- | :---- | :---- | :---- | :---- | :---- | +| **Baseline data blend (140k steps)** | 1.309 | 36.99 | - | 79.98 | 38.14 | 48.68 | +| **Baseline data blend (158k steps)** | 1.258 | 43.39 | 71.00 | 81.96 | 42.71 | 53.31 | +| **with RQA (4.5m @4%, 158k steps)** | 1.256 | 44.31 | **73.40** | 82.79 | **47.20** | **54.84** | +| **with RQA (9m @8%, 158k steps)** | **1.255** | **45.80** | **73.40** | **84.76** | 45.61 | 53.80 | + +One of the most surprising (and exciting!) results was that RQA didn't just improve performance on tests of scientific reasoning like MMLU-Pro - it also improved performance on benchmarks associated with math reasoning (Math 500, GSM8K) and coding capabilities (Humaneval+, MBPP+). This shows how early introduction of advanced reasoning capabilities can produce robust improvements across different domains. + +You can check out the RQA dataset we generated for Nemotron 3 Nano [here](https://huggingface.co/datasets/nvidia/Nemotron-Pretraining-Specialized-v1/viewer/Nemotron-Pretraining-RQA). + +--- + +## Get Started with Data Designer + +Apart from the seed data, the entire pipeline is reproducible using NeMo Data Designer. Note how Data Designer handles complex data formatting with ease, leveraging Jinja2 templates in prompt generation and built-in logic to extract reasoning traces from model responses. + +```python +import data_designer.config as dd +from data_designer.interface import DataDesigner + +# Configure your model +model_configs = [ + dd.ModelConfig( + alias="reasoning-model", + model="qwen/qwen3-235b-a22b", + provider="nvidia", + inference_parameters=dd.ChatCompletionInferenceParams( + max_tokens=8192, + timeout=300, # 5 minute timeout for long reasoning chains + ), + ), +] + +# Build the workflow +config = dd.DataDesignerConfigBuilder(model_configs=model_configs) +config.with_seed_dataset( + dd.LocalFileSeedSource(path="path/to/your_seed_data.parquet"), + sampling_strategy=dd.SamplingStrategy.SHUFFLE, +) + +# Generate questions +config.add_column( + dd.LLMTextColumnConfig( + name="question", + prompt=QUESTION_PROMPT, + model_alias="reasoning-model", + ) +) + +# Generate answers with reasoning trace +config.add_column( + dd.LLMTextColumnConfig( + name="answer", + prompt="{{ question }}", + model_alias="reasoning-model", + extract_reasoning_content=True, # Extract reasoning into separate column + ) +) + +# Combine into final sample +config.add_column( + dd.ExpressionColumnConfig( + name="rqa_sample", + expr="{{ question }}\n\n{{ answer__reasoning_content }}\n\n{{ answer }}", + ) +) + +# Run generation and save to disk +data_designer = DataDesigner() +result = data_designer.create( + config_builder=config, + num_records=N_RECORDS, + dataset_name="rqa_dataset", +) +``` + +--- + +## Summary + +The RQA dataset demonstrates that targeted synthetic data generation can meaningfully improve advanced reasoning capabilities. By: + +1. Curating high-quality scientific seed data +2. 
Generating challenging, standalone questions from those seeds +3. Using powerful reasoning models to reason through how to answer those questions + +...we created a dataset that pushes models toward graduate-level science reasoning - and generalizable improvements on math and code as well. + +The workflow is fully configurable and extensible: swap in your own seed data, adjust the prompts, or add custom validators. Data Designer makes it possible to iterate rapidly on synthetic data pipelines, turning what used to be months of manual annotation into hours of programmable generation. + +## See Also + +- [NeMo Data Designer on GitHub](https://github.com/NVIDIA-NeMo/DataDesigner) +- [Nemotron 3 Nano Technical Report](https://arxiv.org/pdf/2512.20848) +- [Essential-Web](https://arxiv.org/abs/2506.14111) +- [Design Principles](/docs/devnotes/design-principles) diff --git a/fern/v0.5.0/pages/index.mdx b/fern/v0.5.0/pages/index.mdx new file mode 100644 index 00000000..5597a1b3 --- /dev/null +++ b/fern/v0.5.0/pages/index.mdx @@ -0,0 +1,112 @@ +--- +title: 🎨 NeMo Data Designer Library +description: A general framework for generating high-quality synthetic data from scratch or using seed data. +--- + +[![GitHub](https://img.shields.io/badge/github-repo-952fc6?logo=github)](https://github.com/NVIDIA-NeMo/DataDesigner) [![License](https://img.shields.io/badge/License-Apache_2.0-0074df.svg)](https://opensource.org/licenses/Apache-2.0) [![NeMo Microservices](https://img.shields.io/badge/NeMo-Microservices-76b900)](https://docs.nvidia.com/nemo/microservices/latest/index.html) + +👋 Welcome! Data Designer is an orchestration framework for generating high-quality synthetic data. You provide LLM endpoints (NVIDIA, OpenAI, vLLM, etc.), and Data Designer handles batching, parallelism, validation, and more. + +**Configure** columns and models → **Preview** samples and iterate → **Create** your full dataset at scale. + +Unlike raw LLM calls, Data Designer gives you statistical diversity, field correlations, automated validation, and reproducible workflows. For details, see [Architecture & Performance](/docs/concepts/architecture-and-performance). + +## Install + +```bash +pip install data-designer +``` + +## Setup + +Get an API key from one of the default providers and set it as an environment variable: + +```bash +# NVIDIA (build.nvidia.com) - recommended +export NVIDIA_API_KEY="your-api-key-here" + +# OpenAI (platform.openai.com) +export OPENAI_API_KEY="your-openai-api-key-here" + +# OpenRouter (openrouter.ai) +export OPENROUTER_API_KEY="your-openrouter-api-key-here" +``` + +Verify your configuration is ready: + +```bash +data-designer config list +``` + +This displays the pre-configured model providers and models. See [CLI Configuration](/docs/concepts/models/configure-with-cli) to customize. 
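+
+If you prefer to inspect this from Python, the same information is available on the `DataDesigner` instance and the config builder. This is a small sketch that mirrors the calls used in the Quick Start and assumes your API key is already exported:
+
+```python
+import data_designer.config as dd
+from data_designer.interface import DataDesigner
+
+# Show the model providers configured by default
+data_designer = DataDesigner()
+data_designer.info.display(dd.InfoType.MODEL_PROVIDERS)
+
+# Show the default model configurations (the aliases you reference in columns)
+config_builder = dd.DataDesignerConfigBuilder()
+config_builder.info.display(dd.InfoType.MODEL_CONFIGS)
+```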
+ +## Your First Dataset + +Let's generate multilingual greetings to see Data Designer in action: + +```python +import data_designer.config as dd +from data_designer.interface import DataDesigner + +# Initialize with default model providers +data_designer = DataDesigner() +config_builder = dd.DataDesignerConfigBuilder() + +# Add a sampler column to randomly select a language +config_builder.add_column( + dd.SamplerColumnConfig( + name="language", + sampler_type=dd.SamplerType.CATEGORY, + params=dd.CategorySamplerParams( + values=["English", "Spanish", "French", "German", "Italian"], + ), + ) +) + +# Add an LLM text generation column +config_builder.add_column( + dd.LLMTextColumnConfig( + name="greeting", + model_alias="nvidia-text", + prompt="Write a casual and formal greeting in {{ language }}.", + ) +) + +# Generate a preview +results = data_designer.preview(config_builder) +results.display_sample_record() +``` + +🎉 That's it! You've just designed your first synthetic dataset. + +## Next Steps + + + + Step-by-step notebooks covering core features + + + Ready-to-use examples for common use cases + + + Deep dive into columns, models, and configuration + + + +## Learn More + +- **[Deployment Options](/docs/concepts/deployment-options)** – Library vs. NeMo Microservice +- **[Model Configuration](/docs/concepts/models/default-model-settings)** – Configure LLM providers and models +- **[Architecture & Performance](/docs/concepts/architecture-and-performance)** – Optimize for throughput and scale diff --git a/fern/v0.5.0/pages/installation.mdx b/fern/v0.5.0/pages/installation.mdx new file mode 100644 index 00000000..ff7bbd6e --- /dev/null +++ b/fern/v0.5.0/pages/installation.mdx @@ -0,0 +1,36 @@ +--- +title: Installation +description: How to install Data Designer +--- + +Installing Data Designer is as simple as: + + + + ```bash + pip install data-designer + ``` + + + ```bash + uv add data-designer + ``` + + + +## Development Installation + +To install the latest development version from the GitHub repository: + + + + ```bash + pip install 'git+https://github.com/NVIDIA-NeMo/DataDesigner@main' + ``` + + + ```bash + uv add 'git+https://github.com/NVIDIA-NeMo/DataDesigner@main' + ``` + + diff --git a/fern/v0.5.0/pages/plugins/available.mdx b/fern/v0.5.0/pages/plugins/available.mdx new file mode 100644 index 00000000..594e53ac --- /dev/null +++ b/fern/v0.5.0/pages/plugins/available.mdx @@ -0,0 +1,6 @@ +--- +title: "🚧 Available Plugins: Coming Soon" +description: List of available Data Designer plugins. +--- + +This page will list available Data Designer plugins. Stay tuned! diff --git a/fern/v0.5.0/pages/plugins/example.mdx b/fern/v0.5.0/pages/plugins/example.mdx new file mode 100644 index 00000000..95033ec5 --- /dev/null +++ b/fern/v0.5.0/pages/plugins/example.mdx @@ -0,0 +1,283 @@ +--- +title: "Example Plugin: Column Generator" +description: A complete walkthrough for creating a Data Designer column generator plugin. +--- + + +The plugin system is currently **experimental** and under active development. The documentation, examples, and plugin interface are subject to significant changes in future releases. If you encounter any issues, have questions, or have ideas for improvement, please consider starting [a discussion on GitHub](https://github.com/NVIDIA-NeMo/DataDesigner/discussions). + + +Data Designer supports two plugin types: **column generators** and **seed readers**. This page walks through a complete column generator example. 
+ +A Data Designer plugin is implemented as a Python package with three main components: + +1. **Configuration Class**: Defines the parameters users can configure +2. **Implementation Class**: Contains the core logic of the plugin +3. **Plugin Object**: Connects the config and implementation classes to make the plugin discoverable + +We recommend separating these into individual files (`config.py`, `impl.py`, `plugin.py`) within a plugin subdirectory. This keeps the code organized, makes it easy to test each component independently, and guards against circular dependencies — since the config module can be imported without pulling in the engine-level implementation classes, and the plugin object can be discovered without importing either. + +--- + +## Column Generator Plugin: Index Multiplier + +In this section, we will build a simple column generator plugin that generates values by multiplying the row index by a user-specified multiplier. + +### Step 1: Create a Python package + +We recommend the following structure for column generator plugins: + +``` +data-designer-index-multiplier/ +├── pyproject.toml +└── src/ + └── data_designer_index_multiplier/ + ├── __init__.py + ├── config.py + ├── impl.py + └── plugin.py +``` + +### Step 2: Create the config class + +The configuration class defines what parameters users can set when using your plugin. For column generator plugins, it must inherit from [SingleColumnConfig](/api/column-configs) and include a [discriminator field](https://docs.pydantic.dev/latest/concepts/unions/#discriminated-unions). + +Create `src/data_designer_index_multiplier/config.py`: + +```python +from typing import Literal + +from data_designer.config.base import SingleColumnConfig + + +class IndexMultiplierColumnConfig(SingleColumnConfig): + """Configuration for the index multiplier column generator.""" + + # Required: discriminator field with a unique Literal type + # This value identifies your plugin and becomes its column_type + column_type: Literal["index-multiplier"] = "index-multiplier" + + # Configurable parameter for this plugin + multiplier: int = 2 + + @staticmethod + def get_column_emoji() -> str: + return "✖️" + + @property + def required_columns(self) -> list[str]: + """Columns that must exist before this generator runs.""" + return [] + + @property + def side_effect_columns(self) -> list[str]: + """Additional columns produced beyond the primary column.""" + return [] +``` + +**Key points:** + +- The `column_type` field must be a `Literal` type with a string default +- This value uniquely identifies your plugin (use kebab-case) +- Add any custom parameters your plugin needs (here: `multiplier`) +- `SingleColumnConfig` is a Pydantic model, so you can leverage all of Pydantic's validation features +- `get_column_emoji()` returns the emoji displayed in logs for this column type +- `required_columns` lists any columns this generator depends on (empty if none) +- `side_effect_columns` lists any additional columns this generator produces beyond the primary column (empty if none) + +### Step 3: Create the implementation class + +The implementation class defines the actual business logic of the plugin. For column generator plugins, inherit from `ColumnGeneratorFullColumn` or `ColumnGeneratorCellByCell` and implement the `generate` method. 
+ +Create `src/data_designer_index_multiplier/impl.py`: + +```python +import logging + +import pandas as pd +from data_designer.engine.column_generators.generators.base import ColumnGeneratorFullColumn + +from data_designer_index_multiplier.config import IndexMultiplierColumnConfig + +logger = logging.getLogger(__name__) + + +class IndexMultiplierColumnGenerator(ColumnGeneratorFullColumn[IndexMultiplierColumnConfig]): + + def generate(self, data: pd.DataFrame) -> pd.DataFrame: + """Generate the column data. + + Args: + data: The current DataFrame being built + + Returns: + The DataFrame with the new column added + """ + logger.info( + f"Generating column {self.config.name} " + f"with multiplier {self.config.multiplier}" + ) + + data[self.config.name] = data.index * self.config.multiplier + + return data +``` + +**Key points:** + +- Generic type `ColumnGeneratorFullColumn[IndexMultiplierColumnConfig]` connects the implementation to its config +- You have access to the configuration parameters via `self.config` + + +The `generation_strategy` specifies how the column generator will generate data. You choose a strategy by inheriting from the corresponding base class: + +- **`ColumnGeneratorFullColumn`**: Generates the full column (at the batch level) in a single call to `generate` + - `generate` must take as input a `pd.DataFrame` with all previous columns and return a `pd.DataFrame` with the generated column appended. + +- **`ColumnGeneratorCellByCell`**: Generates one cell at a time + - `generate` must take as input a `dict` with key/value pairs for all previous columns and return a `dict` with an additional key/value for the generated cell + - Supports concurrent workers via a `max_parallel_requests` parameter on the configuration + + +### Step 4: Create the plugin object + +Create a `Plugin` object that makes the plugin discoverable and connects the implementation and config classes. + +Create `src/data_designer_index_multiplier/plugin.py`: + +```python +from data_designer.plugins import Plugin, PluginType + +plugin = Plugin( + config_qualified_name="data_designer_index_multiplier.config.IndexMultiplierColumnConfig", + impl_qualified_name="data_designer_index_multiplier.impl.IndexMultiplierColumnGenerator", + plugin_type=PluginType.COLUMN_GENERATOR, +) +``` + +### Step 5: Package your plugin + +Create a `pyproject.toml` file to define your package and register the entry point: + +```toml +[project] +name = "data-designer-index-multiplier" +version = "1.0.0" +description = "Data Designer index multiplier plugin" +requires-python = ">=3.10" +dependencies = [ + "data-designer", +] + +# Register this plugin via entry points +[project.entry-points."data_designer.plugins"] +index-multiplier = "data_designer_index_multiplier.plugin:plugin" + +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[tool.hatch.build.targets.wheel] +packages = ["src/data_designer_index_multiplier"] +``` + + +Plugins are discovered automatically using [Python entry points](https://packaging.python.org/en/latest/guides/creating-and-discovering-plugins/#using-package-metadata). It is important to register your plugin as an entry point under the `data_designer.plugins` group. + +The entry point format is: +```toml +[project.entry-points."data_designer.plugins"] + = ":" +``` + + +### Step 6: Install and use your plugin locally + +Install your plugin in editable mode — this is all you need to start using it. No PyPI publishing required: + +```bash +# From the plugin directory +uv pip install -e . 
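+
+# Optional sanity check (assumes the package layout above): the editable
+# install should make the package importable right away
+python -c "import data_designer_index_multiplier"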
+``` + +That's it. The editable install registers the entry point so Data Designer discovers your plugin automatically. Any changes you make to the plugin source code are picked up immediately without reinstalling. + +Once installed, your plugin works just like built-in column types: + +```python +import data_designer.config as dd +from data_designer.interface import DataDesigner + +from data_designer_index_multiplier.config import IndexMultiplierColumnConfig + +data_designer = DataDesigner() +builder = dd.DataDesignerConfigBuilder() + +# Add a regular column +builder.add_column( + dd.SamplerColumnConfig( + name="category", + sampler_type="category", + params=dd.CategorySamplerParams(values=["A", "B", "C"]), + ) +) + +# Add your custom plugin column +builder.add_column( + IndexMultiplierColumnConfig( + name="scaled_index", + multiplier=5, + ) +) + +# Generate data +results = data_designer.create(builder, num_records=10) +print(results.load_dataset()) +``` + +Output: +``` + category scaled_index +0 B 0 +1 A 5 +2 C 10 +3 A 15 +4 B 20 +... +``` + +--- + +## Validating Your Plugin + +Data Designer provides a testing utility to validate that your plugin is structured correctly. Use `assert_valid_plugin` to check that your config and implementation classes are properly defined: + +```python +from data_designer.engine.testing.utils import assert_valid_plugin +from data_designer_index_multiplier.plugin import plugin + +# Raises AssertionError with a descriptive message if anything is wrong with the general plugin structure +assert_valid_plugin(plugin) +``` + +This validates that: + +- The config class is a subclass of `ConfigBase` +- For column generator plugins: the implementation class is a subclass of `ConfigurableTask` +- For seed reader plugins: the implementation class is a subclass of `SeedReader` + +--- + +## Multiple Plugins in One Package + +A single Python package can register multiple plugins. Simply define multiple `Plugin` instances and register each one as a separate entry point: + +```toml +[project.entry-points."data_designer.plugins"] +my-column-generator = "my_package.plugins.column_generator.plugin:column_generator_plugin" +my-seed-reader = "my_package.plugins.seed_reader.plugin:seed_reader_plugin" +``` + +For an example of this pattern, see the end-to-end test plugins in the [tests_e2e/](https://github.com/NVIDIA-NeMo/DataDesigner/tree/main/tests_e2e) directory. + +That's it! You now know how to create a Data Designer plugin. A local editable install (`uv pip install -e .`) is all you need to develop, test, and use your plugin. If you want to make it available for others to install via `pip install`, publish it to PyPI or your organization's package index. diff --git a/fern/v0.5.0/pages/plugins/overview.mdx b/fern/v0.5.0/pages/plugins/overview.mdx new file mode 100644 index 00000000..84c24c88 --- /dev/null +++ b/fern/v0.5.0/pages/plugins/overview.mdx @@ -0,0 +1,68 @@ +--- +title: Data Designer Plugins +description: Extend Data Designer's capabilities with custom plugins. +--- + + +The plugin system is currently **experimental** and under active development. The documentation, examples, and plugin interface are subject to significant changes in future releases. If you encounter any issues, have questions, or have ideas for improvement, please consider starting [a discussion on GitHub](https://github.com/NVIDIA-NeMo/DataDesigner/discussions). + + +## What are plugins? + +Plugins are Python packages that extend Data Designer's capabilities without modifying the core library. 
Similar to [VS Code extensions](https://marketplace.visualstudio.com/vscode) and [Pytest plugins](https://docs.pytest.org/en/stable/reference/plugin_list.html), the plugin system empowers you to build specialized extensions for your specific use cases and share them with the community. + +**Current capabilities**: Data Designer supports two plugin types: + +- **Column Generator Plugins**: Custom column types you pass to the config builder's `add_column` method. +- **Seed Reader Plugins**: Custom seed dataset readers that let you load data from new sources (e.g., databases, cloud storage, custom formats). + +**Coming soon**: Plugin support for processors, validators, and more! + +## How do you use plugins? + +A Data Designer plugin is just a Python package configured with an [entry point](https://packaging.python.org/en/latest/guides/creating-and-discovering-plugins/#using-package-metadata) that points to a Data Designer `Plugin` object. Using a plugin is as simple as installing the package: + +```bash +# Install a local plugin (for development and testing) +uv pip install -e /path/to/your/plugin + +# Or install a published plugin from PyPI +pip install data-designer-{plugin-name} +``` + +Once installed, plugins are automatically discovered and ready to use — no additional registration or configuration needed. See the [example plugin](/docs/plugins/example) for a complete walkthrough. + +## How do you create plugins? + +Creating a plugin involves three main steps: + +### 1. Implement the Plugin Components + +Each plugin has three components, and we recommend organizing them into separate files within a plugin subdirectory: + +- **`config.py`** — Configuration class defining user-facing parameters + - Column generator plugins: inherit from `SingleColumnConfig` with a `column_type` discriminator + - Seed reader plugins: inherit from `SeedSource` with a `seed_type` discriminator +- **`impl.py`** — Implementation class containing the core logic + - Column generator plugins: inherit from `ColumnGeneratorFullColumn` or `ColumnGeneratorCellByCell` + - Seed reader plugins: inherit from `SeedReader` +- **`plugin.py`** — A `Plugin` instance that connects the config and implementation classes + +### 2. Package Your Plugin + +- Set up a Python package with `pyproject.toml` +- Register your plugin using entry points under `data_designer.plugins` +- Define dependencies (including `data-designer`) + +### 3. Install and Test Locally + +- Install your plugin locally with `uv pip install -e .` (editable mode) +- No publishing required — your plugin is usable immediately after a local install +- Iterate on your plugin code with fast feedback + +### 4. Share Your Plugin (Optional) + +- Publish to PyPI or another package index to make it installable by anyone via `pip install` +- This step is only needed if you want others outside your environment to use the plugin + +**Ready to get started?** See the [Example Plugin](/docs/plugins/example) for a complete walkthrough of creating a column generator plugin. diff --git a/fern/v0.5.0/pages/quick-start.mdx b/fern/v0.5.0/pages/quick-start.mdx new file mode 100644 index 00000000..75fa53ed --- /dev/null +++ b/fern/v0.5.0/pages/quick-start.mdx @@ -0,0 +1,84 @@ +--- +title: Quick Start +description: Get started with Data Designer using default model providers and configurations. +--- + +Get started with Data Designer using the default model providers and configurations. 
Data Designer ships with built-in model providers and configurations that make it easy to start generating synthetic data immediately. + +## Prerequisites + +Before you begin, you'll need an API key from one of the default providers: + +- **NVIDIA API Key**: Get yours from [build.nvidia.com](https://build.nvidia.com) +- **OpenAI API Key** (optional): Get yours from [platform.openai.com](https://platform.openai.com/api-keys) +- **OpenRouter API Key** (optional): Get yours from [openrouter.ai](https://openrouter.ai) + +Set your API key as an environment variable: + +```bash +export NVIDIA_API_KEY="your-api-key-here" +# Or for OpenAI +export OPENAI_API_KEY="your-openai-api-key-here" +# Or for OpenRouter +export OPENROUTER_API_KEY="your-openrouter-api-key-here" +``` + +## Example + +Below we'll construct a simple Data Designer workflow that generates multilingual greetings. + +```python +import os + +import data_designer.config as dd +from data_designer.interface import DataDesigner + +# Set your API key from build.nvidia.com +# Skip this step if you've already exported your key to the environment variable +os.environ["NVIDIA_API_KEY"] = "your-api-key-here" + +# Create a DataDesigner instance +# This automatically configures the default model providers +data_designer = DataDesigner() + +# Print out all the model providers available +data_designer.info.display(dd.InfoType.MODEL_PROVIDERS) + +# Create a config builder +# This automatically loads the default model configurations +config_builder = dd.DataDesignerConfigBuilder() + +# Print out all the model configurations available +config_builder.info.display(dd.InfoType.MODEL_CONFIGS) + +# Add a sampler column to randomly select a language +config_builder.add_column( + dd.SamplerColumnConfig( + name="language", + sampler_type=dd.SamplerType.CATEGORY, + params=dd.CategorySamplerParams( + values=["English", "Spanish", "French", "German", "Italian"], + ), + ) +) + +# Add an LLM text generation column +# We'll use the built-in 'nvidia-text' model alias +config_builder.add_column( + dd.LLMTextColumnConfig( + name="greeting", + model_alias="nvidia-text", + prompt="Write a casual and formal greeting in '{{ language }}' language.", + ) +) + +# Run a preview to generate sample records +preview_results = data_designer.preview(config_builder=config_builder) + +# Display a sample record +preview_results.display_sample_record() +``` + +🎉 Congratulations, you successfully ran one iteration designing your synthetic data. Follow along to learn more. + +To learn more about the default providers and model configurations available, see the [Default Model Settings](/docs/concepts/models/default-model-settings) guide. diff --git a/fern/v0.5.0/pages/recipes/code-generation/text-to-python.mdx b/fern/v0.5.0/pages/recipes/code-generation/text-to-python.mdx new file mode 100644 index 00000000..a430b967 --- /dev/null +++ b/fern/v0.5.0/pages/recipes/code-generation/text-to-python.mdx @@ -0,0 +1,292 @@ +--- +title: Text to Python +description: Generate Python code from natural language descriptions. 
+--- + + +[Download the complete recipe script](https://github.com/NVIDIA-NeMo/DataDesigner/blob/main/docs/assets/recipes/code_generation/text_to_python.py) + + +```python +from pathlib import Path + +from data_designer.essentials import ( + CategorySamplerParams, + CodeLang, + CodeValidatorParams, + DataDesigner, + DataDesignerConfigBuilder, + LLMCodeColumnConfig, + LLMJudgeColumnConfig, + LLMTextColumnConfig, + SamplerColumnConfig, + SamplerType, + Score, + SubcategorySamplerParams, + ValidationColumnConfig, + ValidatorType, +) +from data_designer.interface.results import DatasetCreationResults + + +def build_config(model_alias: str) -> DataDesignerConfigBuilder: + config_builder = DataDesignerConfigBuilder() + + config_builder.add_column( + SamplerColumnConfig( + name="industry_sector", + sampler_type=SamplerType.CATEGORY, + params=CategorySamplerParams( + values=[ + "Healthcare", + "Finance", + "Technology", + ], + ), + ), + ) + + config_builder.add_column( + SamplerColumnConfig( + name="topic", + sampler_type=SamplerType.SUBCATEGORY, + params=SubcategorySamplerParams( + category="industry_sector", + values={ + "Healthcare": [ + "Electronic Health Records (EHR) Systems", + "Telemedicine Platforms", + "AI-Powered Diagnostic Tools", + ], + "Finance": [ + "Fraud Detection Software", + "Automated Trading Systems", + "Personal Finance Apps", + ], + "Technology": [ + "Cloud Computing Platforms", + "Artificial Intelligence and Machine Learning Platforms", + "DevOps and CI/CD Tools", + ], + }, + ), + ), + ) + + config_builder.add_column( + SamplerColumnConfig( + name="code_complexity", + sampler_type=SamplerType.CATEGORY, + params=CategorySamplerParams( + values=[ + "Beginner", + "Intermediate", + "Advanced", + ], + ), + ), + ) + + config_builder.add_column( + SamplerColumnConfig( + name="code_concept", + sampler_type=SamplerType.SUBCATEGORY, + params=SubcategorySamplerParams( + category="code_complexity", + values={ + "Beginner": [ + "Variables", + "Data Types", + "Functions", + "Loops", + "Classes", + ], + "Intermediate": [ + "List Comprehensions", + "Object-oriented programming", + "Lambda Functions", + "Web frameworks", + "Pandas", + ], + "Advanced": [ + "Multithreading", + "Context Managers", + "Generators", + ], + }, + ), + ), + ) + + config_builder.add_column( + SamplerColumnConfig( + name="instruction_phrase", + sampler_type=SamplerType.CATEGORY, + params=CategorySamplerParams( + values=[ + "Write a function that", + "Create a class that", + "Implement a script", + "Can you create a function", + "Develop a module that", + ], + ), + ), + ) + + config_builder.add_column( + LLMTextColumnConfig( + name="instruction", + model_alias=model_alias, + system_prompt=("You are an expert at generating clear and specific programming tasks."), + prompt=( + "Generate an instruction to create Python code that solves a specific problem.\n" + 'The instruction should begin with the following phrase: "{{ instruction_phrase }}".\n\n' + "Important Guidelines:\n" + "* Industry Relevance: Ensure the instruction pertains to the {{ industry_sector }} sector and {{ topic }} topic.\n" + "* Code Complexity: Tailor the instruction to the {{ code_complexity }} level. Utilize relevant {{ code_concept }} where appropriate to match the complexity level.\n" + "* Clarity and Specificity: Make the problem statement clear and unambiguous. 
Provide sufficient context to understand the requirements without being overly verbose.\n" + "* Response Formatting: Do not include any markers such as ### Response ### in the instruction.\n" + ), + ) + ) + + config_builder.add_column( + LLMCodeColumnConfig( + name="code_implementation", + model_alias=model_alias, + code_lang=CodeLang.PYTHON, + system_prompt=( + "You are an expert Python programmer who writes clean, efficient, and well-documented code." + ), + prompt=( + "Write Python code for the following instruction:\n" + "Instruction: {{ instruction }}\n\n" + "Important Guidelines:\n" + "* Code Quality: Your code should be clean, complete, self-contained, and accurate.\n" + "* Code Validity: Please ensure that your Python code is executable and does not contain any errors.\n" + "* Packages: Remember to import any necessary libraries, and to use all libraries you import.\n" + "* Complexity & Concepts: The code should be written at a {{ code_complexity }} level, making use of concepts such as {{ code_concept }}.\n" + ), + ) + ) + + config_builder.add_column( + LLMJudgeColumnConfig( + name="code_judge_result", + model_alias=model_alias, + prompt=TEXT_TO_PYTHON_JUDGE_TEMPLATE, + scores=python_scoring, + ) + ) + + config_builder.add_column( + ValidationColumnConfig( + name="code_validity_result", + validator_type=ValidatorType.CODE, + target_columns=["code_implementation"], + validator_params=CodeValidatorParams( + code_lang=CodeLang.PYTHON, + ), + batch_size=100, + ) + ) + + return config_builder + + +def create_dataset( + config_builder: DataDesignerConfigBuilder, + num_records: int, + artifact_path: Path | str | None = None, +) -> DatasetCreationResults: + data_designer = DataDesigner(artifact_path=artifact_path) + results = data_designer.create(config_builder, num_records=num_records) + return results + + +TEXT_TO_PYTHON_JUDGE_TEMPLATE = """\ +You are an expert in Python programming, with specialized knowledge in software engineering, data science, and algorithmic problem-solving. + +You think about potential flaws and errors in the code. You are a tough critic, but a fair one. + +Take a deep breath and use the Python Code Quality Rubric below to score the **Generated Python Code** based on the INSTRUCTIONS. 
+ +#### INSTRUCTIONS +The Generated Python Code should be a valid response to the Natural Language Prompt below + +Natural Language Prompt: +{{ instruction }} + +Generated Python Code +{{ code_implementation }} +""" + + +python_scoring = [ + Score( + name="Relevance", + description="Adherence to INSTRUCTIONS and CONTEXT", + options={ + 4: "Perfectly meets all specified requirements.", + 3: "Meets most requirements with minor deviations.", + 2: "Moderate deviation from the instructions.", + 1: "Significant deviations from the instructions.", + 0: "Does not adhere to the instructions.", + }, + ), + Score( + name="Pythonic", + description="Pythonic Code and Best Practices (Does the code follow Python conventions and best practices?)", + options={ + 4: "The code exemplifies Pythonic principles, making excellent use of Python-specific constructs, standard library modules and programming idioms; follows all relevant PEPs.", + 3: "The code closely follows Python conventions and adheres to many best practices; good use of Python-specific constructs, standard library modules and programming idioms.", + 2: "The code generally follows Python conventions but has room for better alignment with Pythonic practices.", + 1: "The code loosely follows Python conventions, with several deviations from best practices.", + 0: "The code does not follow Python conventions or best practices, using non-Pythonic approaches.", + }, + ), + Score( + name="Readability", + description="Readability and Maintainability (Is the Python code easy to understand and maintain?)", + options={ + 4: ( + "The code is excellently formatted, follows PEP 8 guidelines, is elegantly concise and clear, uses meaningful variable names, " + "ensuring high readability and ease of maintenance; organizes complex logic well. Docstrings are given in a Google Docstring format." 
+ ), + 3: "The code is well-formatted in the sense of code-as-documentation, making it relatively easy to understand and maintain; uses descriptive names and organizes logic clearly.", + 2: "The code is somewhat readable with basic formatting and some comments, but improvements are needed; needs better use of descriptive names and organization.", + 1: "The code has minimal formatting, making it hard to understand; lacks meaningful names and organization.", + 0: "The code is unreadable, with no attempt at formatting or description.", + }, + ), + Score( + name="Efficiency", + description="Efficiency and Performance (Is the code optimized for performance?)", + options={ + 4: "The solution is highly efficient, using appropriate data structures and algorithms; avoids unnecessary computations and optimizes for both time and space complexity.", + 3: "The solution is efficient, with good use of Python's built-in functions and libraries; minor areas for optimization.", + 2: "The solution is moderately efficient, but misses some opportunities for optimization; uses some inefficient patterns.", + 1: "The solution shows poor efficiency, with notable performance issues; lacks effective optimization techniques.", + 0: "The solution is highly inefficient; overlooks fundamental optimization practices, resulting in significant performance issues.", + }, + ), +] + + +if __name__ == "__main__": + from argparse import ArgumentParser + + parser = ArgumentParser() + parser.add_argument("--model-alias", type=str, default="openai-text") + parser.add_argument("--num-records", type=int, default=5) + parser.add_argument("--artifact-path", type=str, default=None) + args = parser.parse_args() + + config_builder = build_config(model_alias=args.model_alias) + results = create_dataset(config_builder, num_records=args.num_records, artifact_path=args.artifact_path) + + print(f"Dataset saved to: {results.artifact_storage.final_dataset_path}") + + results.load_analysis().to_report() +``` diff --git a/fern/v0.5.0/pages/recipes/code-generation/text-to-sql.mdx b/fern/v0.5.0/pages/recipes/code-generation/text-to-sql.mdx new file mode 100644 index 00000000..42b4dbb9 --- /dev/null +++ b/fern/v0.5.0/pages/recipes/code-generation/text-to-sql.mdx @@ -0,0 +1,320 @@ +--- +title: Text to SQL +description: Generate SQL queries from natural language descriptions. 
+--- + + +[Download the complete recipe script](https://github.com/NVIDIA-NeMo/DataDesigner/blob/main/docs/assets/recipes/code_generation/text_to_sql.py) + + +```python +from pathlib import Path + +import data_designer.config as dd +from data_designer.interface import DataDesigner +from data_designer.interface.results import DatasetCreationResults + + +def build_config(model_alias: str) -> dd.DataDesignerConfigBuilder: + config_builder = dd.DataDesignerConfigBuilder() + + config_builder.add_column( + dd.SamplerColumnConfig( + name="industry_sector", + sampler_type=dd.SamplerType.CATEGORY, + params=dd.CategorySamplerParams( + values=["Healthcare", "Finance", "Technology"], + ), + ) + ) + + config_builder.add_column( + dd.SamplerColumnConfig( + name="topic", + sampler_type=dd.SamplerType.SUBCATEGORY, + params=dd.SubcategorySamplerParams( + category="industry_sector", + values={ + "Healthcare": [ + "Electronic Health Records (EHR) Systems", + "Telemedicine Platforms", + "AI-Powered Diagnostic Tools", + ], + "Finance": [ + "Fraud Detection Software", + "Automated Trading Systems", + "Personal Finance Apps", + ], + "Technology": [ + "Cloud Computing Platforms", + "Artificial Intelligence and Machine Learning Platforms", + "DevOps and CI/CD Tools", + ], + }, + ), + ) + ) + + config_builder.add_column( + dd.SamplerColumnConfig( + name="sql_complexity", + sampler_type=dd.SamplerType.CATEGORY, + params=dd.CategorySamplerParams( + values=["Beginner", "Intermediate", "Advanced"], + ), + ) + ) + + config_builder.add_column( + dd.SamplerColumnConfig( + name="sql_concept", + sampler_type=dd.SamplerType.SUBCATEGORY, + params=dd.SubcategorySamplerParams( + category="sql_complexity", + values={ + "Beginner": [ + "Basic SELECT Statements", + "WHERE Clauses", + "Basic JOINs", + "INSERT, UPDATE, DELETE", + ], + "Intermediate": [ + "Aggregation Functions", + "Multiple JOINs", + "Subqueries", + "Views", + ], + "Advanced": [ + "Window Functions", + "Common Table Expressions (CTEs)", + "Stored Procedures", + "Query Optimization", + ], + }, + ), + ) + ) + + config_builder.add_column( + dd.SamplerColumnConfig( + name="sql_task_type", + sampler_type=dd.SamplerType.CATEGORY, + params=dd.CategorySamplerParams( + values=[ + "Data Retrieval", + "Data Manipulation", + "Analytics and Reporting", + "Data Transformation", + ], + ), + ) + ) + + config_builder.add_column( + dd.SamplerColumnConfig( + name="instruction_phrase", + sampler_type=dd.SamplerType.CATEGORY, + params=dd.CategorySamplerParams( + values=[ + "Write an SQL query that", + "Create an SQL statement to", + "Develop an SQL query to", + "Can you write SQL that", + "Formulate an SQL query that", + ], + ), + ) + ) + + config_builder.add_column( + dd.LLMTextColumnConfig( + name="sql_prompt", + model_alias=model_alias, + system_prompt="You are an expert at generating clear and specific SQL tasks.", + prompt=SQL_PROMPT_TEXT, + ) + ) + + config_builder.add_column( + dd.LLMCodeColumnConfig( + name="sql_context", + model_alias=model_alias, + code_lang=dd.CodeLang.SQL_ANSI, + system_prompt=( + "You are an expert SQL database designer who creates clean, efficient, and " + "well-structured database schemas." 
+ ), + prompt=SQL_CONTEXT_TEXT, + ) + ) + + config_builder.add_column( + dd.LLMCodeColumnConfig( + name="sql", + model_alias=model_alias, + code_lang=dd.CodeLang.SQL_ANSI, + system_prompt="You are an expert SQL programmer who writes clean, efficient, and well-structured queries.", + prompt=SQL_CODE_TEXT, + ) + ) + + config_builder.add_column( + dd.ValidationColumnConfig( + name="code_validity_result", + validator_type=dd.ValidatorType.CODE, + target_columns=["sql"], + validator_params=dd.CodeValidatorParams( + code_lang=dd.CodeLang.SQL_ANSI, + ), + batch_size=100, + ) + ) + + config_builder.add_column( + dd.LLMJudgeColumnConfig( + name="code_judge_result", + model_alias=model_alias, + prompt=TEXT_TO_SQL_JUDGE_TEMPLATE, + scores=sql_scoring, + ) + ) + + return config_builder + + +def create_dataset( + config_builder: dd.DataDesignerConfigBuilder, + num_records: int, + artifact_path: Path | str | None = None, +) -> DatasetCreationResults: + data_designer = DataDesigner(artifact_path=artifact_path) + results = data_designer.create(config_builder, num_records=num_records) + return results + + +SQL_PROMPT_TEXT = ( + "Generate an instruction to create SQL code that solves a specific problem.\n" + "Each instruction should begin with one of the following phrases: {{instruction_phrase}}.\n\n" + "Important Guidelines:\n" + "* Industry Relevance: Ensure the instruction pertains to the {{industry_sector}} sector and {{topic}} topic.\n" + "* SQL Complexity: Tailor the instruction to the {{sql_complexity}} level. Utilize relevant {{sql_concept}} " + "where appropriate to match the complexity level.\n" + "* Task Type: The instruction should involve a {{sql_task_type}} task.\n" + "* Clarity and Specificity: Make the problem statement clear and unambiguous. Provide sufficient context to " + "understand the requirements without being overly verbose.\n" + "* Response Formatting: Do not include any markers such as ### Response ### in the instruction.\n" +) + +SQL_CONTEXT_TEXT = ( + "Generate the SQL for creating database tables that would be relevant for the following instruction:\n" + "Instruction: {{sql_prompt}}\n\n" + "Important Guidelines:\n" + "* Relevance: Ensure all tables are directly related to the {{industry_sector}} sector and {{topic}} topic.\n" + "* Completeness: Include all essential columns with appropriate data types, primary/foreign keys, and necessary constraints.\n" + "* Realism: Use realistic table structures typical for the specified industry.\n" + "* Executable SQL: Provide complete CREATE TABLE statements that can be run without modification.\n" + "* Consistency: Use consistent naming conventions (e.g., snake_case for table and column names).\n" + "* Sample Data: Include INSERT statements with sample data that makes sense for the tables (at least 5-10 rows per table)." +) + +SQL_CODE_TEXT = ( + "Write SQL code for the following instruction based on the provided database context:\n" + "Instruction: {{sql_prompt}}\n\n" + "Database Context:\n" + "{{sql_context}}\n\n" + "Important Guidelines:\n" + "* Code Quality: Your SQL should be clean, complete, self-contained and accurate.\n" + "* Code Validity: Please ensure that your SQL code is executable and does not contain any errors.\n" + "* Context: Base your query on the provided database context. 
Only reference tables and columns that " + "exist in the context.\n" + "* Complexity & Concepts: The SQL should be written at a {{sql_complexity}} level, making use of " + "concepts such as {{sql_concept}}.\n" + "* Task Type: Ensure your solution implements the appropriate {{sql_task_type}} operation.\n" + "* Comments: Include brief comments explaining the key parts of your query.\n" +) + + +TEXT_TO_SQL_JUDGE_TEMPLATE = """\ +You are an expert in SQL with deep knowledge of relational modeling, query semantics, +and performance tuning across common dialects (e.g., PostgreSQL, MySQL, SQLite, SQL Server). +You think critically about correctness, readability, and efficiency. + +Use the SQL Query Quality Rubric below to score the **Generated SQL Query** based on the INSTRUCTIONS. + +#### INSTRUCTIONS +The Generated SQL Query should be a valid response to the Natural Language Prompt below + +Natural Language Prompt: +{{ sql_prompt }} + +Database Context: +{{ sql_context }} + +Generated SQL Query +{{ sql }} +""" + + +sql_scoring = [ + dd.Score( + name="Relevance", + description="Adherence to INSTRUCTIONS and CONTEXT", + options={ + 4: "Perfectly meets all specified requirements.", + 3: "Meets most requirements with minor deviations.", + 2: "Moderate deviation from the instructions.", + 1: "Significant deviations from the instructions.", + 0: "Does not adhere to the instructions.", + }, + ), + dd.Score( + name="SQL Correctness", + description="Syntax and semantic correctness; returns the intended result", + options={ + 4: "Valid SQL with correct joins, filters, grouping/aggregation, and NULL handling; produces the intended result set under the stated/implicit dialect.", + 3: "Generally correct with minor issues (e.g., edge-case NULLs, minor grouping detail) but still likely yields the intended result.", + 2: "Partially correct; noticeable semantic mistakes (joins, grouping, filters) that may change results or fail in edge cases.", + 1: "Largely incorrect; major semantic or syntactic errors likely causing failure or wrong results.", + 0: "Invalid SQL or unrelated to the task; will not run or cannot produce a meaningful result.", + }, + ), + dd.Score( + name="Readability", + description="Formatting, clarity, and maintainability", + options={ + 4: "Cleanly formatted (keywords/clauses consistently styled), clear structure (CTEs/subqueries where helpful), meaningful table/column aliases, and concise.", + 3: "Generally readable with consistent formatting and understandable aliases; could be organized slightly better.", + 2: "Somewhat readable but inconsistent formatting or confusing aliasing; structure is harder to follow.", + 1: "Poorly formatted and hard to read; unclear structure and aliasing.", + 0: "Unreadable or chaotic; no meaningful structure or styling.", + }, + ), + dd.Score( + name="Efficiency", + description="Query performance best practices", + options={ + 4: "Uses sargable predicates, appropriate joins, selective filters early, avoids SELECT *, unnecessary DISTINCT, and wasteful subqueries; likely to use indexes effectively.", + 3: "Mostly efficient; minor opportunities for improvement (e.g., simplifying expressions, reducing data early).", + 2: "Moderate inefficiencies (e.g., non-sargable filters, unnecessary nested subqueries, broad SELECT *).", + 1: "Notably inefficient patterns likely causing large scans or poor plans.", + 0: "Highly inefficient; ignores basic best practices and likely to perform very poorly.", + }, + ), +] + +if __name__ == "__main__": + from argparse import 
ArgumentParser + + parser = ArgumentParser() + parser.add_argument("--model-alias", type=str, default="openai-text") + parser.add_argument("--num-records", type=int, default=5) + parser.add_argument("--artifact-path", type=str, default=None) + args = parser.parse_args() + + config_builder = build_config(model_alias=args.model_alias) + results = create_dataset(config_builder, num_records=args.num_records, artifact_path=args.artifact_path) + + print(f"Dataset saved to: {results.artifact_storage.final_dataset_path}") + + results.load_analysis().to_report() +``` diff --git a/fern/v0.5.0/pages/recipes/index.mdx b/fern/v0.5.0/pages/recipes/index.mdx new file mode 100644 index 00000000..c12eff32 --- /dev/null +++ b/fern/v0.5.0/pages/recipes/index.mdx @@ -0,0 +1,103 @@ +--- +title: Use Case Recipes +description: Ready-to-use code examples for common Data Designer use cases. +--- + +Recipes are a collection of code examples that demonstrate how to leverage Data Designer in specific use cases. +Each recipe is a self-contained example that can be run independently. + + +Recipes provide working code for specific use cases without detailed explanations. If you're learning Data Designer for the first time, we recommend starting with our [tutorial notebooks](/docs/tutorials/overview), which offer step-by-step guidance and explain core concepts. Once you're familiar with the basics, return here for practical, ready-to-use implementations. + + + +These recipes use the OpenAI model provider by default. Ensure your OpenAI model provider has been set up using the Data Designer CLI before running a recipe. + + + + + Generate a dataset of natural language instructions paired with Python code implementations, with varying complexity levels and industry focuses. + + **Demonstrates:** + - Python code generation + - Python code validation + - LLM-as-judge + + [Download Code](https://github.com/NVIDIA-NeMo/DataDesigner/blob/main/docs/assets/recipes/code_generation/text_to_python.py) + + + Generate a dataset of natural language instructions paired with SQL code implementations, with varying complexity levels and industry focuses. + + **Demonstrates:** + - SQL code generation + - SQL code validation + - LLM-as-judge + + [Download Code](https://github.com/NVIDIA-NeMo/DataDesigner/blob/main/docs/assets/recipes/code_generation/text_to_sql.py) + + + Generate a dataset that contains information about products and associated question/answer pairs. + + **Demonstrates:** + - Structured outputs + - Expression columns + - LLM-as-judge + + [Download Code](https://github.com/NVIDIA-NeMo/DataDesigner/blob/main/docs/assets/recipes/qa_and_chat/product_info_qa.py) + + + Generate a dataset of multi-turn chat conversations between a user and an AI assistant. + + **Demonstrates:** + - Structured outputs + - Expression columns + - LLM-as-judge + + [Download Code](https://github.com/NVIDIA-NeMo/DataDesigner/blob/main/docs/assets/recipes/qa_and_chat/multi_turn_chat.py) + + + Minimal example of MCP tool calling with Data Designer. Defines a simple MCP server with basic tools and generates data that requires tool calls to complete. + + **Demonstrates:** + - MCP tool calling with LocalStdioMCPProvider + - Simple tool server definition + - Tool-augmented text generation + + [Download Code](https://github.com/NVIDIA-NeMo/DataDesigner/blob/main/docs/assets/recipes/mcp_and_tooluse/basic_mcp.py) + + + Generate grounded Q&A pairs from PDF documents using MCP tool calls and BM25 search. 
+ + **Demonstrates:** + - MCP tool calling with LocalStdioMCPProvider + - BM25 lexical search for retrieval + - Retrieval-grounded QA generation + - Per-column trace capture + + [Download Code](https://github.com/NVIDIA-NeMo/DataDesigner/blob/main/docs/assets/recipes/mcp_and_tooluse/pdf_qa.py) + + diff --git a/fern/v0.5.0/pages/recipes/mcp-and-tooluse/basic-mcp.mdx b/fern/v0.5.0/pages/recipes/mcp-and-tooluse/basic-mcp.mdx new file mode 100644 index 00000000..20b77f81 --- /dev/null +++ b/fern/v0.5.0/pages/recipes/mcp-and-tooluse/basic-mcp.mdx @@ -0,0 +1,280 @@ +--- +title: Basic MCP Tool Use +description: Minimal example of MCP tool calling with Data Designer. +--- + + +[Download Code](https://github.com/NVIDIA-NeMo/DataDesigner/blob/main/docs/assets/recipes/mcp_and_tooluse/basic_mcp.py) + + +This recipe demonstrates the minimal MCP tool-calling workflow with Data Designer: + +1. Define a simple MCP server with basic tools (`get_fact`, `add_numbers`) +2. Configure Data Designer to use the MCP tools +3. Generate data that requires tool calls to complete + +## Prerequisites + +- `NVIDIA_API_KEY` environment variable for NVIDIA provider model aliases (default) +- `OPENAI_API_KEY` environment variable for OpenAI provider model aliases + +## Running the Recipe + +```bash +# Basic usage (generates 2 records by default) +uv run basic_mcp.py + +# For help message and available options +uv run basic_mcp.py --help +``` + +## Features Demonstrated + +- **MCP tool calling** with `LocalStdioMCPProvider` +- **Simple tool server** definition using FastMCP +- **Tool-augmented text generation** with tool call history capture + +## Code + +```python +# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +# /// script +# requires-python = ">=3.10" +# dependencies = [ +# "data-designer", +# "mcp", +# ] +# /// +"""Basic MCP Recipe: Simple Tool Use Example + +This recipe demonstrates the minimal MCP tool-calling workflow with Data Designer: + +1) Define a simple MCP server with basic tools (get_fact, add_numbers) +2) Configure Data Designer to use the MCP tools +3) Generate data that requires tool calls to complete + +Prerequisites: + - OPENAI_API_KEY environment variable for OpenAI provider model aliases. + - NVIDIA_API_KEY environment variable for NVIDIA provider model aliases (default model alias is "nvidia-text"). 
+ +Run: + # Basic usage (generates 2 records by default) + uv run basic_mcp.py + + # For help message and available options + uv run basic_mcp.py --help +""" + +from __future__ import annotations + +import argparse +import json +import os +import sys +from pathlib import Path + +from mcp.server.fastmcp import FastMCP + +import data_designer.config as dd +from data_designer.interface import DataDesigner + +MCP_SERVER_NAME = "basic-tools" + + +# ============================================================================= +# MCP Server Definition +# ============================================================================= + +mcp_server = FastMCP(MCP_SERVER_NAME) + +# Simple knowledge base for the get_fact tool +FACTS = { + "python": "Python was created by Guido van Rossum and first released in 1991.", + "earth": "Earth is the third planet from the Sun and has one natural satellite, the Moon.", + "water": "Water (H2O) freezes at 0°C (32°F) and boils at 100°C (212°F) at sea level.", + "light": "The speed of light in a vacuum is approximately 299,792 kilometers per second.", +} + + +@mcp_server.tool() +def get_fact(topic: str) -> str: + """Get a fact about a topic from the knowledge base. + + Args: + topic: The topic to look up (e.g., "python", "earth", "water", "light") + + Returns: + A fact about the topic, or an error message if not found. + """ + topic_lower = topic.lower() + if topic_lower in FACTS: + return json.dumps({"topic": topic, "fact": FACTS[topic_lower]}) + return json.dumps({"error": f"No fact found for topic: {topic}", "available_topics": list(FACTS.keys())}) + + +@mcp_server.tool() +def add_numbers(a: float, b: float) -> str: + """Add two numbers together. + + Args: + a: First number + b: Second number + + Returns: + The sum of the two numbers. + """ + result = a + b + return json.dumps({"a": a, "b": b, "sum": result}) + + +@mcp_server.tool() +def list_topics() -> str: + """List all available topics in the knowledge base. + + Returns: + List of available topics. + """ + return json.dumps({"topics": list(FACTS.keys())}) + + +# ============================================================================= +# Data Designer Configuration +# ============================================================================= + + +def build_config(model_alias: str, provider_name: str) -> dd.DataDesignerConfigBuilder: + """Build the Data Designer configuration for basic tool use.""" + tool_config = dd.ToolConfig( + tool_alias="basic-tools", + providers=[provider_name], + allow_tools=["get_fact", "add_numbers", "list_topics"], + max_tool_call_turns=5, + timeout_sec=30.0, + ) + + config_builder = dd.DataDesignerConfigBuilder(tool_configs=[tool_config]) + + # Add a seed column with topics to look up + config_builder.add_column( + dd.SamplerColumnConfig( + name="topic", + sampler_type=dd.SamplerType.CATEGORY, + params=dd.CategorySamplerParams(values=["python", "earth", "water", "light"]), + ) + ) + + # Add a column that uses the get_fact tool + config_builder.add_column( + dd.LLMTextColumnConfig( + name="fact_response", + model_alias=model_alias, + prompt=( + "Use the get_fact tool to look up information about '{{ topic }}', " + "then provide a one-sentence summary of what you learned." + ), + system_prompt="You must call the get_fact tool before answering. 
Only use information from tool results.", + tool_alias="basic-tools", + with_trace=dd.TraceType.ALL_MESSAGES, + ) + ) + + # Add a column that uses the add_numbers tool + config_builder.add_column( + dd.SamplerColumnConfig( + name="num_a", + sampler_type=dd.SamplerType.UNIFORM, + params=dd.UniformSamplerParams(low=1, high=100), + ) + ) + + config_builder.add_column( + dd.SamplerColumnConfig( + name="num_b", + sampler_type=dd.SamplerType.UNIFORM, + params=dd.UniformSamplerParams(low=1, high=100), + ) + ) + + config_builder.add_column( + dd.LLMTextColumnConfig( + name="math_response", + model_alias=model_alias, + prompt=( + "Use the add_numbers tool to calculate {{ num_a }} + {{ num_b }}, " + "then report the result in a complete sentence." + ), + system_prompt="You must call the add_numbers tool to perform the calculation. Report the exact result.", + tool_alias="basic-tools", + with_trace=dd.TraceType.ALL_MESSAGES, + ) + ) + + return config_builder + + +# ============================================================================= +# Main Entry Points +# ============================================================================= + + +def serve() -> None: + """Run the MCP server (called when launched as subprocess by Data Designer).""" + mcp_server.run() + + +def parse_args() -> argparse.Namespace: + """Parse command line arguments.""" + parser = argparse.ArgumentParser(description="Basic MCP tool use example with Data Designer.") + subparsers = parser.add_subparsers(dest="command") + + # 'serve' subcommand for running the MCP server + subparsers.add_parser("serve", help="Run the MCP server (used by Data Designer)") + + # Default command arguments (demo mode) + parser.add_argument("--model-alias", type=str, default="nvidia-text", help="Model alias to use for generation") + parser.add_argument("--num-records", type=int, default=2, help="Number of records to generate") + # For compatibility with Makefile test-run-recipes target (ignored in demo mode) + parser.add_argument("--artifact-path", type=str, default=None, help=argparse.SUPPRESS) + + return parser.parse_args() + + +def main() -> None: + """Main entry point for the demo.""" + args = parse_args() + + # Handle 'serve' subcommand + if args.command == "serve": + serve() + return + + # Demo mode: run Data Designer with the MCP server + if os.environ.get("NVIDIA_API_KEY") is None and args.model_alias.startswith("nvidia"): + raise RuntimeError("NVIDIA_API_KEY must be set when using NVIDIA model aliases.") + + # Configure MCP provider to run via stdio transport (local subprocess) + mcp_provider = dd.LocalStdioMCPProvider( + name=MCP_SERVER_NAME, + command=sys.executable, + args=[str(Path(__file__).resolve()), "serve"], + ) + + config_builder = build_config( + model_alias=args.model_alias, + provider_name=MCP_SERVER_NAME, + ) + + data_designer = DataDesigner(mcp_providers=[mcp_provider]) + preview_results = data_designer.preview(config_builder, num_records=args.num_records) + + # Display results + print("\n" + "=" * 60) + print("GENERATED DATA") + print("=" * 60) + preview_results.display_sample_record() + + +if __name__ == "__main__": + main() +``` diff --git a/fern/v0.5.0/pages/recipes/mcp-and-tooluse/pdf-qa.mdx b/fern/v0.5.0/pages/recipes/mcp-and-tooluse/pdf-qa.mdx new file mode 100644 index 00000000..090e6c97 --- /dev/null +++ b/fern/v0.5.0/pages/recipes/mcp-and-tooluse/pdf-qa.mdx @@ -0,0 +1,473 @@ +--- +title: PDF Document QA +description: Generate grounded Q&A pairs from PDF documents using MCP tool calls and BM25 search. 
+--- + + +[Download Code](https://github.com/NVIDIA-NeMo/DataDesigner/blob/main/docs/assets/recipes/mcp_and_tooluse/pdf_qa.py) + + +This recipe demonstrates an end-to-end MCP tool-calling workflow: + +1. Load one or more PDF documents from URLs or local paths +2. Index them with BM25S for fast lexical search +3. Use Data Designer tool calls (`search_docs`) to generate grounded Q&A pairs + +## Prerequisites + +- `NVIDIA_API_KEY` environment variable for NVIDIA provider model aliases (default) +- `OPENAI_API_KEY` environment variable for OpenAI provider model aliases + +## Running the Recipe + +```bash +# Basic usage with default sample PDF (generates 4 Q&A pairs) +uv run pdf_qa.py + +# For help message and available options +uv run pdf_qa.py --help + +# Index a custom PDF +uv run pdf_qa.py --pdf path/to/your/document.pdf + +# Index multiple PDFs +uv run pdf_qa.py --pdf doc1.pdf --pdf https://example.com/doc2.pdf +``` + +## Features Demonstrated + +- **MCP tool calling** with `LocalStdioMCPProvider` +- **BM25 lexical search** for document retrieval +- **Retrieval-grounded QA generation** with citations +- **Per-column trace capture** for debugging tool calls +- **Structured output** for Q&A pairs with Pydantic models + +## Architecture + +``` +┌─────────────────┐ ┌─────────────────┐ ┌─────────────────┐ +│ PDF Files │ │ BM25 Index │ │ Data Designer │ +│ (local/URL) │ ───► │ (in-memory) │ ◄─── │ (tool calls) │ +└─────────────────┘ └─────────────────┘ └─────────────────┘ + │ │ + │ search_docs() │ + ◄──────────────────────────┤ + │ │ + │ results │ + ├──────────────────────────► + │ │ + ▼ + ┌─────────────────┐ + │ Q&A Pair with │ + │ Citation │ + └─────────────────┘ +``` + +## Output Schema + +Each generated record includes: + +| Field | Description | +|-------|-------------| +| `question` | A question grounded in the document text | +| `answer` | A concise answer grounded in the supporting passage | +| `supporting_passage` | A 2-4 sentence excerpt from the search result | +| `citation` | Source reference (URL, page number, etc.) | + +## Code + +```python +# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +# /// script +# requires-python = ">=3.10" +# dependencies = [ +# "data-designer", +# "mcp", +# "bm25s", +# "pymupdf", +# "rich", +# ] +# /// +"""MCP + Tool Use Recipe: Document Q&A with BM25S Lexical Search + +This recipe demonstrates an end-to-end MCP tool-calling workflow: + +1) Load one or more PDF documents from URLs or local paths. +2) Index them with BM25S for fast lexical search. +3) Use Data Designer tool calls (`search_docs`) to generate grounded Q&A pairs. + +Prerequisites: + - OPENAI_API_KEY environment variable for OpenAI provider model aliases. + - NVIDIA_API_KEY environment variable for NVIDIA provider model aliases (default model alias is "nvidia-reasoning"). 
+ +Run: + # Basic usage with default sample PDF (generates 4 Q&A pairs) + uv run pdf_qa.py + + # For help message and available options + uv run pdf_qa.py --help +""" + +from __future__ import annotations + +import argparse +import io +import json +import os +import sys +from pathlib import Path +from urllib.parse import urlparse +from urllib.request import urlopen + +import bm25s +import fitz +from mcp.server.fastmcp import FastMCP +from pydantic import BaseModel, Field + +import data_designer.config as dd +from data_designer.config.preview_results import PreviewResults +from data_designer.interface import DataDesigner + +DEFAULT_PDF_URL = "https://research.nvidia.com/labs/nemotron/files/NVIDIA-Nemotron-3-Nano-Technical-Report.pdf" +MCP_SERVER_NAME = "doc-bm25-search" + +# Global state for the BM25 index (populated at server startup) +_bm25_retriever: bm25s.BM25 | None = None +_corpus: list[dict[str, str]] = [] + + +class QAPair(BaseModel): + question: str = Field(..., description="A question grounded in the document text.") + answer: str = Field(..., description="A concise answer grounded in the supporting passage.") + supporting_passage: str = Field( + ..., description="A short excerpt (2-4 sentences) copied from the search result that supports the answer." + ) + citation: str = Field( + ..., description="The citation (e.g. source url, page number, etc) of the supporting passage." + ) + + +class TopicList(BaseModel): + topics: list[str] = Field( + ..., + description="High-level topics covered by the document.", + ) + + +def _is_url(path_or_url: str) -> bool: + """Check if the given string is a URL.""" + parsed = urlparse(path_or_url) + return parsed.scheme in ("http", "https") + + +def _get_source_name(path_or_url: str) -> str: + """Extract a human-readable source name from a path or URL.""" + if _is_url(path_or_url): + parsed = urlparse(path_or_url) + return Path(parsed.path).name or parsed.netloc + return Path(path_or_url).name + + +def extract_pdf_text(path_or_url: str) -> list[dict[str, str]]: + """Extract text from a PDF file or URL, returning a list of passages with metadata.""" + passages: list[dict[str, str]] = [] + source_name = _get_source_name(path_or_url) + + if _is_url(path_or_url): + with urlopen(path_or_url) as response: + pdf_bytes = response.read() + doc = fitz.open(stream=io.BytesIO(pdf_bytes), filetype="pdf") + else: + doc = fitz.open(path_or_url) + + for page_num in range(len(doc)): + page = doc[page_num] + text = page.get_text("text").strip() + if text: + passages.append( + { + "text": text, + "page": str(page_num + 1), + "source": source_name, + } + ) + + doc.close() + return passages + + +def build_bm25_index(passages: list[dict[str, str]]) -> bm25s.BM25: + """Build a BM25S index from the extracted passages.""" + corpus_texts = [p["text"] for p in passages] + corpus_tokens = bm25s.tokenize(corpus_texts, stopwords="en") + + retriever = bm25s.BM25() + retriever.index(corpus_tokens) + + return retriever + + +def initialize_search_index(pdf_sources: list[str]) -> None: + """Load PDFs from paths/URLs and build the BM25 index.""" + global _bm25_retriever, _corpus + + _corpus = [] + for source in pdf_sources: + passages = extract_pdf_text(source) + _corpus.extend(passages) + + if _corpus: + _bm25_retriever = build_bm25_index(_corpus) + + +# MCP Server Definition +mcp_server = FastMCP(MCP_SERVER_NAME) + + +@mcp_server.tool() +def search_docs(query: str, limit: int = 5, document: str = "", page: str = "") -> str: + """Search through documents using BM25 lexical search. 
+ + BM25 is a keyword-based retrieval algorithm that matches exact terms. For best results: + + - Use specific keywords, not full questions + - Include domain-specific terms that would appear in the source text + - Combine multiple relevant terms to narrow results + + Args: + query: Search query string - use specific keywords for best results + limit: Maximum number of results to return (default: 5) + document: Optional document source name to restrict search to + page: Optional page number to restrict search to (requires document) + + Returns: + JSON string with search results including text excerpts and page numbers + """ + global _bm25_retriever, _corpus + + if _bm25_retriever is None or not _corpus: + return json.dumps({"error": "Search index not initialized"}) + + if page and not document: + return json.dumps({"error": "The 'page' parameter requires 'document' to be specified"}) + + query_tokens = bm25s.tokenize([query], stopwords="en") + + retrieve_limit = len(_corpus) if (document or page) else limit + results, scores = _bm25_retriever.retrieve(query_tokens, k=min(retrieve_limit, len(_corpus))) + + search_results: list[dict[str, str | float]] = [] + for i in range(results.shape[1]): + doc_idx = results[0, i] + score = float(scores[0, i]) + + if score <= 0: + continue + + passage = _corpus[doc_idx] + + if document and passage["source"] != document: + continue + + if page and passage["page"] != page: + continue + + search_results.append( + { + "text": passage["text"][:2000], + "page": passage["page"], + "source": passage["source"], + "score": round(score, 4), + "url": f"file://{passage['source']}#page={passage['page']}", + } + ) + + if len(search_results) >= limit: + break + + return json.dumps({"results": search_results, "query": query, "total": len(search_results)}) + + +@mcp_server.tool() +def list_docs() -> str: + """List all documents in the search index with their page counts.""" + global _corpus + + if not _corpus: + return json.dumps({"error": "Search index not initialized", "documents": []}) + + doc_pages: dict[str, set[str]] = {} + for passage in _corpus: + source = passage["source"] + page = passage["page"] + if source not in doc_pages: + doc_pages[source] = set() + doc_pages[source].add(page) + + documents = [{"source": source, "page_count": len(pages)} for source, pages in sorted(doc_pages.items())] + + return json.dumps({"documents": documents, "total_documents": len(documents)}) + + +def build_config(model_alias: str, provider_name: str) -> dd.DataDesignerConfigBuilder: + """Build the Data Designer configuration for document Q&A generation.""" + tool_config = dd.ToolConfig( + tool_alias="doc-search", + providers=[provider_name], + allow_tools=["list_docs", "search_docs"], + max_tool_call_turns=100, + timeout_sec=30.0, + ) + + config_builder = dd.DataDesignerConfigBuilder(tool_configs=[tool_config]) + config_builder.add_column( + dd.SamplerColumnConfig( + name="seed_id", + sampler_type=dd.SamplerType.UUID, + params=dd.UUIDSamplerParams(), + drop=True, + ) + ) + + config_builder.add_column( + dd.LLMStructuredColumnConfig( + name="topic_candidates", + model_alias=model_alias, + prompt="Extract a high-level list of all topics covered by documents our knowledge base.", + system_prompt=( + "You must call tools before answering. " + "Do not use outside knowledge; only use tool results. " + "You can use as many tool calls as required to answer the user query." 
+ ), + output_format=TopicList, + tool_alias="doc-search", + with_trace=dd.TraceType.ALL_MESSAGES, + ) + ) + + config_builder.add_column( + dd.ExpressionColumnConfig( + name="topic", + expr="{{ topic_candidates.topics | random }}", + ) + ) + + qa_prompt = """\ +Create a question-answer pair on the topic "{{topic}}", with supporting text and citation. +The supporting_passage must be a 2-4 sentence excerpt copied from the tool result that demonstrates +why the answer is correct. +""" + + config_builder.add_column( + dd.LLMStructuredColumnConfig( + name="qa_pair", + model_alias=model_alias, + prompt=qa_prompt, + system_prompt=( + "You must call tools before answering. " + "Do not use outside knowledge; only use tool results. " + "You can use as many tool calls as required to answer the user query." + ), + output_format=QAPair, + tool_alias="doc-search", + with_trace=dd.TraceType.ALL_MESSAGES, + extract_reasoning_content=True, + ) + ) + + config_builder.add_column( + dd.ExpressionColumnConfig( + name="question", + expr="{{ qa_pair.question }}", + ) + ) + config_builder.add_column( + dd.ExpressionColumnConfig( + name="answer", + expr="{{ qa_pair.answer }}", + ) + ) + config_builder.add_column( + dd.ExpressionColumnConfig( + name="supporting_passage", + expr="{{ qa_pair.supporting_passage }}", + ) + ) + config_builder.add_column( + dd.ExpressionColumnConfig( + name="citation", + expr="{{ qa_pair.citation }}", + ) + ) + return config_builder + + +def serve() -> None: + """Run the MCP server (called when launched as subprocess by Data Designer).""" + pdf_sources_json = os.environ.get("PDF_SOURCES", "[]") + pdf_sources = json.loads(pdf_sources_json) + if not pdf_sources: + pdf_sources = [DEFAULT_PDF_URL] + initialize_search_index(pdf_sources) + mcp_server.run() + + +def parse_args() -> argparse.Namespace: + """Parse command line arguments.""" + parser = argparse.ArgumentParser(description="Generate document Q&A pairs using MCP tool calls with BM25S search.") + subparsers = parser.add_subparsers(dest="command") + + # 'serve' subcommand for running the MCP server + subparsers.add_parser("serve", help="Run the MCP server (used by Data Designer)") + + # Default command arguments (demo mode) + parser.add_argument("--model-alias", type=str, default="nvidia-reasoning", help="Model alias to use for generation") + parser.add_argument("--num-records", type=int, default=4, help="Number of Q&A pairs to generate") + parser.add_argument( + "--pdf", + type=str, + action="append", + dest="pdfs", + metavar="PATH_OR_URL", + help="PDF file path or URL to index (can be specified multiple times). 
Defaults to a sample PDF if not provided.", + ) + # For compatibility with Makefile test-run-recipes target (ignored in demo mode) + parser.add_argument("--artifact-path", type=str, default=None, help=argparse.SUPPRESS) + + return parser.parse_args() + + +def main() -> None: + """Main entry point for the demo.""" + args = parse_args() + + if args.command == "serve": + serve() + return + + if os.environ.get("NVIDIA_API_KEY") is None and args.model_alias.startswith("nvidia"): + raise RuntimeError("NVIDIA_API_KEY must be set when using NVIDIA model aliases.") + + pdf_sources = args.pdfs if args.pdfs else [DEFAULT_PDF_URL] + + mcp_provider = dd.LocalStdioMCPProvider( + name=MCP_SERVER_NAME, + command=sys.executable, + args=[str(Path(__file__).resolve()), "serve"], + env={"PDF_SOURCES": json.dumps(pdf_sources)}, + ) + + config_builder = build_config( + model_alias=args.model_alias, + provider_name=MCP_SERVER_NAME, + ) + + data_designer = DataDesigner(mcp_providers=[mcp_provider]) + preview_results = data_designer.preview(config_builder, num_records=args.num_records) + preview_results.display_sample_record() + + +if __name__ == "__main__": + main() +``` diff --git a/fern/v0.5.0/pages/recipes/qa-and-chat/multi-turn-chat.mdx b/fern/v0.5.0/pages/recipes/qa-and-chat/multi-turn-chat.mdx new file mode 100644 index 00000000..a5258128 --- /dev/null +++ b/fern/v0.5.0/pages/recipes/qa-and-chat/multi-turn-chat.mdx @@ -0,0 +1,205 @@ +--- +title: Multi-Turn Chat +description: Generate multi-turn conversational dialogues. +--- + + +[Download the complete recipe script](https://github.com/NVIDIA-NeMo/DataDesigner/blob/main/docs/assets/recipes/qa_and_chat/multi_turn_chat.py) + + +```python +from pathlib import Path +from typing import Literal + +from pydantic import BaseModel, Field + +import data_designer.config as dd +from data_designer.interface import DataDesigner +from data_designer.interface.results import DatasetCreationResults + + +def build_config(model_alias: str) -> dd.DataDesignerConfigBuilder: + config_builder = dd.DataDesignerConfigBuilder() + + config_builder.add_column( + dd.SamplerColumnConfig( + name="domain", + sampler_type=dd.SamplerType.CATEGORY, + params=dd.CategorySamplerParams(values=["Tech Support", "Personal Finances", "Educational Guidance"]), + ) + ) + + config_builder.add_column( + dd.SamplerColumnConfig( + name="topic", + sampler_type=dd.SamplerType.SUBCATEGORY, + params=dd.SubcategorySamplerParams( + category="domain", + values={ + "Tech Support": [ + "Troubleshooting a Laptop", + "Setting Up a Home Wi-Fi Network", + "Installing Software Updates", + ], + "Personal Finances": [ + "Budgeting Advice", + "Understanding Taxes", + "Investment Strategies", + ], + "Educational Guidance": [ + "Choosing a College Major", + "Effective Studying Techniques", + "Learning a New Language", + ], + }, + ), + ) + ) + + config_builder.add_column( + dd.SamplerColumnConfig( + name="complexity", + sampler_type=dd.SamplerType.CATEGORY, + params=dd.CategorySamplerParams(values=["Basic", "Intermediate", "Advanced"]), + ) + ) + + config_builder.add_column( + dd.SamplerColumnConfig( + name="conversation_length", + sampler_type=dd.SamplerType.CATEGORY, + params=dd.CategorySamplerParams(values=[2, 4, 6, 8]), + ) + ) + + config_builder.add_column( + dd.SamplerColumnConfig( + name="user_mood", + sampler_type=dd.SamplerType.CATEGORY, + params=dd.CategorySamplerParams(values=["happy", "silly", "sarcastic", "combative", "disappointed", "toxic"]), + ) + ) + + config_builder.add_column( + dd.LLMTextColumnConfig( + 
name="assistant_system_prompt", + prompt=( + "Write a reasonable system prompt for a helpful AI assistant with expertise in " + "{{domain}} and {{topic}}. The AI assistant must not engage in harmful behaviors." + ), + model_alias=model_alias, + ) + ) + + config_builder.add_column( + dd.LLMTextColumnConfig( + name="user_task", + prompt="Define a simple task related to {{topic}} of {{complexity}} complexity for the user.", + model_alias=model_alias, + ) + ) + + config_builder.add_column( + dd.LLMStructuredColumnConfig( + name="conversation", + prompt=( + "\n{{user_task}}\n\n\n" + "{{assistant_system_prompt}}\n\n" + "Generate a conversation between a user and an AI assistant with about .\n" + "User is asking the assistant for advice and is in a {{user_mood}} mood.\n" + "The conversation must be {{conversation_length}} messages in length.\n" + "The conversation must come to a natural end in {{conversation_length}} messages and if the assistant is unable " + "to solve the user's needs by then, they should offer to continue the conversation later or redirect them to additional resources.\n" + "The conversation must be realistic and natural, incorporating details from both the user's and assistant's profiles.\n" + "The AI assistant role has the system prompt defined in . All of its responses must conform to :\n" + ), + output_format=ChatConversation, + model_alias=model_alias, + ) + ) + + config_builder.add_column( + dd.LLMJudgeColumnConfig( + name="toxicity_evaluation", + prompt=TOXICITY_PROMPT_TEMPLATE, + scores=[toxicity_score], + model_alias=model_alias, + ) + ) + + return config_builder + + +def create_dataset( + config_builder: dd.DataDesignerConfigBuilder, + num_records: int, + artifact_path: Path | str | None = None, +) -> DatasetCreationResults: + data_designer = DataDesigner(artifact_path=artifact_path) + results = data_designer.create(config_builder, num_records=num_records) + return results + + +toxicity_score = dd.Score( + name="Toxicity", + description="Evaluation of harmful, harassing, or hateful language in conversations", + options={ + 4: "No toxicity detected; conversation is respectful and appropriate.", + 3: "Mild toxicity; slightly rude or sarcastic but not hateful or harmful.", + 2: "Moderate toxicity; contains disrespectful or harassing language.", + 1: "Severe toxicity; contains overt hate, harassment, or harmful content.", + 0: "Extreme toxicity; contains dangerous, threatening, or abusive content.", + }, +) + +TOXICITY_PROMPT_TEMPLATE = ( + "You are an expert in content moderation and digital communication ethics. " + "You are evaluating the toxicity level in user messages.\n" + "Take a deep breath and carefully analyze the conversation provided. \n" + "Use the rubric to score the level of toxicity in the USER'S messages only.\n" + "Consider:\n" + "- Focus primarily on harassment, hate, or harmful language\n" + "- Evaluate the full context of the conversation\n" + "- Be objective and consistent in your assessment\n" + "- If any single message exhibits toxicity, it should influence the final score\n" + "## CONVERSATION\n" + "{{ conversation }}" +) + + +class Message(BaseModel): + """A single message turn in the conversation.""" + + role: Literal["user", "assistant"] = Field(..., description="Which role is writing the message.") + content: str = Field(..., description="Message contents.") + + +class ChatConversation(BaseModel): + """A chat conversation between a specific user and an AI assistant. + * All conversations are initiated by the user role. 
+ * The assistant role always responds to the user message. + * Turns alternate between user and assistant roles. + * The last message is always from the assistant role. + * Message content can be long or short. + * All assistant messages are faithful responses and must be answered fully. + """ + + conversation: list[Message] = Field(..., description="List of all messages in the conversation.") + + +if __name__ == "__main__": + from argparse import ArgumentParser + + parser = ArgumentParser() + parser.add_argument("--model-alias", type=str, default="openai-text") + parser.add_argument("--num-records", type=int, default=5) + parser.add_argument("--artifact-path", type=str, default=None) + args = parser.parse_args() + + config_builder = build_config(model_alias=args.model_alias) + results = create_dataset(config_builder, num_records=args.num_records, artifact_path=args.artifact_path) + + print(f"Dataset saved to: {results.artifact_storage.final_dataset_path}") + + results.load_analysis().to_report() +``` diff --git a/fern/v0.5.0/pages/recipes/qa-and-chat/product-info-qa.mdx b/fern/v0.5.0/pages/recipes/qa-and-chat/product-info-qa.mdx new file mode 100644 index 00000000..d6fdd0a1 --- /dev/null +++ b/fern/v0.5.0/pages/recipes/qa-and-chat/product-info-qa.mdx @@ -0,0 +1,223 @@ +--- +title: Product Info QA +description: Generate question-answer pairs for product information. +--- + + +[Download the complete recipe script](https://github.com/NVIDIA-NeMo/DataDesigner/blob/main/docs/assets/recipes/qa_and_chat/product_info_qa.py) + + +```python +import string +from pathlib import Path + +from pydantic import BaseModel, Field + +import data_designer.config as dd +from data_designer.interface import DataDesigner +from data_designer.interface.results import DatasetCreationResults + + +def build_config(model_alias: str) -> dd.DataDesignerConfigBuilder: + config_builder = dd.DataDesignerConfigBuilder() + config_builder.add_column( + dd.SamplerColumnConfig( + name="category", + sampler_type=dd.SamplerType.CATEGORY, + params=dd.CategorySamplerParams( + values=[ + "Electronics", + "Clothing", + "Home Appliances", + "Groceries", + "Toiletries", + "Sports Equipment", + "Toys", + "Books", + "Pet Supplies", + "Tools & Home Improvement", + "Beauty", + "Health & Wellness", + "Outdoor Gear", + "Automotive", + "Jewelry", + "Watches", + "Office Supplies", + "Gifts", + "Arts & Crafts", + "Baby & Kids", + "Music", + "Video Games", + "Movies", + "Software", + "Tech Devices", + ] + ), + ) + ) + + config_builder.add_column( + dd.SamplerColumnConfig( + name="price_tens_of_dollars", + sampler_type=dd.SamplerType.UNIFORM, + params=dd.UniformSamplerParams(low=1, high=200), + ) + ) + + config_builder.add_column( + dd.ExpressionColumnConfig( + name="product_price", + expr="{{ (price_tens_of_dollars * 10) - 0.01 | round(2) }}", + dtype="float", + ) + ) + + config_builder.add_column( + dd.SamplerColumnConfig( + name="first_letter", + sampler_type=dd.SamplerType.CATEGORY, + params=dd.CategorySamplerParams(values=list(string.ascii_uppercase)), + ) + ) + + config_builder.add_column( + dd.SamplerColumnConfig( + name="is_hallucination", + sampler_type=dd.SamplerType.BERNOULLI, + params=dd.BernoulliSamplerParams(p=0.5), + ) + ) + + config_builder.add_column( + dd.LLMStructuredColumnConfig( + name="product_info", + model_alias=model_alias, + prompt=( + "Generate a realistic product description for a product in the {{ category }} " + "category that costs {{ product_price }}.\n" + "The name of the product MUST start with the letter {{ 
first_letter }}.\n" + ), + output_format=ProductInfo, + ) + ) + + config_builder.add_column( + dd.LLMTextColumnConfig( + name="question", + model_alias=model_alias, + prompt=("Ask a question about the following product:\n\n {{ product_info }}"), + ) + ) + + config_builder.add_column( + dd.LLMTextColumnConfig( + name="answer", + model_alias=model_alias, + prompt=( + "{%- if is_hallucination == 0 -%}\n" + "\n" + "{{ product_info }}\n" + "\n" + "{%- endif -%}\n" + "User Question: {{ question }}\n" + "Directly and succinctly answer the user's question.\n" + "{%- if is_hallucination == 1 -%}\n" + "Make up whatever information you need to in order to answer the user's request.\n" + "{%- endif -%}" + ), + ) + ) + + # Evaluate answer quality + config_builder.add_column( + dd.LLMJudgeColumnConfig( + name="llm_answer_metrics", + model_alias=model_alias, + prompt=( + "\n" + "{{ product_info }}\n" + "\n" + "User Question: {{question }}\n" + "AI Assistant Answer: {{ answer }}\n" + "Judge the AI assistant's response to the user's question about the product described in ." + ), + scores=answer_quality_scores, + ) + ) + + config_builder.add_column( + dd.ExpressionColumnConfig( + name="completeness_result", + expr="{{ llm_answer_metrics.Completeness.score }}", + ) + ) + + config_builder.add_column( + dd.ExpressionColumnConfig( + name="accuracy_result", + expr="{{ llm_answer_metrics.Accuracy.score }}", + ) + ) + + return config_builder + + +def create_dataset( + config_builder: dd.DataDesignerConfigBuilder, + num_records: int, + artifact_path: Path | str | None = None, +) -> DatasetCreationResults: + data_designer = DataDesigner(artifact_path=artifact_path) + results = data_designer.create(config_builder, num_records=num_records) + return results + + +class ProductInfo(BaseModel): + product_name: str = Field(..., description="A realistic product name for the market.") + key_features: list[str] = Field(..., min_length=1, max_length=3, description="Key product features.") + description: str = Field( + ..., + description="A short, engaging description of what the product does, highlighting a unique but believable feature.", + ) + price_usd: float = Field(..., description="The price of the product", ge=10, le=1000, decimal_places=2) + + +completeness_score = dd.Score( + name="Completeness", + description="Evaluation of AI assistant's thoroughness in addressing all aspects of the user's query.", + options={ + "Complete": "The response thoroughly covers all key points requested in the question, providing sufficient detail to satisfy the user's information needs.", + "PartiallyComplete": "The response addresses the core question but omits certain important details or fails to elaborate on relevant aspects that were requested.", + "Incomplete": "The response significantly lacks necessary information, missing major components of what was asked and leaving the query largely unanswered.", + }, +) + +accuracy_score = dd.Score( + name="Accuracy", + description="Evaluation of how factually correct the AI assistant's response is relative to the product information.", + options={ + "Accurate": "The information provided aligns perfectly with the product specifications without introducing any misleading or incorrect details.", + "PartiallyAccurate": "While some information is correctly stated, the response contains minor factual errors or potentially misleading statements about the product.", + "Inaccurate": "The response presents significantly wrong information about the product, with claims that contradict the actual 
product details.", + }, +) + +answer_quality_scores = [completeness_score, accuracy_score] + + +if __name__ == "__main__": + from argparse import ArgumentParser + + parser = ArgumentParser() + parser.add_argument("--model-alias", type=str, default="openai-text") + parser.add_argument("--num-records", type=int, default=5) + parser.add_argument("--artifact-path", type=str, default=None) + args = parser.parse_args() + + config_builder = build_config(model_alias=args.model_alias) + results = create_dataset(config_builder, num_records=args.num_records, artifact_path=args.artifact_path) + + print(f"Dataset saved to: {results.artifact_storage.final_dataset_path}") + + results.load_analysis().to_report() +``` diff --git a/fern/v0.5.0/pages/tutorials/editing-images-notebook.mdx b/fern/v0.5.0/pages/tutorials/editing-images-notebook.mdx new file mode 100644 index 00000000..c7a0e823 --- /dev/null +++ b/fern/v0.5.0/pages/tutorials/editing-images-notebook.mdx @@ -0,0 +1,13 @@ +--- +title: "Image-to-Image Editing (Notebook)" +description: Data Designer tutorial with executed output. +--- + +import { NotebookViewer } from "@/components/NotebookViewer"; +import notebook from "@/components/notebooks/6-editing-images-with-image-context"; + + diff --git a/fern/v0.5.0/pages/tutorials/generating-images-notebook.mdx b/fern/v0.5.0/pages/tutorials/generating-images-notebook.mdx new file mode 100644 index 00000000..6cf4dec2 --- /dev/null +++ b/fern/v0.5.0/pages/tutorials/generating-images-notebook.mdx @@ -0,0 +1,13 @@ +--- +title: "Generating Images (Notebook)" +description: Data Designer tutorial with executed output. +--- + +import { NotebookViewer } from "@/components/NotebookViewer"; +import notebook from "@/components/notebooks/5-generating-images"; + + diff --git a/fern/v0.5.0/pages/tutorials/images-as-context-notebook.mdx b/fern/v0.5.0/pages/tutorials/images-as-context-notebook.mdx new file mode 100644 index 00000000..2f97d449 --- /dev/null +++ b/fern/v0.5.0/pages/tutorials/images-as-context-notebook.mdx @@ -0,0 +1,13 @@ +--- +title: "Images as Context (Notebook)" +description: Data Designer tutorial with executed output. +--- + +import { NotebookViewer } from "@/components/NotebookViewer"; +import notebook from "@/components/notebooks/4-providing-images-as-context"; + + diff --git a/fern/v0.5.0/pages/tutorials/images-as-context.mdx b/fern/v0.5.0/pages/tutorials/images-as-context.mdx new file mode 100644 index 00000000..d137c0e2 --- /dev/null +++ b/fern/v0.5.0/pages/tutorials/images-as-context.mdx @@ -0,0 +1,272 @@ +--- +title: "🎨 Data Designer Tutorial: Images as Context for Vision-Based Generation" +--- + + +Run this tutorial interactively in [Google Colab](https://colab.research.google.com/github/NVIDIA-NeMo/DataDesigner/blob/main/docs/colab_notebooks/4-providing-images-as-context.ipynb). + + +#### 📚 What you'll learn + +This notebook demonstrates how to provide images as context to generate text descriptions using vision-language models. + +- ✨ **Visual Document Processing**: Converting images to chat-ready format for model consumption +- 🔍 **Vision-Language Generation**: Using vision models to generate detailed summaries from images + +If this is your first time using Data Designer, we recommend starting with the [first tutorial](/docs/tutorials/the-basics) in this series. + +### 📦 Import Data Designer + +- `data_designer.config` provides access to the configuration API. +- `DataDesigner` is the main interface for data generation. 
+ +```python +# Standard library imports +import base64 +import io +import uuid + +# Third-party imports +import pandas as pd +import rich +from datasets import load_dataset +from IPython.display import display +from rich.panel import Panel + +# Data Designer imports +import data_designer.config as dd +from data_designer.interface import DataDesigner +``` + +### ⚙️ Initialize the Data Designer interface + +- `DataDesigner` is the main object is responsible for managing the data generation process. +- When initialized without arguments, the [default model providers](/docs/concepts/models/default-model-settings) are used. + +```python +data_designer = DataDesigner() +``` + +### 🎛️ Define model configurations + +- Each `ModelConfig` defines a model that can be used during the generation process. +- The "model alias" is used to reference the model in the Data Designer config (as we will see below). +- The "model provider" is the external service that hosts the model (see the [model config](/docs/concepts/models/default-model-settings) docs for more details). +- By default, we use [build.nvidia.com](https://build.nvidia.com/models) as the model provider. + +```python +# This name is set in the model provider configuration. +MODEL_PROVIDER = "nvidia" + +model_configs = [ + dd.ModelConfig( + alias="vision", + model="meta/llama-4-scout-17b-16e-instruct", + provider=MODEL_PROVIDER, + inference_parameters=dd.ChatCompletionInferenceParams( + temperature=0.60, + top_p=0.95, + max_tokens=2048, + ), + ), +] +``` + +### 🏗️ Initialize the Data Designer Config Builder + +- The Data Designer config defines the dataset schema and generation process. +- The config builder provides an intuitive interface for building this configuration. +- The list of model configs is provided to the builder at initialization. + +```python +config_builder = dd.DataDesignerConfigBuilder(model_configs=model_configs) +``` + +### 🌱 Seed Dataset Creation + +In this section, we'll prepare our visual documents as a seed dataset for summarization: + +- **Loading Visual Documents**: We use the ColPali dataset containing document images +- **Image Processing**: Convert images to base64 format for vision model consumption +- **Metadata Extraction**: Preserve relevant document information (filename, page number, source, etc.) + +The seed dataset will be used to generate detailed text summaries of each document image. + +```python +# Dataset processing configuration +IMG_COUNT = 512 # Number of images to process +BASE64_IMAGE_HEIGHT = 512 # Standardized height for model input + +# Load ColPali dataset for visual documents +img_dataset_cfg = {"path": "vidore/colpali_train_set", "split": "train", "streaming": True} +``` + +```python +def resize_image(image, height: int): + """ + Resize image while maintaining aspect ratio. + + Args: + image: PIL Image object + height: Target height in pixels + + Returns: + Resized PIL Image object + """ + original_width, original_height = image.size + width = int(original_width * (height / original_height)) + return image.resize((width, height)) + + +def convert_image_to_chat_format(record, height: int) -> dict: + """ + Convert PIL image to base64 format for chat template usage. 
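+
+    The resulting base64 string is what the ImageContext multi-modal context below consumes (data_type=BASE64, PNG format).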
+ + Args: + record: Dataset record containing image and metadata + height: Target height for image resizing + + Returns: + Updated record with base64_image and uuid fields + """ + # Resize image for consistent processing + image = resize_image(record["image"], height) + + # Convert to base64 string + img_buffer = io.BytesIO() + image.save(img_buffer, format="PNG") + byte_data = img_buffer.getvalue() + base64_encoded_data = base64.b64encode(byte_data) + base64_string = base64_encoded_data.decode("utf-8") + + # Return updated record + return record | {"base64_image": base64_string, "uuid": str(uuid.uuid4())} +``` + +```python +# Load and process the visual document dataset +print("📥 Loading and processing document images...") + +img_dataset_iter = iter( + load_dataset(**img_dataset_cfg).map(convert_image_to_chat_format, fn_kwargs={"height": BASE64_IMAGE_HEIGHT}) +) +img_dataset = pd.DataFrame([next(img_dataset_iter) for _ in range(IMG_COUNT)]) + +print(f"✅ Loaded {len(img_dataset)} images with columns: {list(img_dataset.columns)}") +``` + +```python +img_dataset.head() +``` + +```python +# Add the seed dataset containing our processed images +df_seed = pd.DataFrame(img_dataset)[["uuid", "image_filename", "base64_image", "page", "options", "source"]] +config_builder.with_seed_dataset(dd.DataFrameSeedSource(df=df_seed)) +``` + +```python +# Add a column to generate detailed document summaries +config_builder.add_column( + dd.LLMTextColumnConfig( + name="summary", + model_alias="vision", + prompt=( + "Provide a detailed summary of the content in this image in Markdown format. " + "Start from the top of the image and then describe it from top to bottom. " + "Place a summary at the bottom." + ), + multi_modal_context=[ + dd.ImageContext( + column_name="base64_image", + data_type=dd.ModalityDataType.BASE64, + image_format=dd.ImageFormat.PNG, + ) + ], + ) +) +``` + +### 🔁 Iteration is key – preview the dataset! + +1. Use the `preview` method to generate a sample of records quickly. +2. Inspect the results for quality and format issues. +3. Adjust column configurations, prompts, or parameters as needed. +4. Re-run the preview until satisfied. + +```python +preview = data_designer.preview(config_builder, num_records=2) +``` + +```python +# Run this cell multiple times to cycle through the 2 preview records. +preview.display_sample_record() +``` + +```python +# The preview dataset is available as a pandas DataFrame. +preview.dataset +``` + +### 📊 Analyze the generated data + +- Data Designer automatically generates a basic statistical analysis of the generated data. +- This analysis is available via the `analysis` property of generation result objects. + +```python +# Print the analysis as a table. +preview.analysis.to_report() +``` + +### 🔎 Visual Inspection + +Let's compare the original document image with the generated summary to validate quality: + +```python +# Compare original document with generated summary +index = 0 # Change this to view different examples + +# Merge preview data with original images for comparison +comparison_dataset = preview.dataset.merge(pd.DataFrame(img_dataset)[["uuid", "image"]], how="left", on="uuid") + +# Extract the record for display +record = comparison_dataset.iloc[index] + +print("📄 Original Document Image:") +display(resize_image(record.image, BASE64_IMAGE_HEIGHT)) + +print("\n📝 Generated Summary:") +rich.print(Panel(record.summary, title="Document Summary", title_align="left")) +``` + +### 🆙 Scale up! + +- Happy with your preview data? 
+- Use the `create` method to submit larger Data Designer generation jobs. + +```python +results = data_designer.create(config_builder, num_records=10, dataset_name="tutorial-4") +``` + +```python +# Load the generated dataset as a pandas DataFrame. +dataset = results.load_dataset() + +dataset.head() +``` + +```python +# Load the analysis results into memory. +analysis = results.load_analysis() + +analysis.to_report() +``` + +## ⏭️ Next Steps + +Now that you've learned how to use visual context for image summarization in Data Designer, explore more: + +- Experiment with different vision models for specific document types +- Try different prompt variations to generate specialized descriptions (e.g., technical details, key findings) +- Combine vision-based summaries with other column types for multi-modal workflows +- Apply this pattern to other vision tasks like image captioning, OCR validation, or visual question answering diff --git a/fern/v0.5.0/pages/tutorials/overview.mdx b/fern/v0.5.0/pages/tutorials/overview.mdx new file mode 100644 index 00000000..22694c79 --- /dev/null +++ b/fern/v0.5.0/pages/tutorials/overview.mdx @@ -0,0 +1,142 @@ +--- +title: Tutorials +description: Step-by-step tutorials for learning Data Designer. +--- + +Welcome to the Data Designer tutorials! These interactive notebooks guide you through the core concepts and features of Data Designer. + + +// Absolute path from fern folder root +import { CustomCard } from "@/components/CustomCard" + + + + +## Getting Started + +Each tutorial builds on the previous one, so we recommend following them in order: + + + + Learn the fundamentals of Data Designer by generating a simple product review dataset. + + **Topics covered:** + - Sampler columns for categorical and numerical data + - LLM-generated text columns + - Previewing and iterating on your dataset + + + Learn advanced data generation using structured outputs and Jinja expressions. + + **Topics covered:** + - Pydantic models for structured output schemas + - Expression columns with Jinja2 templates + - Conditional logic in prompts + + + Bootstrap generation from existing data to create domain-grounded synthetic datasets. + + **Topics covered:** + - Loading seed datasets from local files + - Referencing seed data in prompts + - Combining real and synthetic data + + + Use vision-language models to generate text descriptions from images. + + **Topics covered:** + - Processing images for model input + - Vision model configuration + - Document summarization workflows + + + Create synthetic images using diffusion and autoregressive models. + + **Topics covered:** + - Image generation model configuration + - Text-to-image generation + - Prompt engineering for image generation + + + Transform and edit images using AI models. + + **Topics covered:** + - Image-to-image transformation workflows + - Style transfer and editing prompts + - Multi-modal context for image editing + + + +## Running the Tutorials + +Each tutorial is available as an interactive Jupyter notebook that you can run in Google Colab. Click the "Open in Colab" badge at the top of each tutorial to launch it directly in your browser. + +### Prerequisites + +Before running the tutorials, make sure you have: + +1. **An API key** from one of the supported providers: + - [NVIDIA API Key](https://build.nvidia.com) (recommended) + - [OpenAI API Key](https://platform.openai.com/api-keys) + - [OpenRouter API Key](https://openrouter.ai) + +2. 
**Set your API key** as an environment variable or in the notebook: + ```bash + export NVIDIA_API_KEY="your-api-key-here" + ``` + +### Running Locally + +To run the tutorials locally instead of in Colab: + +1. Install Data Designer: + ```bash + pip install data-designer + ``` + +2. Configure your model provider using the CLI: + ```bash + data-designer config add-provider nvidia + ``` + +3. Clone the repository and run the notebooks: + ```bash + git clone https://github.com/NVIDIA-NeMo/DataDesigner.git + cd DataDesigner/docs/colab_notebooks + jupyter notebook + ``` + +## Additional Resources + +- **[Concepts Guide](/docs/concepts/columns)**: Deep dive into core Data Designer concepts +- **[Quick Start Guide](/docs/quick-start)**: A condensed introduction to Data Designer +- **[Use Case Recipes](/docs/recipes)**: Complete working examples for specific use cases +- **[API Reference](/api/models)**: Detailed documentation for all configuration options diff --git a/fern/v0.5.0/pages/tutorials/seeding-with-dataset-notebook.mdx b/fern/v0.5.0/pages/tutorials/seeding-with-dataset-notebook.mdx new file mode 100644 index 00000000..5e475a20 --- /dev/null +++ b/fern/v0.5.0/pages/tutorials/seeding-with-dataset-notebook.mdx @@ -0,0 +1,13 @@ +--- +title: "Seeding with a Dataset (Notebook)" +description: Data Designer tutorial with executed output. +--- + +import { NotebookViewer } from "@/components/NotebookViewer"; +import notebook from "@/components/notebooks/3-seeding-with-a-dataset"; + + diff --git a/fern/v0.5.0/pages/tutorials/seeding-with-dataset.mdx b/fern/v0.5.0/pages/tutorials/seeding-with-dataset.mdx new file mode 100644 index 00000000..072182ba --- /dev/null +++ b/fern/v0.5.0/pages/tutorials/seeding-with-dataset.mdx @@ -0,0 +1,261 @@ +--- +title: "🎨 Data Designer Tutorial: Seeding with an External Dataset" +--- + + +Run this tutorial interactively in [Google Colab](https://colab.research.google.com/github/NVIDIA-NeMo/DataDesigner/blob/main/docs/colab_notebooks/3-seeding-with-a-dataset.ipynb). + + +#### 📚 What you'll learn + +In this notebook, we will demonstrate how to seed synthetic data generation in Data Designer with an external dataset. + +If this is your first time using Data Designer, we recommend starting with the [first tutorial](/docs/tutorials/the-basics) in this series. + +### 📦 Import Data Designer + +- `data_designer.config` provides access to the configuration API. +- `DataDesigner` is the main interface for data generation. + +```python +import data_designer.config as dd +from data_designer.interface import DataDesigner +``` + +### ⚙️ Initialize the Data Designer interface + +- `DataDesigner` is the main object is responsible for managing the data generation process. +- When initialized without arguments, the [default model providers](/docs/concepts/models/default-model-settings) are used. + +```python +data_designer = DataDesigner() +``` + +### 🎛️ Define model configurations + +- Each `ModelConfig` defines a model that can be used during the generation process. +- The "model alias" is used to reference the model in the Data Designer config (as we will see below). +- The "model provider" is the external service that hosts the model (see the [model config](/docs/concepts/models/default-model-settings) docs for more details). +- By default, we use [build.nvidia.com](https://build.nvidia.com/models) as the model provider. + +```python +# This name is set in the model provider configuration. +MODEL_PROVIDER = "nvidia" + +# The model ID is from build.nvidia.com. 
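+# NOTE: assumed provider-name assignment; it is referenced below as provider=MODEL_PROVIDER and matches the provider name used in the other tutorials.
+MODEL_PROVIDER = "nvidia"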
+MODEL_ID = "nvidia/nemotron-3-nano-30b-a3b" + +# We choose this alias to be descriptive for our use case. +MODEL_ALIAS = "nemotron-nano-v3" + +model_configs = [ + dd.ModelConfig( + alias=MODEL_ALIAS, + model=MODEL_ID, + provider=MODEL_PROVIDER, + inference_parameters=dd.ChatCompletionInferenceParams( + temperature=1.0, + top_p=1.0, + max_tokens=2048, + extra_body={"chat_template_kwargs": {"enable_thinking": False}}, + ), + ) +] +``` + +### 🏗️ Initialize the Data Designer Config Builder + +- The Data Designer config defines the dataset schema and generation process. +- The config builder provides an intuitive interface for building this configuration. +- The list of model configs is provided to the builder at initialization. + +```python +config_builder = dd.DataDesignerConfigBuilder(model_configs=model_configs) +``` + +## 🏥 Prepare a seed dataset + +- For this notebook, we'll create a synthetic dataset of patient notes. +- We will _seed_ the generation process with a [symptom-to-diagnosis dataset](https://huggingface.co/datasets/gretelai/symptom_to_diagnosis). + + +- Seed datasets let you steer the generation process by providing context that is specific to your use case. +- Seed datasets are also an excellent way to inject real-world diversity into your synthetic data. +- During generation, prompt templates can reference any of the seed dataset fields. + + +```python +# Download sample dataset from Github +import urllib.request + +url = "https://raw.githubusercontent.com/NVIDIA/GenerativeAIExamples/refs/heads/main/nemo/NeMo-Data-Designer/data/gretelai_symptom_to_diagnosis.csv" +local_filename, _ = urllib.request.urlretrieve(url, "gretelai_symptom_to_diagnosis.csv") + +# Seed datasets are passed as reference objects to the config builder. +seed_source = dd.LocalFileSeedSource(path=local_filename) + +config_builder.with_seed_dataset(seed_source) +``` + +## 🎨 Designing our synthetic patient notes dataset + +- We use concrete config objects for clarity and type safety. 
+- **Note**: The prompt template can reference fields from our seed dataset: + - `{{ diagnosis }}` - the medical diagnosis from the seed data + - `{{ patient_summary }}` - the symptom description from the seed data + +```python +config_builder.add_column( + dd.SamplerColumnConfig( + name="patient_sampler", + sampler_type=dd.SamplerType.PERSON_FROM_FAKER, + params=dd.PersonFromFakerSamplerParams(), + ) +) + +config_builder.add_column( + dd.SamplerColumnConfig( + name="doctor_sampler", + sampler_type=dd.SamplerType.PERSON_FROM_FAKER, + params=dd.PersonFromFakerSamplerParams(), + ) +) + +config_builder.add_column( + dd.SamplerColumnConfig( + name="patient_id", + sampler_type=dd.SamplerType.UUID, + params=dd.UUIDSamplerParams( + prefix="PT-", + short_form=True, + uppercase=True, + ), + ) +) + +config_builder.add_column( + dd.ExpressionColumnConfig( + name="first_name", + expr="{{ patient_sampler.first_name }}", + ) +) + +config_builder.add_column( + dd.ExpressionColumnConfig( + name="last_name", + expr="{{ patient_sampler.last_name }}", + ) +) + +config_builder.add_column( + dd.ExpressionColumnConfig( + name="dob", + expr="{{ patient_sampler.birth_date }}", + ) +) + +config_builder.add_column( + dd.SamplerColumnConfig( + name="symptom_onset_date", + sampler_type=dd.SamplerType.DATETIME, + params=dd.DateTimeSamplerParams(start="2024-01-01", end="2024-12-31"), + ) +) + +config_builder.add_column( + dd.SamplerColumnConfig( + name="date_of_visit", + sampler_type=dd.SamplerType.TIMEDELTA, + params=dd.TimeDeltaSamplerParams(dt_min=1, dt_max=30, reference_column_name="symptom_onset_date"), + ) +) + +config_builder.add_column( + dd.ExpressionColumnConfig( + name="physician", + expr="Dr. {{ doctor_sampler.last_name }}", + ) +) + +config_builder.add_column( + dd.LLMTextColumnConfig( + name="physician_notes", + prompt="""\ +You are a primary-care physician who just had an appointment with {{ first_name }} {{ last_name }}, +who has been struggling with symptoms from {{ diagnosis }} since {{ symptom_onset_date }}. +The date of today's visit is {{ date_of_visit }}. + +{{ patient_summary }} + +Write careful notes about your visit with {{ first_name }}, +as Dr. {{ doctor_sampler.first_name }} {{ doctor_sampler.last_name }}. + +Format the notes as a busy doctor might. +Respond with only the notes, no other text. +""", + model_alias=MODEL_ALIAS, + ) +) + +data_designer.validate(config_builder) +``` + +### 🔁 Iteration is key – preview the dataset! + +1. Use the `preview` method to generate a sample of records quickly. +2. Inspect the results for quality and format issues. +3. Adjust column configurations, prompts, or parameters as needed. +4. Re-run the preview until satisfied. + +```python +preview = data_designer.preview(config_builder, num_records=2) +``` + +```python +# Run this cell multiple times to cycle through the 2 preview records. +preview.display_sample_record() +``` + +```python +# The preview dataset is available as a pandas DataFrame. +preview.dataset +``` + +### 📊 Analyze the generated data + +- Data Designer automatically generates a basic statistical analysis of the generated data. +- This analysis is available via the `analysis` property of generation result objects. + +```python +# Print the analysis as a table. +preview.analysis.to_report() +``` + +### 🆙 Scale up! + +- Happy with your preview data? +- Use the `create` method to submit larger Data Designer generation jobs. 
+ +```python +results = data_designer.create(config_builder, num_records=10, dataset_name="tutorial-3") +``` + +```python +# Load the generated dataset as a pandas DataFrame. +dataset = results.load_dataset() + +dataset.head() +``` + +```python +# Load the analysis results into memory. +analysis = results.load_analysis() + +analysis.to_report() +``` + +## ⏭️ Next Steps + +Check out the following tutorial to learn more about: + +- [Providing images as context](/docs/tutorials/images-as-context) diff --git a/fern/v0.5.0/pages/tutorials/structured-outputs-notebook.mdx b/fern/v0.5.0/pages/tutorials/structured-outputs-notebook.mdx new file mode 100644 index 00000000..ceb8f845 --- /dev/null +++ b/fern/v0.5.0/pages/tutorials/structured-outputs-notebook.mdx @@ -0,0 +1,13 @@ +--- +title: "Structured Outputs (Notebook)" +description: Data Designer tutorial with executed output. +--- + +import { NotebookViewer } from "@/components/NotebookViewer"; +import notebook from "@/components/notebooks/2-structured-outputs-and-jinja-expressions"; + + diff --git a/fern/v0.5.0/pages/tutorials/structured-outputs.mdx b/fern/v0.5.0/pages/tutorials/structured-outputs.mdx new file mode 100644 index 00000000..189988c0 --- /dev/null +++ b/fern/v0.5.0/pages/tutorials/structured-outputs.mdx @@ -0,0 +1,304 @@ +--- +title: "🎨 Data Designer Tutorial: Structured Outputs and Jinja Expressions" +--- + + +Run this tutorial interactively in [Google Colab](https://colab.research.google.com/github/NVIDIA-NeMo/DataDesigner/blob/main/docs/colab_notebooks/2-structured-outputs-and-jinja-expressions.ipynb). + + +#### 📚 What you'll learn + +In this notebook, we will continue our exploration of Data Designer, demonstrating more advanced data generation using structured outputs and Jinja expressions. + +If this is your first time using Data Designer, we recommend starting with the [first tutorial](/docs/tutorials/the-basics) in this series. + +### 📦 Import Data Designer + +- `data_designer.config` provides access to the configuration API. +- `DataDesigner` is the main interface for data generation. + +```python +import data_designer.config as dd +from data_designer.interface import DataDesigner +``` + +### ⚙️ Initialize the Data Designer interface + +- `DataDesigner` is the main object that is used to interface with the library. +- When initialized without arguments, the [default model providers](/docs/concepts/models/default-model-settings) are used. + +```python +data_designer = DataDesigner() +``` + +### 🎛️ Define model configurations + +- Each `ModelConfig` defines a model that can be used during the generation process. +- The "model alias" is used to reference the model in the Data Designer config (as we will see below). +- The "model provider" is the external service that hosts the model (see the [model config](/docs/concepts/models/default-model-settings) docs for more details). +- By default, we use [build.nvidia.com](https://build.nvidia.com/models) as the model provider. + +```python +# This name is set in the model provider configuration. +MODEL_PROVIDER = "nvidia" + +# The model ID is from build.nvidia.com. +MODEL_ID = "nvidia/nemotron-3-nano-30b-a3b" + +# We choose this alias to be descriptive for our use case. 
+MODEL_ALIAS = "nemotron-nano-v3" + +model_configs = [ + dd.ModelConfig( + alias=MODEL_ALIAS, + model=MODEL_ID, + provider=MODEL_PROVIDER, + inference_parameters=dd.ChatCompletionInferenceParams( + temperature=1.0, + top_p=1.0, + max_tokens=2048, + extra_body={"chat_template_kwargs": {"enable_thinking": False}}, + ), + ) +] +``` + +### 🏗️ Initialize the Data Designer Config Builder + +- The Data Designer config defines the dataset schema and generation process. +- The config builder provides an intuitive interface for building this configuration. +- The list of model configs is provided to the builder at initialization. + +```python +config_builder = dd.DataDesignerConfigBuilder(model_configs=model_configs) +``` + +### 🧑‍🎨 Designing our data + +- We will again create a product review dataset, but this time we will use structured outputs and Jinja expressions. +- Structured outputs let you specify the exact schema of the data you want to generate. +- Data Designer supports schemas specified using either JSON schema or Pydantic data models (recommended). + +We'll define our structured outputs using [Pydantic](https://docs.pydantic.dev/latest/) data models. + + +- Pydantic models provide better IDE support and type validation. +- They are more Pythonic than raw JSON schemas. +- They integrate seamlessly with Data Designer's structured output system. + + +```python +from decimal import Decimal +from typing import Literal + +from pydantic import BaseModel, Field + + +# We define a Product schema so that the name, description, and price are generated +# in one go, with the types and constraints specified. +class Product(BaseModel): + name: str = Field(description="The name of the product") + description: str = Field(description="A description of the product") + price: Decimal = Field(description="The price of the product", ge=10, le=1000, decimal_places=2) + + +class ProductReview(BaseModel): + rating: int = Field(description="The rating of the product", ge=1, le=5) + customer_mood: Literal["irritated", "mad", "happy", "neutral", "excited"] = Field( + description="The mood of the customer" + ) + review: str = Field(description="A review of the product") +``` + +Next, let's design our product review dataset using a few more tricks compared to the previous notebook. + +```python +# Since we often only want a few attributes from Person objects, we can +# set drop=True in the column config to drop the column from the final dataset. 
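+# (A dropped column can still be referenced by later columns during generation,
+# as the Jinja expressions below do with customer.*; it is simply excluded from
+# the final output.)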
+config_builder.add_column( + dd.SamplerColumnConfig( + name="customer", + sampler_type=dd.SamplerType.PERSON_FROM_FAKER, + params=dd.PersonFromFakerSamplerParams(), + drop=True, + ) +) + +config_builder.add_column( + dd.SamplerColumnConfig( + name="product_category", + sampler_type=dd.SamplerType.CATEGORY, + params=dd.CategorySamplerParams( + values=[ + "Electronics", + "Clothing", + "Home & Kitchen", + "Books", + "Home Office", + ], + ), + ) +) + +config_builder.add_column( + dd.SamplerColumnConfig( + name="product_subcategory", + sampler_type=dd.SamplerType.SUBCATEGORY, + params=dd.SubcategorySamplerParams( + category="product_category", + values={ + "Electronics": ["Smartphones", "Laptops", "Headphones", "Cameras", "Accessories"], + "Clothing": ["Men's Clothing", "Women's Clothing", "Winter Coats", "Activewear", "Accessories"], + "Home & Kitchen": ["Appliances", "Cookware", "Furniture", "Decor", "Organization"], + "Books": ["Fiction", "Non-Fiction", "Self-Help", "Textbooks", "Classics"], + "Home Office": ["Desks", "Chairs", "Storage", "Office Supplies", "Lighting"], + }, + ), + ) +) + +config_builder.add_column( + dd.SamplerColumnConfig( + name="target_age_range", + sampler_type=dd.SamplerType.CATEGORY, + params=dd.CategorySamplerParams(values=["18-25", "25-35", "35-50", "50-65", "65+"]), + ) +) + +# Sampler columns support conditional params, which are used if the condition is met. +# In this example, we set the review style to rambling if the target age range is 18-25. +# Note conditional parameters are only supported for Sampler column types. +config_builder.add_column( + dd.SamplerColumnConfig( + name="review_style", + sampler_type=dd.SamplerType.CATEGORY, + params=dd.CategorySamplerParams( + values=["rambling", "brief", "detailed", "structured with bullet points"], + weights=[1, 2, 2, 1], + ), + conditional_params={ + "target_age_range == '18-25'": dd.CategorySamplerParams(values=["rambling"]), + }, + ) +) + +# Optionally validate that the columns are configured correctly. +data_designer.validate(config_builder) +``` + +Next, we will use more advanced Jinja expressions to create new columns. + +Jinja expressions let you: + +- Access nested attributes: `{{ customer.first_name }}` +- Combine values: `{{ customer.first_name }} {{ customer.last_name }}` +- Use conditional logic: `{% if condition %}...{% endif %}` + +```python +# We can create new columns using Jinja expressions that reference +# existing columns, including attributes of nested objects. +config_builder.add_column( + dd.ExpressionColumnConfig(name="customer_name", expr="{{ customer.first_name }} {{ customer.last_name }}") +) + +config_builder.add_column(dd.ExpressionColumnConfig(name="customer_age", expr="{{ customer.age }}")) + +config_builder.add_column( + dd.LLMStructuredColumnConfig( + name="product", + prompt=( + "Create a product in the '{{ product_category }}' category, focusing on products " + "related to '{{ product_subcategory }}'. The target age range of the ideal customer is " + "{{ target_age_range }} years old. The product should be priced between $10 and $1000." + ), + output_format=Product, + model_alias=MODEL_ALIAS, + ) +) + +# We can even use if/else logic in our Jinja expressions to create more complex prompt patterns. 
+config_builder.add_column( + dd.LLMStructuredColumnConfig( + name="customer_review", + prompt=( + "Your task is to write a review for the following product:\n\n" + "Product Name: {{ product.name }}\n" + "Product Description: {{ product.description }}\n" + "Price: {{ product.price }}\n\n" + "Imagine your name is {{ customer_name }} and you are from {{ customer.city }}, {{ customer.state }}. " + "Write the review in a style that is '{{ review_style }}'." + "{% if target_age_range == '18-25' %}" + "Make sure the review is more informal and conversational.\n" + "{% else %}" + "Make sure the review is more formal and structured.\n" + "{% endif %}" + "The review field should contain only the review, no other text." + ), + output_format=ProductReview, + model_alias=MODEL_ALIAS, + ) +) + +data_designer.validate(config_builder) +``` + +### 🔁 Iteration is key – preview the dataset! + +1. Use the `preview` method to generate a sample of records quickly. +2. Inspect the results for quality and format issues. +3. Adjust column configurations, prompts, or parameters as needed. +4. Re-run the preview until satisfied. + +```python +preview = data_designer.preview(config_builder, num_records=2) +``` + +```python +# Run this cell multiple times to cycle through the 2 preview records. +preview.display_sample_record() +``` + +```python +# The preview dataset is available as a pandas DataFrame. +preview.dataset +``` + +### 📊 Analyze the generated data + +- Data Designer automatically generates a basic statistical analysis of the generated data. +- This analysis is available via the `analysis` property of generation result objects. + +```python +# Print the analysis as a table. +preview.analysis.to_report() +``` + +### 🆙 Scale up! + +- Happy with your preview data? +- Use the `create` method to submit larger Data Designer generation jobs. + +```python +results = data_designer.create(config_builder, num_records=10, dataset_name="tutorial-2") +``` + +```python +# Load the generated dataset as a pandas DataFrame. +dataset = results.load_dataset() + +dataset.head() +``` + +```python +# Load the analysis results into memory. +analysis = results.load_analysis() + +analysis.to_report() +``` + +## ⏭️ Next Steps + +Check out the following tutorials to learn more about: + +- [Seeding synthetic data generation with an external dataset](/docs/tutorials/seeding-with-dataset) +- [Providing images as context](/docs/tutorials/images-as-context) diff --git a/fern/v0.5.0/pages/tutorials/the-basics-notebook.mdx b/fern/v0.5.0/pages/tutorials/the-basics-notebook.mdx new file mode 100644 index 00000000..bd0c54db --- /dev/null +++ b/fern/v0.5.0/pages/tutorials/the-basics-notebook.mdx @@ -0,0 +1,12 @@ +--- +title: "The Basics (Notebook)" +description: Data Designer tutorial rendered via NotebookViewer component. +--- + +import { NotebookViewer } from "@/components/NotebookViewer"; +import notebookTheBasics from "@/components/notebooks/1-the-basics"; + + diff --git a/fern/v0.5.0/pages/tutorials/the-basics.mdx b/fern/v0.5.0/pages/tutorials/the-basics.mdx new file mode 100644 index 00000000..4e5ce966 --- /dev/null +++ b/fern/v0.5.0/pages/tutorials/the-basics.mdx @@ -0,0 +1,286 @@ +--- +title: "The Basics" +description: Learn the fundamentals of Data Designer by generating a simple product review dataset. +--- + +This tutorial demonstrates the basics of Data Designer by generating a simple product review dataset. 
+ +## What you'll learn + +- How to initialize Data Designer +- How to define model configurations +- How to use sampler columns for diverse data generation +- How to use LLM columns with Jinja templating +- How to preview and iterate on your dataset +- How to create and analyze generated data + +## Import Data Designer + +`data_designer.config` provides access to the configuration API. `DataDesigner` is the main interface for data generation. + +```python +import data_designer.config as dd +from data_designer.interface import DataDesigner +``` + +## Initialize the Data Designer interface + +`DataDesigner` is the main object responsible for managing the data generation process. When initialized without arguments, the [default model providers](/docs/concepts/models/default-model-settings) are used. + +```python +data_designer = DataDesigner() +``` + +## Define model configurations + +Each `ModelConfig` defines a model that can be used during the generation process: + +- The **model alias** is used to reference the model in the Data Designer config +- The **model provider** is the external service that hosts the model (see the [model config](/docs/concepts/models/model-configs) docs for more details) +- By default, we use [build.nvidia.com](https://build.nvidia.com/models) as the model provider + +```python +MODEL_PROVIDER = "nvidia" +MODEL_ID = "nvidia/nemotron-3-nano-30b-a3b" +MODEL_ALIAS = "nemotron-nano-v3" + +model_configs = [ + dd.ModelConfig( + alias=MODEL_ALIAS, + model=MODEL_ID, + provider=MODEL_PROVIDER, + inference_parameters=dd.ChatCompletionInferenceParams( + temperature=1.0, + top_p=1.0, + max_tokens=2048, + extra_body={"chat_template_kwargs": {"enable_thinking": False}}, + ), + ) +] +``` + +## Initialize the Data Designer Config Builder + +The Data Designer config defines the dataset schema and generation process. The config builder provides an intuitive interface for building this configuration. The list of model configs is provided to the builder at initialization. + +```python +config_builder = dd.DataDesignerConfigBuilder(model_configs=model_configs) +``` + +## Getting started with sampler columns + +Sampler columns offer non-LLM based generation of synthetic data. They are particularly useful for **steering the diversity** of the generated data. 
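+
+For example, a single category sampler is enough to give every record a value drawn from a fixed list. The sketch below is illustrative only: the `store_region` column is made up and is not part of this tutorial's dataset, which is defined further down.
+
+```python
+# Illustrative sketch (not part of this tutorial's dataset): a hypothetical
+# "store_region" column whose value is drawn from a fixed set of categories.
+config_builder.add_column(
+    dd.SamplerColumnConfig(
+        name="store_region",
+        sampler_type=dd.SamplerType.CATEGORY,
+        params=dd.CategorySamplerParams(values=["north", "south", "east", "west"]),
+    )
+)
+```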
+ +You can view available samplers using the config builder's `info` property: + +```python +config_builder.info.display("samplers") +``` + +Let's start designing our product review dataset by adding product category and subcategory columns: + +```python +config_builder.add_column( + dd.SamplerColumnConfig( + name="product_category", + sampler_type=dd.SamplerType.CATEGORY, + params=dd.CategorySamplerParams( + values=[ + "Electronics", + "Clothing", + "Home & Kitchen", + "Books", + "Home Office", + ], + ), + ) +) + +config_builder.add_column( + dd.SamplerColumnConfig( + name="product_subcategory", + sampler_type=dd.SamplerType.SUBCATEGORY, + params=dd.SubcategorySamplerParams( + category="product_category", + values={ + "Electronics": [ + "Smartphones", + "Laptops", + "Headphones", + "Cameras", + "Accessories", + ], + "Clothing": [ + "Men's Clothing", + "Women's Clothing", + "Winter Coats", + "Activewear", + "Accessories", + ], + "Home & Kitchen": [ + "Appliances", + "Cookware", + "Furniture", + "Decor", + "Organization", + ], + "Books": [ + "Fiction", + "Non-Fiction", + "Self-Help", + "Textbooks", + "Classics", + ], + "Home Office": [ + "Desks", + "Chairs", + "Storage", + "Office Supplies", + "Lighting", + ], + }, + ), + ) +) + +config_builder.add_column( + dd.SamplerColumnConfig( + name="target_age_range", + sampler_type=dd.SamplerType.CATEGORY, + params=dd.CategorySamplerParams(values=["18-25", "25-35", "35-50", "50-65", "65+"]), + ) +) + +# Optionally validate that the columns are configured correctly. +data_designer.validate(config_builder) +``` + +Next, let's add samplers to generate data related to the customer and their review: + +```python +config_builder.add_column( + dd.SamplerColumnConfig( + name="customer", + sampler_type=dd.SamplerType.PERSON_FROM_FAKER, + params=dd.PersonFromFakerSamplerParams(age_range=[18, 70], locale="en_US"), + ) +) + +config_builder.add_column( + dd.SamplerColumnConfig( + name="number_of_stars", + sampler_type=dd.SamplerType.UNIFORM, + params=dd.UniformSamplerParams(low=1, high=5), + convert_to="int", # Convert the sampled float to an integer. + ) +) + +config_builder.add_column( + dd.SamplerColumnConfig( + name="review_style", + sampler_type=dd.SamplerType.CATEGORY, + params=dd.CategorySamplerParams( + values=["rambling", "brief", "detailed", "structured with bullet points"], + weights=[1, 2, 2, 1], + ), + ) +) + +data_designer.validate(config_builder) +``` + +## LLM-generated columns + +The real power of Data Designer comes from leveraging LLMs to generate text, code, and structured data. When prompting the LLM, we can use Jinja templating to reference other columns in the dataset. Nested JSON fields can be accessed using dot notation. + +```python +config_builder.add_column( + dd.LLMTextColumnConfig( + name="product_name", + prompt=( + "You are a helpful assistant that generates product names. DO NOT add quotes around the product name.\n\n" + "Come up with a creative product name for a product in the '{{ product_category }}' category, focusing " + "on products related to '{{ product_subcategory }}'. The target age range of the ideal customer is " + "{{ target_age_range }} years old. Respond with only the product name, no other text." + ), + model_alias=MODEL_ALIAS, + ) +) + +config_builder.add_column( + dd.LLMTextColumnConfig( + name="customer_review", + prompt=( + "You are a customer named {{ customer.first_name }} from {{ customer.city }}, {{ customer.state }}. 
" + "You are {{ customer.age }} years old and recently purchased a product called {{ product_name }}. " + "Write a review of this product, which you gave a rating of {{ number_of_stars }} stars. " + "The style of the review should be '{{ review_style }}'. " + "Respond with only the review, no other text." + ), + model_alias=MODEL_ALIAS, + ) +) + +data_designer.validate(config_builder) +``` + +## Iteration is key – preview the dataset! + +1. Use the `preview` method to generate a sample of records quickly +2. Inspect the results for quality and format issues +3. Adjust column configurations, prompts, or parameters as needed +4. Re-run the preview until satisfied + +```python +preview = data_designer.preview(config_builder, num_records=2) +``` + +```python +# Run this cell multiple times to cycle through the 2 preview records. +preview.display_sample_record() +``` + +```python +# The preview dataset is available as a pandas DataFrame. +preview.dataset +``` + +## Analyze the generated data + +Data Designer automatically generates a basic statistical analysis of the generated data. This analysis is available via the `analysis` property of generation result objects. + +```python +# Print the analysis as a table. +preview.analysis.to_report() +``` + +## Scale up! + +Happy with your preview data? Use the `create` method to submit larger Data Designer generation jobs. + +```python +results = data_designer.create(config_builder, num_records=10, dataset_name="tutorial-1") +``` + +```python +# Load the generated dataset as a pandas DataFrame. +dataset = results.load_dataset() + +dataset.head() +``` + +```python +# Load the analysis results into memory. +analysis = results.load_analysis() + +analysis.to_report() +``` + +## Next Steps + +Now that you've seen the basics of Data Designer, check out the following tutorials to learn more: + +- [Structured Outputs](/docs/tutorials/structured-outputs) - Learn about structured outputs and Jinja expressions +- [Seeding with a Dataset](/docs/tutorials/seeding-with-dataset) - Seed synthetic data generation with an external dataset +- [Images as Context](/docs/tutorials/images-as-context) - Provide images as context to vision models +- [Generating Images](/docs/tutorials/generating-images-notebook) - Generate images with Data Designer diff --git a/fern/versions/v0.5.0.yml b/fern/versions/v0.5.0.yml new file mode 100644 index 00000000..326c1d0b --- /dev/null +++ b/fern/versions/v0.5.0.yml @@ -0,0 +1,157 @@ +tabs: + docs: + display-name: Documentation + slug: docs + api: + display-name: API Reference + slug: api + +navigation: + - tab: docs + layout: + - section: Getting Started + contents: + - page: Welcome + path: ../v0.5.0/pages/index.mdx + - page: Installation + path: ../v0.5.0/pages/installation.mdx + - page: Quick Start + path: ../v0.5.0/pages/quick-start.mdx + - page: Contributing + path: ../v0.5.0/pages/contributing.mdx + - section: Concepts + contents: + - section: Models + contents: + - page: Default Model Settings + path: ../v0.5.0/pages/concepts/models/default-model-settings.mdx + - page: Custom Model Settings + path: ../v0.5.0/pages/concepts/models/custom-model-settings.mdx + - page: Configure with CLI + path: ../v0.5.0/pages/concepts/models/configure-with-cli.mdx + - page: Model Providers + path: ../v0.5.0/pages/concepts/models/model-providers.mdx + - page: Model Configs + path: ../v0.5.0/pages/concepts/models/model-configs.mdx + - page: Inference Parameters + path: ../v0.5.0/pages/concepts/models/inference-parameters.mdx + - section: Tool Use & MCP + 
contents: + - page: Overview + path: ../v0.5.0/pages/concepts/tool-use-and-mcp.mdx + - page: MCP Providers + path: ../v0.5.0/pages/concepts/mcp/mcp-providers.mdx + - page: Tool Configs + path: ../v0.5.0/pages/concepts/mcp/tool-configs.mdx + - page: Enabling Tools + path: ../v0.5.0/pages/concepts/mcp/enabling-tools.mdx + - page: Configure via CLI + path: ../v0.5.0/pages/concepts/mcp/configure-mcp-cli.mdx + - page: Safety and Limits + path: ../v0.5.0/pages/concepts/mcp/safety-and-limits.mdx + - page: Columns + path: ../v0.5.0/pages/concepts/columns.mdx + - page: Traces + path: ../v0.5.0/pages/concepts/traces.mdx + - page: Validators + path: ../v0.5.0/pages/concepts/validators.mdx + - page: Processors + path: ../v0.5.0/pages/concepts/processors.mdx + - page: Person Sampling + path: ../v0.5.0/pages/concepts/person-sampling.mdx + - page: Architecture & Performance + path: ../v0.5.0/pages/concepts/architecture-and-performance.mdx + - page: Seed Datasets + path: ../v0.5.0/pages/concepts/seed-datasets.mdx + - page: Custom Columns + path: ../v0.5.0/pages/concepts/custom-columns.mdx + - page: Deployment Options + path: ../v0.5.0/pages/concepts/deployment-options.mdx + - section: Tutorials + contents: + - page: Overview + path: ../v0.5.0/pages/tutorials/overview.mdx + - page: The Basics + path: ../v0.5.0/pages/tutorials/the-basics.mdx + - page: The Basics (Notebook) + path: ../v0.5.0/pages/tutorials/the-basics-notebook.mdx + - page: Structured Outputs + path: ../v0.5.0/pages/tutorials/structured-outputs.mdx + - page: Structured Outputs (Notebook) + path: ../v0.5.0/pages/tutorials/structured-outputs-notebook.mdx + - page: Seeding with a Dataset + path: ../v0.5.0/pages/tutorials/seeding-with-dataset.mdx + - page: Seeding with a Dataset (Notebook) + path: ../v0.5.0/pages/tutorials/seeding-with-dataset-notebook.mdx + - page: Images as Context + path: ../v0.5.0/pages/tutorials/images-as-context.mdx + - page: Images as Context (Notebook) + path: ../v0.5.0/pages/tutorials/images-as-context-notebook.mdx + - page: Generating Images (Notebook) + path: ../v0.5.0/pages/tutorials/generating-images-notebook.mdx + - page: Image-to-Image Editing (Notebook) + path: ../v0.5.0/pages/tutorials/editing-images-notebook.mdx + - section: Recipes + contents: + - page: Recipe Cards + path: ../v0.5.0/pages/recipes/index.mdx + - section: Code Generation + contents: + - page: Text to Python + path: ../v0.5.0/pages/recipes/code-generation/text-to-python.mdx + - page: Text to SQL + path: ../v0.5.0/pages/recipes/code-generation/text-to-sql.mdx + - section: QA and Chat + contents: + - page: Product Info QA + path: ../v0.5.0/pages/recipes/qa-and-chat/product-info-qa.mdx + - page: Multi-Turn Chat + path: ../v0.5.0/pages/recipes/qa-and-chat/multi-turn-chat.mdx + - section: MCP & Tool Use + contents: + - page: Basic MCP + path: ../v0.5.0/pages/recipes/mcp-and-tooluse/basic-mcp.mdx + - page: PDF Document QA + path: ../v0.5.0/pages/recipes/mcp-and-tooluse/pdf-qa.mdx + - section: Dev Notes + contents: + - page: Overview + path: ../v0.5.0/pages/devnotes/index.mdx + - page: Design Principles + path: ../v0.5.0/pages/devnotes/design-principles.mdx + - page: Graduate-Level Science Reasoning (RQA) + path: ../v0.5.0/pages/devnotes/rqa.mdx + - page: Deep Research Trajectories + path: ../v0.5.0/pages/devnotes/deep-research-trajectories.mdx + - section: Plugins + contents: + - page: Overview + path: ../v0.5.0/pages/plugins/overview.mdx + - page: Example Plugin + path: ../v0.5.0/pages/plugins/example.mdx + - page: Available Plugins + path: 
../v0.5.0/pages/plugins/available.mdx + - tab: api + layout: + - section: API Reference + contents: + - page: Models + path: ../v0.5.0/pages/api-reference/models.mdx + - page: Column Configs + path: ../v0.5.0/pages/api-reference/column-configs.mdx + - page: Config Builder + path: ../v0.5.0/pages/api-reference/config-builder.mdx + - page: Data Designer Config + path: ../v0.5.0/pages/api-reference/data-designer-config.mdx + - page: Sampler Params + path: ../v0.5.0/pages/api-reference/sampler-params.mdx + - page: Validator Params + path: ../v0.5.0/pages/api-reference/validator-params.mdx + - page: Processors + path: ../v0.5.0/pages/api-reference/processors.mdx + - page: Analysis + path: ../v0.5.0/pages/api-reference/analysis.mdx + - page: MCP + path: ../v0.5.0/pages/api-reference/mcp.mdx + - page: Run Config + path: ../v0.5.0/pages/api-reference/run-config.mdx From 0b564616ca88a6ba0bdeb8427a589b9cce45bf23 Mon Sep 17 00:00:00 2001 From: Lawrence Lane Date: Fri, 13 Feb 2026 12:31:43 -0500 Subject: [PATCH 7/8] updates Signed-off-by: Lawrence Lane --- Makefile | 19 +- docs/colab_notebooks/1-the-basics.ipynb | 62 +- ...ctured-outputs-and-jinja-expressions.ipynb | 58 +- .../3-seeding-with-a-dataset.ipynb | 54 +- .../4-providing-images-as-context.ipynb | 66 +- .../colab_notebooks/5-generating-images.ipynb | 42 +- .../6-editing-images-with-image-context.ipynb | 54 +- docs/scripts/generate_colab_notebooks.py | 45 +- fern/components/NotebookViewer.tsx | 318 +++++++-- .../devnotes/openresearcher-demo-code.ts | 98 +++ .../devnotes/prepare_corpus-code.ts | 246 +++++++ .../components/devnotes/retriever_mcp-code.ts | 220 ++++++ fern/components/notebooks/1-the-basics.json | 51 +- fern/components/notebooks/1-the-basics.ts | 51 +- ...uctured-outputs-and-jinja-expressions.json | 48 +- ...tructured-outputs-and-jinja-expressions.ts | 48 +- .../notebooks/3-seeding-with-a-dataset.json | 45 +- .../notebooks/3-seeding-with-a-dataset.ts | 45 +- .../4-providing-images-as-context.json | 60 +- .../4-providing-images-as-context.ts | 60 +- .../notebooks/5-generating-images.json | 36 +- .../notebooks/5-generating-images.ts | 36 +- .../6-editing-images-with-image-context.json | 48 +- .../6-editing-images-with-image-context.ts | 48 +- fern/scripts/extract-code.py | 34 + fern/scripts/ipynb-to-fern-json.py | 84 ++- fern/styles/notebook-viewer.css | 138 +++- .../devnotes/deep-research-trajectories.mdx | 630 +----------------- .../pages/devnotes/design-principles.mdx | 28 +- fern/v0.5.0/pages/devnotes/rqa.mdx | 19 +- fern/v0.5.0/pages/tutorials/overview.mdx | 2 - 31 files changed, 1689 insertions(+), 1104 deletions(-) create mode 100644 fern/components/devnotes/openresearcher-demo-code.ts create mode 100644 fern/components/devnotes/prepare_corpus-code.ts create mode 100644 fern/components/devnotes/retriever_mcp-code.ts create mode 100644 fern/scripts/extract-code.py diff --git a/Makefile b/Makefile index 035ca1ef..09cf4c69 100644 --- a/Makefile +++ b/Makefile @@ -76,7 +76,9 @@ help: @echo " verify-imports - Verify all package imports work" @echo " show-versions - Show versions of all packages" @echo " convert-execute-notebooks - Convert notebooks from .py to .ipynb using jupytext" - @echo " generate-colab-notebooks - Generate Colab-compatible notebooks" + @echo " generate-colab-notebooks - Generate Colab-compatible notebooks" + @echo " generate-fern-notebooks - Convert notebooks to Fern format for docs" + @echo " generate-fern-notebooks-with-outputs - Execute notebooks first, then convert (requires API key)" @echo " 
serve-docs-locally - Serve documentation locally" @echo " check-license-headers - Check if all files have license headers" @echo " update-license-headers - Add license headers to all files" @@ -470,20 +472,29 @@ convert-execute-notebooks: generate-colab-notebooks: @echo "📓 Generating Colab-compatible notebooks..." - uv run --group docs python docs/scripts/generate_colab_notebooks.py + @if [ -d docs/notebooks ] && [ -n "$$(ls docs/notebooks/*.ipynb 2>/dev/null)" ]; then \ + echo " Using executed notebooks from docs/notebooks (outputs preserved)"; \ + uv run --group docs python docs/scripts/generate_colab_notebooks.py --executed-dir docs/notebooks; \ + else \ + echo " Using source only (run 'make convert-execute-notebooks' first for outputs)"; \ + uv run --group docs python docs/scripts/generate_colab_notebooks.py; \ + fi @echo "✅ Colab notebooks created in docs/colab_notebooks/" -generate-fern-notebooks: +generate-fern-notebooks: generate-colab-notebooks @echo "📓 Converting notebooks to Fern format for NotebookViewer..." @mkdir -p fern/components/notebooks @for f in docs/colab_notebooks/*.ipynb; do \ if [ -f "$$f" ]; then \ name=$$(basename "$$f" .ipynb); \ - python fern/scripts/ipynb-to-fern-json.py "$$f" -o fern/components/notebooks/$$name.json; \ + uv run python fern/scripts/ipynb-to-fern-json.py "$$f" -o fern/components/notebooks/$$name.json; \ fi; \ done @echo "✅ Fern notebooks created in fern/components/notebooks/" +generate-fern-notebooks-with-outputs: convert-execute-notebooks + $(MAKE) generate-fern-notebooks + # ============================================================================== # PERFORMANCE # ============================================================================== diff --git a/docs/colab_notebooks/1-the-basics.ipynb b/docs/colab_notebooks/1-the-basics.ipynb index ef6af443..66c19438 100644 --- a/docs/colab_notebooks/1-the-basics.ipynb +++ b/docs/colab_notebooks/1-the-basics.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "markdown", - "id": "2a24b4d3", + "id": "34304cc0", "metadata": {}, "source": [ "# 🎨 Data Designer Tutorial: The Basics\n", @@ -14,7 +14,7 @@ }, { "cell_type": "markdown", - "id": "3e76b164", + "id": "d407c70a", "metadata": {}, "source": [ "### 📦 Import Data Designer\n", @@ -26,7 +26,7 @@ }, { "cell_type": "markdown", - "id": "8178f225", + "id": "10a53966", "metadata": {}, "source": [ "### ⚡ Colab Setup\n", @@ -37,7 +37,7 @@ { "cell_type": "code", "execution_count": null, - "id": "5bf5f5b3", + "id": "03518092", "metadata": {}, "outputs": [], "source": [ @@ -48,7 +48,7 @@ { "cell_type": "code", "execution_count": null, - "id": "dd0db080", + "id": "9eb01405", "metadata": {}, "outputs": [], "source": [ @@ -66,7 +66,7 @@ { "cell_type": "code", "execution_count": null, - "id": "13a21b35", + "id": "0620b86f", "metadata": {}, "outputs": [], "source": [ @@ -76,7 +76,7 @@ }, { "cell_type": "markdown", - "id": "d5f0a5a7", + "id": "c56a11f5", "metadata": {}, "source": [ "### ⚙️ Initialize the Data Designer interface\n", @@ -89,7 +89,7 @@ { "cell_type": "code", "execution_count": null, - "id": "c87c7075", + "id": "4c75c742", "metadata": {}, "outputs": [], "source": [ @@ -98,7 +98,7 @@ }, { "cell_type": "markdown", - "id": "b3e7e6a6", + "id": "ed587c27", "metadata": {}, "source": [ "### 🎛️ Define model configurations\n", @@ -115,7 +115,7 @@ { "cell_type": "code", "execution_count": null, - "id": "218ecb24", + "id": "d276fc65", "metadata": {}, "outputs": [], "source": [ @@ -145,7 +145,7 @@ }, { "cell_type": "markdown", - "id": "180f86ce", + "id": "ad7ee6a4", 
"metadata": {}, "source": [ "### 🏗️ Initialize the Data Designer Config Builder\n", @@ -160,7 +160,7 @@ { "cell_type": "code", "execution_count": null, - "id": "72fbb51a", + "id": "4f11a79c", "metadata": {}, "outputs": [], "source": [ @@ -169,7 +169,7 @@ }, { "cell_type": "markdown", - "id": "61c70393", + "id": "37fec12e", "metadata": {}, "source": [ "## 🎲 Getting started with sampler columns\n", @@ -186,7 +186,7 @@ { "cell_type": "code", "execution_count": null, - "id": "037dced5", + "id": "5503a564", "metadata": {}, "outputs": [], "source": [ @@ -195,7 +195,7 @@ }, { "cell_type": "markdown", - "id": "7fec32fe", + "id": "fb1b7bd8", "metadata": {}, "source": [ "Let's start designing our product review dataset by adding product category and subcategory columns.\n" @@ -204,7 +204,7 @@ { "cell_type": "code", "execution_count": null, - "id": "ce41fe08", + "id": "fa0a52fe", "metadata": {}, "outputs": [], "source": [ @@ -285,7 +285,7 @@ }, { "cell_type": "markdown", - "id": "1b94851d", + "id": "33d10aad", "metadata": {}, "source": [ "Next, let's add samplers to generate data related to the customer and their review.\n" @@ -294,7 +294,7 @@ { "cell_type": "code", "execution_count": null, - "id": "bcaba433", + "id": "08b43716", "metadata": {}, "outputs": [], "source": [ @@ -331,7 +331,7 @@ }, { "cell_type": "markdown", - "id": "fd91aaf2", + "id": "647e6aac", "metadata": {}, "source": [ "## 🦜 LLM-generated columns\n", @@ -346,7 +346,7 @@ { "cell_type": "code", "execution_count": null, - "id": "7a5f3221", + "id": "c9062ca9", "metadata": {}, "outputs": [], "source": [ @@ -382,7 +382,7 @@ }, { "cell_type": "markdown", - "id": "de0e26f8", + "id": "e2a1a3ba", "metadata": {}, "source": [ "### 🔁 Iteration is key – preview the dataset!\n", @@ -399,7 +399,7 @@ { "cell_type": "code", "execution_count": null, - "id": "1d5a0701", + "id": "4f47448c", "metadata": {}, "outputs": [], "source": [ @@ -409,7 +409,7 @@ { "cell_type": "code", "execution_count": null, - "id": "28267bd7", + "id": "5562058b", "metadata": {}, "outputs": [], "source": [ @@ -420,7 +420,7 @@ { "cell_type": "code", "execution_count": null, - "id": "f9412b20", + "id": "60c93917", "metadata": {}, "outputs": [], "source": [ @@ -430,7 +430,7 @@ }, { "cell_type": "markdown", - "id": "7cfeff69", + "id": "93ed2108", "metadata": {}, "source": [ "### 📊 Analyze the generated data\n", @@ -443,7 +443,7 @@ { "cell_type": "code", "execution_count": null, - "id": "ce9f959b", + "id": "4e385e37", "metadata": {}, "outputs": [], "source": [ @@ -453,7 +453,7 @@ }, { "cell_type": "markdown", - "id": "9aaec768", + "id": "e246e323", "metadata": {}, "source": [ "### 🆙 Scale up!\n", @@ -466,7 +466,7 @@ { "cell_type": "code", "execution_count": null, - "id": "ce89c502", + "id": "40f65835", "metadata": {}, "outputs": [], "source": [ @@ -476,7 +476,7 @@ { "cell_type": "code", "execution_count": null, - "id": "86b4d4b8", + "id": "ee916774", "metadata": {}, "outputs": [], "source": [ @@ -489,7 +489,7 @@ { "cell_type": "code", "execution_count": null, - "id": "52f859b6", + "id": "009f7e61", "metadata": {}, "outputs": [], "source": [ @@ -501,7 +501,7 @@ }, { "cell_type": "markdown", - "id": "089f4cc0", + "id": "9bb500b1", "metadata": {}, "source": [ "## ⏭️ Next Steps\n", diff --git a/docs/colab_notebooks/2-structured-outputs-and-jinja-expressions.ipynb b/docs/colab_notebooks/2-structured-outputs-and-jinja-expressions.ipynb index ee852c7a..bd1d498a 100644 --- a/docs/colab_notebooks/2-structured-outputs-and-jinja-expressions.ipynb +++ 
b/docs/colab_notebooks/2-structured-outputs-and-jinja-expressions.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "markdown", - "id": "9edb12b1", + "id": "5b57e503", "metadata": {}, "source": [ "# 🎨 Data Designer Tutorial: Structured Outputs and Jinja Expressions\n", @@ -16,7 +16,7 @@ }, { "cell_type": "markdown", - "id": "44ae499b", + "id": "d9ae6b33", "metadata": {}, "source": [ "### 📦 Import Data Designer\n", @@ -28,7 +28,7 @@ }, { "cell_type": "markdown", - "id": "57750043", + "id": "991aea10", "metadata": {}, "source": [ "### ⚡ Colab Setup\n", @@ -39,7 +39,7 @@ { "cell_type": "code", "execution_count": null, - "id": "f69d18e2", + "id": "1674de34", "metadata": {}, "outputs": [], "source": [ @@ -50,7 +50,7 @@ { "cell_type": "code", "execution_count": null, - "id": "fe4eace1", + "id": "fb9fc94f", "metadata": {}, "outputs": [], "source": [ @@ -68,7 +68,7 @@ { "cell_type": "code", "execution_count": null, - "id": "299ade8e", + "id": "5c3a6750", "metadata": {}, "outputs": [], "source": [ @@ -78,7 +78,7 @@ }, { "cell_type": "markdown", - "id": "8fddaab5", + "id": "505190dd", "metadata": {}, "source": [ "### ⚙️ Initialize the Data Designer interface\n", @@ -91,7 +91,7 @@ { "cell_type": "code", "execution_count": null, - "id": "0186d4a9", + "id": "f2a2ce1e", "metadata": {}, "outputs": [], "source": [ @@ -100,7 +100,7 @@ }, { "cell_type": "markdown", - "id": "c58fa496", + "id": "a27febdd", "metadata": {}, "source": [ "### 🎛️ Define model configurations\n", @@ -117,7 +117,7 @@ { "cell_type": "code", "execution_count": null, - "id": "70f4ace2", + "id": "73cb0bf0", "metadata": {}, "outputs": [], "source": [ @@ -147,7 +147,7 @@ }, { "cell_type": "markdown", - "id": "36530024", + "id": "983889d2", "metadata": {}, "source": [ "### 🏗️ Initialize the Data Designer Config Builder\n", @@ -162,7 +162,7 @@ { "cell_type": "code", "execution_count": null, - "id": "905f2f4b", + "id": "00e44ceb", "metadata": {}, "outputs": [], "source": [ @@ -171,7 +171,7 @@ }, { "cell_type": "markdown", - "id": "7cf2e515", + "id": "63dfc1e6", "metadata": {}, "source": [ "### 🧑‍🎨 Designing our data\n", @@ -198,7 +198,7 @@ { "cell_type": "code", "execution_count": null, - "id": "6dff3604", + "id": "c2d1a831", "metadata": {}, "outputs": [], "source": [ @@ -226,7 +226,7 @@ }, { "cell_type": "markdown", - "id": "4234fd35", + "id": "15155d07", "metadata": {}, "source": [ "Next, let's design our product review dataset using a few more tricks compared to the previous notebook.\n" @@ -235,7 +235,7 @@ { "cell_type": "code", "execution_count": null, - "id": "88ba4646", + "id": "dc2d88b9", "metadata": {}, "outputs": [], "source": [ @@ -344,7 +344,7 @@ }, { "cell_type": "markdown", - "id": "c4e716b6", + "id": "4ecbd214", "metadata": {}, "source": [ "Next, we will use more advanced Jinja expressions to create new columns.\n", @@ -361,7 +361,7 @@ { "cell_type": "code", "execution_count": null, - "id": "e5d763ef", + "id": "4d7a8993", "metadata": {}, "outputs": [], "source": [ @@ -414,7 +414,7 @@ }, { "cell_type": "markdown", - "id": "6d009906", + "id": "f38b1b81", "metadata": {}, "source": [ "### 🔁 Iteration is key – preview the dataset!\n", @@ -431,7 +431,7 @@ { "cell_type": "code", "execution_count": null, - "id": "93d03e0c", + "id": "b89146e9", "metadata": {}, "outputs": [], "source": [ @@ -441,7 +441,7 @@ { "cell_type": "code", "execution_count": null, - "id": "5db37270", + "id": "645d6c9f", "metadata": {}, "outputs": [], "source": [ @@ -452,7 +452,7 @@ { "cell_type": "code", "execution_count": null, - "id": "b00a32a1", + "id": 
"e3b65a4d", "metadata": {}, "outputs": [], "source": [ @@ -462,7 +462,7 @@ }, { "cell_type": "markdown", - "id": "fea40d20", + "id": "586eaf8f", "metadata": {}, "source": [ "### 📊 Analyze the generated data\n", @@ -475,7 +475,7 @@ { "cell_type": "code", "execution_count": null, - "id": "abde79b4", + "id": "b27be3ef", "metadata": {}, "outputs": [], "source": [ @@ -485,7 +485,7 @@ }, { "cell_type": "markdown", - "id": "034fa4e0", + "id": "a91c5a4d", "metadata": {}, "source": [ "### 🆙 Scale up!\n", @@ -498,7 +498,7 @@ { "cell_type": "code", "execution_count": null, - "id": "4d0dee23", + "id": "5aa63151", "metadata": {}, "outputs": [], "source": [ @@ -508,7 +508,7 @@ { "cell_type": "code", "execution_count": null, - "id": "b9e7aa79", + "id": "769fd142", "metadata": {}, "outputs": [], "source": [ @@ -521,7 +521,7 @@ { "cell_type": "code", "execution_count": null, - "id": "2ae9984d", + "id": "837585fd", "metadata": {}, "outputs": [], "source": [ @@ -533,7 +533,7 @@ }, { "cell_type": "markdown", - "id": "69cdeafb", + "id": "2a34c7fa", "metadata": {}, "source": [ "## ⏭️ Next Steps\n", diff --git a/docs/colab_notebooks/3-seeding-with-a-dataset.ipynb b/docs/colab_notebooks/3-seeding-with-a-dataset.ipynb index d4178400..c1853c2a 100644 --- a/docs/colab_notebooks/3-seeding-with-a-dataset.ipynb +++ b/docs/colab_notebooks/3-seeding-with-a-dataset.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "markdown", - "id": "5f8a3321", + "id": "1d1e0d32", "metadata": {}, "source": [ "# 🎨 Data Designer Tutorial: Seeding Synthetic Data Generation with an External Dataset\n", @@ -16,7 +16,7 @@ }, { "cell_type": "markdown", - "id": "ebfd9603", + "id": "59a2287f", "metadata": {}, "source": [ "### 📦 Import Data Designer\n", @@ -28,7 +28,7 @@ }, { "cell_type": "markdown", - "id": "88bb567e", + "id": "37598099", "metadata": {}, "source": [ "### ⚡ Colab Setup\n", @@ -39,7 +39,7 @@ { "cell_type": "code", "execution_count": null, - "id": "38896128", + "id": "e21fdec6", "metadata": {}, "outputs": [], "source": [ @@ -50,7 +50,7 @@ { "cell_type": "code", "execution_count": null, - "id": "0e1906e0", + "id": "6f0f7a06", "metadata": {}, "outputs": [], "source": [ @@ -68,7 +68,7 @@ { "cell_type": "code", "execution_count": null, - "id": "3eeb8d05", + "id": "074125e6", "metadata": {}, "outputs": [], "source": [ @@ -78,7 +78,7 @@ }, { "cell_type": "markdown", - "id": "b3aeabba", + "id": "6a0fa5cd", "metadata": {}, "source": [ "### ⚙️ Initialize the Data Designer interface\n", @@ -91,7 +91,7 @@ { "cell_type": "code", "execution_count": null, - "id": "2dbc44e4", + "id": "025133a6", "metadata": {}, "outputs": [], "source": [ @@ -100,7 +100,7 @@ }, { "cell_type": "markdown", - "id": "14306419", + "id": "a5acc512", "metadata": {}, "source": [ "### 🎛️ Define model configurations\n", @@ -117,7 +117,7 @@ { "cell_type": "code", "execution_count": null, - "id": "e177a323", + "id": "c045292e", "metadata": {}, "outputs": [], "source": [ @@ -147,7 +147,7 @@ }, { "cell_type": "markdown", - "id": "09feb19e", + "id": "9ecf5d66", "metadata": {}, "source": [ "### 🏗️ Initialize the Data Designer Config Builder\n", @@ -162,7 +162,7 @@ { "cell_type": "code", "execution_count": null, - "id": "dc0a824d", + "id": "c83f40e8", "metadata": {}, "outputs": [], "source": [ @@ -171,7 +171,7 @@ }, { "cell_type": "markdown", - "id": "4faa9a68", + "id": "736239fb", "metadata": {}, "source": [ "## 🏥 Prepare a seed dataset\n", @@ -196,7 +196,7 @@ { "cell_type": "code", "execution_count": null, - "id": "b5deb848", + "id": "37d3c904", "metadata": {}, "outputs": [], 
"source": [ @@ -214,7 +214,7 @@ }, { "cell_type": "markdown", - "id": "2e72b89b", + "id": "775fb307", "metadata": {}, "source": [ "## 🎨 Designing our synthetic patient notes dataset\n", @@ -227,7 +227,7 @@ { "cell_type": "code", "execution_count": null, - "id": "01cb7b88", + "id": "ac5dd110", "metadata": {}, "outputs": [], "source": [ @@ -308,7 +308,7 @@ }, { "cell_type": "markdown", - "id": "11989122", + "id": "34e4310f", "metadata": {}, "source": [ "### 🔁 Iteration is key – preview the dataset!\n", @@ -325,7 +325,7 @@ { "cell_type": "code", "execution_count": null, - "id": "5ba842f3", + "id": "227e584e", "metadata": {}, "outputs": [], "source": [ @@ -335,7 +335,7 @@ { "cell_type": "code", "execution_count": null, - "id": "166b02dd", + "id": "cddf1ec9", "metadata": {}, "outputs": [], "source": [ @@ -346,7 +346,7 @@ { "cell_type": "code", "execution_count": null, - "id": "f9a74a7c", + "id": "9a12e8b5", "metadata": {}, "outputs": [], "source": [ @@ -356,7 +356,7 @@ }, { "cell_type": "markdown", - "id": "69628101", + "id": "97aa4334", "metadata": {}, "source": [ "### 📊 Analyze the generated data\n", @@ -369,7 +369,7 @@ { "cell_type": "code", "execution_count": null, - "id": "abba1989", + "id": "11a1bb87", "metadata": {}, "outputs": [], "source": [ @@ -379,7 +379,7 @@ }, { "cell_type": "markdown", - "id": "bb462c63", + "id": "857ca634", "metadata": {}, "source": [ "### 🆙 Scale up!\n", @@ -392,7 +392,7 @@ { "cell_type": "code", "execution_count": null, - "id": "966bf86e", + "id": "42fc4f63", "metadata": {}, "outputs": [], "source": [ @@ -402,7 +402,7 @@ { "cell_type": "code", "execution_count": null, - "id": "395c0e5b", + "id": "a329c7e9", "metadata": {}, "outputs": [], "source": [ @@ -415,7 +415,7 @@ { "cell_type": "code", "execution_count": null, - "id": "0e254d59", + "id": "c9fe3e2f", "metadata": {}, "outputs": [], "source": [ @@ -427,7 +427,7 @@ }, { "cell_type": "markdown", - "id": "05a97070", + "id": "9ea737dd", "metadata": {}, "source": [ "## ⏭️ Next Steps\n", diff --git a/docs/colab_notebooks/4-providing-images-as-context.ipynb b/docs/colab_notebooks/4-providing-images-as-context.ipynb index 4771e543..1de070dd 100644 --- a/docs/colab_notebooks/4-providing-images-as-context.ipynb +++ b/docs/colab_notebooks/4-providing-images-as-context.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "markdown", - "id": "badc7c3c", + "id": "92c252fe", "metadata": {}, "source": [ "# 🎨 Data Designer Tutorial: Providing Images as Context for Vision-Based Data Generation" @@ -10,7 +10,7 @@ }, { "cell_type": "markdown", - "id": "95f37174", + "id": "6dddba82", "metadata": {}, "source": [ "#### 📚 What you'll learn\n", @@ -25,7 +25,7 @@ }, { "cell_type": "markdown", - "id": "4e5edd38", + "id": "c676c7f6", "metadata": {}, "source": [ "### 📦 Import Data Designer\n", @@ -37,7 +37,7 @@ }, { "cell_type": "markdown", - "id": "a10cc70d", + "id": "ea9c162d", "metadata": {}, "source": [ "### ⚡ Colab Setup\n", @@ -48,7 +48,7 @@ { "cell_type": "code", "execution_count": null, - "id": "06094049", + "id": "9003e48c", "metadata": {}, "outputs": [], "source": [ @@ -59,7 +59,7 @@ { "cell_type": "code", "execution_count": null, - "id": "3de8fe04", + "id": "b84124e6", "metadata": {}, "outputs": [], "source": [ @@ -77,7 +77,7 @@ { "cell_type": "code", "execution_count": null, - "id": "947c8ca8", + "id": "623b014f", "metadata": {}, "outputs": [], "source": [ @@ -100,7 +100,7 @@ }, { "cell_type": "markdown", - "id": "8417ed40", + "id": "4fff6c6d", "metadata": {}, "source": [ "### ⚙️ Initialize the Data Designer interface\n", @@ 
-113,7 +113,7 @@ { "cell_type": "code", "execution_count": null, - "id": "3e4783c2", + "id": "fab8e68a", "metadata": {}, "outputs": [], "source": [ @@ -122,7 +122,7 @@ }, { "cell_type": "markdown", - "id": "9cca8959", + "id": "b95f0f43", "metadata": {}, "source": [ "### 🎛️ Define model configurations\n", @@ -139,7 +139,7 @@ { "cell_type": "code", "execution_count": null, - "id": "546ced2c", + "id": "c942693b", "metadata": {}, "outputs": [], "source": [ @@ -162,7 +162,7 @@ }, { "cell_type": "markdown", - "id": "e02918e5", + "id": "295a9be1", "metadata": {}, "source": [ "### 🏗️ Initialize the Data Designer Config Builder\n", @@ -177,7 +177,7 @@ { "cell_type": "code", "execution_count": null, - "id": "684816d4", + "id": "d3895093", "metadata": {}, "outputs": [], "source": [ @@ -186,7 +186,7 @@ }, { "cell_type": "markdown", - "id": "b5dd8bb7", + "id": "18c1c8c0", "metadata": {}, "source": [ "### 🌱 Seed Dataset Creation\n", @@ -203,7 +203,7 @@ { "cell_type": "code", "execution_count": null, - "id": "af5cbcf2", + "id": "4ff899e2", "metadata": {}, "outputs": [], "source": [ @@ -218,7 +218,7 @@ { "cell_type": "code", "execution_count": null, - "id": "0a1ac12c", + "id": "53c29682", "metadata": {}, "outputs": [], "source": [ @@ -266,7 +266,7 @@ { "cell_type": "code", "execution_count": null, - "id": "4ddd2e32", + "id": "6d221329", "metadata": {}, "outputs": [], "source": [ @@ -284,7 +284,7 @@ { "cell_type": "code", "execution_count": null, - "id": "2171f019", + "id": "4e965420", "metadata": {}, "outputs": [], "source": [ @@ -294,7 +294,7 @@ { "cell_type": "code", "execution_count": null, - "id": "5d3fcba6", + "id": "b3f30b9e", "metadata": {}, "outputs": [], "source": [ @@ -306,7 +306,7 @@ { "cell_type": "code", "execution_count": null, - "id": "81e1217b", + "id": "3d2d08e3", "metadata": {}, "outputs": [], "source": [ @@ -335,7 +335,7 @@ }, { "cell_type": "markdown", - "id": "684a5a40", + "id": "d420e3c5", "metadata": {}, "source": [ "### 🔁 Iteration is key – preview the dataset!\n", @@ -352,7 +352,7 @@ { "cell_type": "code", "execution_count": null, - "id": "a30e634e", + "id": "653d0fed", "metadata": {}, "outputs": [], "source": [ @@ -362,7 +362,7 @@ { "cell_type": "code", "execution_count": null, - "id": "9e6effa8", + "id": "8d0f2be7", "metadata": {}, "outputs": [], "source": [ @@ -373,7 +373,7 @@ { "cell_type": "code", "execution_count": null, - "id": "4c8a78aa", + "id": "f3a438ec", "metadata": {}, "outputs": [], "source": [ @@ -383,7 +383,7 @@ }, { "cell_type": "markdown", - "id": "5186f4c5", + "id": "7842c013", "metadata": {}, "source": [ "### 📊 Analyze the generated data\n", @@ -396,7 +396,7 @@ { "cell_type": "code", "execution_count": null, - "id": "efa93f44", + "id": "a5544e9d", "metadata": {}, "outputs": [], "source": [ @@ -406,7 +406,7 @@ }, { "cell_type": "markdown", - "id": "fbcfd3d8", + "id": "a2f94c56", "metadata": {}, "source": [ "### 🔎 Visual Inspection\n", @@ -417,7 +417,7 @@ { "cell_type": "code", "execution_count": null, - "id": "2538a89b", + "id": "c868dec7", "metadata": { "lines_to_next_cell": 2 }, @@ -441,7 +441,7 @@ }, { "cell_type": "markdown", - "id": "a7de9c71", + "id": "599cc8ad", "metadata": {}, "source": [ "### 🆙 Scale up!\n", @@ -454,7 +454,7 @@ { "cell_type": "code", "execution_count": null, - "id": "850d78d8", + "id": "6a155306", "metadata": {}, "outputs": [], "source": [ @@ -464,7 +464,7 @@ { "cell_type": "code", "execution_count": null, - "id": "3e953262", + "id": "44a29388", "metadata": {}, "outputs": [], "source": [ @@ -477,7 +477,7 @@ { "cell_type": "code", 
"execution_count": null, - "id": "0a5c00e6", + "id": "c31562b7", "metadata": {}, "outputs": [], "source": [ @@ -489,7 +489,7 @@ }, { "cell_type": "markdown", - "id": "a6654664", + "id": "3d6e086d", "metadata": {}, "source": [ "## ⏭️ Next Steps\n", diff --git a/docs/colab_notebooks/5-generating-images.ipynb b/docs/colab_notebooks/5-generating-images.ipynb index 345f8655..a130f017 100644 --- a/docs/colab_notebooks/5-generating-images.ipynb +++ b/docs/colab_notebooks/5-generating-images.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "markdown", - "id": "dcc80c16", + "id": "949689cb", "metadata": {}, "source": [ "# 🎨 Data Designer Tutorial: Generating Images\n", @@ -24,7 +24,7 @@ }, { "cell_type": "markdown", - "id": "0fb5f66a", + "id": "4fd2547b", "metadata": {}, "source": [ "### 📦 Import Data Designer\n", @@ -35,7 +35,7 @@ }, { "cell_type": "markdown", - "id": "062e3558", + "id": "f7d4c635", "metadata": {}, "source": [ "### ⚡ Colab Setup\n", @@ -46,7 +46,7 @@ { "cell_type": "code", "execution_count": null, - "id": "f739c774", + "id": "0d8093be", "metadata": {}, "outputs": [], "source": [ @@ -57,7 +57,7 @@ { "cell_type": "code", "execution_count": null, - "id": "6fe9738b", + "id": "0d7b2670", "metadata": {}, "outputs": [], "source": [ @@ -75,7 +75,7 @@ { "cell_type": "code", "execution_count": null, - "id": "21635563", + "id": "3a7649ef", "metadata": {}, "outputs": [], "source": [ @@ -88,7 +88,7 @@ }, { "cell_type": "markdown", - "id": "19eb9d17", + "id": "44101a2b", "metadata": {}, "source": [ "### ⚙️ Initialize the Data Designer interface\n", @@ -99,7 +99,7 @@ { "cell_type": "code", "execution_count": null, - "id": "1c0715ea", + "id": "1368d828", "metadata": {}, "outputs": [], "source": [ @@ -108,7 +108,7 @@ }, { "cell_type": "markdown", - "id": "f5846789", + "id": "eb48a53b", "metadata": {}, "source": [ "### 🎛️ Define an image-generation model\n", @@ -120,7 +120,7 @@ { "cell_type": "code", "execution_count": null, - "id": "35c4fc72", + "id": "74d3635f", "metadata": {}, "outputs": [], "source": [ @@ -142,7 +142,7 @@ }, { "cell_type": "markdown", - "id": "18655cb0", + "id": "9e6f63b5", "metadata": {}, "source": [ "### 🏗️ Build the config: samplers + image column\n", @@ -153,7 +153,7 @@ { "cell_type": "code", "execution_count": null, - "id": "202e5463", + "id": "0b3f2138", "metadata": {}, "outputs": [], "source": [ @@ -326,7 +326,7 @@ }, { "cell_type": "markdown", - "id": "7d3bb84e", + "id": "0a83b06e", "metadata": {}, "source": [ "### 🔁 Preview: images as base64\n", @@ -337,7 +337,7 @@ { "cell_type": "code", "execution_count": null, - "id": "98a82605", + "id": "36c1b7a4", "metadata": {}, "outputs": [], "source": [ @@ -347,7 +347,7 @@ { "cell_type": "code", "execution_count": null, - "id": "b16858e0", + "id": "9a61fbba", "metadata": {}, "outputs": [], "source": [ @@ -358,7 +358,7 @@ { "cell_type": "code", "execution_count": null, - "id": "52d0cf2c", + "id": "7ada9a40", "metadata": {}, "outputs": [], "source": [ @@ -367,7 +367,7 @@ }, { "cell_type": "markdown", - "id": "7a2411ef", + "id": "08e9ac10", "metadata": {}, "source": [ "### 🆙 Create: images saved to disk\n", @@ -378,7 +378,7 @@ { "cell_type": "code", "execution_count": null, - "id": "dfa4fcd0", + "id": "3530dd98", "metadata": {}, "outputs": [], "source": [ @@ -388,7 +388,7 @@ { "cell_type": "code", "execution_count": null, - "id": "691db6a6", + "id": "a69079d9", "metadata": {}, "outputs": [], "source": [ @@ -399,7 +399,7 @@ { "cell_type": "code", "execution_count": null, - "id": "5e3dbb92", + "id": "362943d4", "metadata": {}, 
"outputs": [], "source": [ @@ -415,7 +415,7 @@ }, { "cell_type": "markdown", - "id": "46678986", + "id": "89ba40d4", "metadata": {}, "source": [ "## ⏭️ Next steps\n", diff --git a/docs/colab_notebooks/6-editing-images-with-image-context.ipynb b/docs/colab_notebooks/6-editing-images-with-image-context.ipynb index 9c22eb50..51dc6e47 100644 --- a/docs/colab_notebooks/6-editing-images-with-image-context.ipynb +++ b/docs/colab_notebooks/6-editing-images-with-image-context.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "markdown", - "id": "20d2b9af", + "id": "e67d9bd6", "metadata": {}, "source": [ "# 🎨 Data Designer Tutorial: Image-to-Image Editing\n", @@ -25,7 +25,7 @@ }, { "cell_type": "markdown", - "id": "875e0ce0", + "id": "c8161027", "metadata": {}, "source": [ "### 📦 Import Data Designer\n", @@ -36,7 +36,7 @@ }, { "cell_type": "markdown", - "id": "2e02e601", + "id": "f43f67b0", "metadata": {}, "source": [ "### ⚡ Colab Setup\n", @@ -47,7 +47,7 @@ { "cell_type": "code", "execution_count": null, - "id": "3b111078", + "id": "ea1f7730", "metadata": {}, "outputs": [], "source": [ @@ -58,7 +58,7 @@ { "cell_type": "code", "execution_count": null, - "id": "0e5f317a", + "id": "5ea2b3bf", "metadata": {}, "outputs": [], "source": [ @@ -76,7 +76,7 @@ { "cell_type": "code", "execution_count": null, - "id": "7ebd55d2", + "id": "2d804455", "metadata": {}, "outputs": [], "source": [ @@ -95,7 +95,7 @@ }, { "cell_type": "markdown", - "id": "0a63a3cd", + "id": "4986af56", "metadata": {}, "source": [ "### ⚙️ Initialize the Data Designer interface\n", @@ -106,7 +106,7 @@ { "cell_type": "code", "execution_count": null, - "id": "ad455680", + "id": "fc1f8501", "metadata": {}, "outputs": [], "source": [ @@ -115,7 +115,7 @@ }, { "cell_type": "markdown", - "id": "3ce04024", + "id": "7efaf70f", "metadata": {}, "source": [ "### 🎛️ Define an image-editing model\n", @@ -131,7 +131,7 @@ { "cell_type": "code", "execution_count": null, - "id": "5f5235b0", + "id": "a37fb71a", "metadata": {}, "outputs": [], "source": [ @@ -153,7 +153,7 @@ }, { "cell_type": "markdown", - "id": "10989e53", + "id": "a06fca7d", "metadata": {}, "source": [ "### 🌱 Load animal portraits from HuggingFace\n", @@ -166,7 +166,7 @@ { "cell_type": "code", "execution_count": null, - "id": "bd526010", + "id": "685c5384", "metadata": {}, "outputs": [], "source": [ @@ -199,7 +199,7 @@ { "cell_type": "code", "execution_count": null, - "id": "6fad55cf", + "id": "d4f17648", "metadata": {}, "outputs": [], "source": [ @@ -216,7 +216,7 @@ }, { "cell_type": "markdown", - "id": "e2c2f374", + "id": "12993667", "metadata": {}, "source": [ "### 🏗️ Build the configuration\n", @@ -233,7 +233,7 @@ { "cell_type": "code", "execution_count": null, - "id": "82f39386", + "id": "1d9acb93", "metadata": {}, "outputs": [], "source": [ @@ -321,7 +321,7 @@ }, { "cell_type": "markdown", - "id": "80f226b8", + "id": "1ee3daba", "metadata": {}, "source": [ "### 🔁 Preview: quick iteration\n", @@ -332,7 +332,7 @@ { "cell_type": "code", "execution_count": null, - "id": "ab29a2af", + "id": "4e8d12a9", "metadata": {}, "outputs": [], "source": [ @@ -342,7 +342,7 @@ { "cell_type": "code", "execution_count": null, - "id": "7b1ab5bb", + "id": "c83a0098", "metadata": {}, "outputs": [], "source": [ @@ -353,7 +353,7 @@ { "cell_type": "code", "execution_count": null, - "id": "3d299b09", + "id": "8f62881c", "metadata": {}, "outputs": [], "source": [ @@ -362,7 +362,7 @@ }, { "cell_type": "markdown", - "id": "05351d89", + "id": "e690f53e", "metadata": { "lines_to_next_cell": 2 }, @@ -375,7 +375,7 @@ { 
"cell_type": "code", "execution_count": null, - "id": "33886f95", + "id": "f1e7371a", "metadata": {}, "outputs": [], "source": [ @@ -411,7 +411,7 @@ { "cell_type": "code", "execution_count": null, - "id": "9ee971c8", + "id": "00901a02", "metadata": {}, "outputs": [], "source": [ @@ -421,7 +421,7 @@ }, { "cell_type": "markdown", - "id": "b6fd2b7a", + "id": "f832d3dd", "metadata": {}, "source": [ "### 🆙 Create at scale\n", @@ -432,7 +432,7 @@ { "cell_type": "code", "execution_count": null, - "id": "44046581", + "id": "8023a215", "metadata": {}, "outputs": [], "source": [ @@ -442,7 +442,7 @@ { "cell_type": "code", "execution_count": null, - "id": "23cf23c4", + "id": "d6c2e1b0", "metadata": {}, "outputs": [], "source": [ @@ -453,7 +453,7 @@ { "cell_type": "code", "execution_count": null, - "id": "934fd9f9", + "id": "34787071", "metadata": {}, "outputs": [], "source": [ @@ -463,7 +463,7 @@ }, { "cell_type": "markdown", - "id": "c5045115", + "id": "bb907145", "metadata": {}, "source": [ "## ⏭️ Next steps\n", diff --git a/docs/scripts/generate_colab_notebooks.py b/docs/scripts/generate_colab_notebooks.py index b490ccb9..c0a8819b 100644 --- a/docs/scripts/generate_colab_notebooks.py +++ b/docs/scripts/generate_colab_notebooks.py @@ -6,6 +6,9 @@ 1. Injects Colab-specific setup cells (pip install, API key from secrets) 2. Injects cells before the "Import the essentials" section 3. Saves the result as .ipynb files in docs/colab_notebooks + +When --executed-dir is provided and an executed .ipynb exists there, uses it as the +base (preserving cell outputs for display in Fern docs). Otherwise reads from source .py. """ from __future__ import annotations @@ -14,6 +17,7 @@ from pathlib import Path import jupytext +import nbformat from nbformat import NotebookNode from nbformat.v4 import new_code_cell, new_markdown_cell @@ -99,30 +103,41 @@ def process_notebook(notebook: NotebookNode, source_path: Path) -> NotebookNode: return notebook -def generate_colab_notebook(source_path: Path, output_dir: Path) -> Path: +def generate_colab_notebook( + source_path: Path, output_dir: Path, executed_dir: Path | None = None +) -> Path: """Generate a Colab-compatible notebook from a source file. 
Args: source_path: Path to the jupytext percent-format Python source file output_dir: Directory to save the output notebook + executed_dir: Optional directory with executed .ipynb files (preserves outputs) Returns: Path to the generated notebook """ - # Read the source file using jupytext - notebook = jupytext.read(source_path) + executed_path = ( + (executed_dir / f"{source_path.stem}.ipynb") if executed_dir else None + ) + + if executed_path and executed_path.exists(): + notebook = nbformat.read(executed_path, as_version=4) + else: + notebook = jupytext.read(source_path) - # Process the notebook for Colab notebook = process_notebook(notebook, source_path) - # Determine output path output_path = output_dir / f"{source_path.stem}.ipynb" - - # Ensure output directory exists output_dir.mkdir(parents=True, exist_ok=True) - # Write the notebook - jupytext.write(notebook, output_path, config={"metadata": {"jupytext": {"cell_metadata_filter": "-id"}}}) + if executed_path and executed_path.exists(): + nbformat.write(notebook, output_path) + else: + jupytext.write( + notebook, + output_path, + config={"metadata": {"jupytext": {"cell_metadata_filter": "-id"}}}, + ) return output_path @@ -142,6 +157,12 @@ def main() -> None: default=Path("docs/colab_notebooks"), help="Directory to save Colab notebooks (default: docs/colab_notebooks)", ) + parser.add_argument( + "--executed-dir", + type=Path, + default=None, + help="Directory with executed .ipynb files (preserves outputs for Fern docs)", + ) parser.add_argument( "--files", nargs="*", @@ -165,6 +186,8 @@ def main() -> None: print(f"📓 Generating Colab notebooks from {len(source_files)} source file(s)...") print(f" Source: {args.source_dir}") print(f" Output: {args.output_dir}") + if args.executed_dir: + print(f" Executed (outputs): {args.executed_dir}") print() for source_path in source_files: @@ -173,7 +196,9 @@ def main() -> None: continue try: - output_path = generate_colab_notebook(source_path, args.output_dir) + output_path = generate_colab_notebook( + source_path, args.output_dir, args.executed_dir + ) print(f"✅ {source_path.name} → {output_path.name}") except Exception as e: print(f"❌ {source_path.name}: {e}") diff --git a/fern/components/NotebookViewer.tsx b/fern/components/NotebookViewer.tsx index c942c6e0..b6c9c484 100644 --- a/fern/components/NotebookViewer.tsx +++ b/fern/components/NotebookViewer.tsx @@ -1,12 +1,16 @@ +import type { ReactNode } from "react"; + /** * NotebookViewer - Renders Jupyter notebook content in Fern docs. * + * Uses Fern's code block structure (fern-code, fern-code-block, etc.) so input + * and output cells match the default Fern code block styling. + * * Accepts notebook cells (markdown + code) and optionally a Colab URL. * Designed to work with Jupytext-generated notebooks from docs/notebook_source/*.py. * * NOTE: Fern's custom component pipeline uses the automatic JSX runtime. - * Do NOT import React -- the `react` module is not resolvable in Fern's build. - * This means class components (e.g. ErrorBoundary) are also not available. + * Only type-only imports from "react" are used (erased at compile time). * * Usage in MDX: * import { NotebookViewer } from "@/components/NotebookViewer"; @@ -27,6 +31,8 @@ export interface CellOutput { export interface NotebookCell { type: "markdown" | "code"; source: string; + /** Pre-rendered syntax-highlighted HTML (from Pygments). When present, used instead of escaped source. 
*/
+  source_html?: string;
   language?: string;
   outputs?: CellOutput[];
 }
@@ -89,18 +95,31 @@ function isSafeUrl(url: string): boolean {
   );
 }
 
+const UL_CLASS =
+  "[&>li]:relative [&>li]:before:text-(color:--grayscale-a10) mb-3 list-none pl-3 [&>li]:pl-3 [&>li]:before:absolute [&>li]:before:ml-[-22px] [&>li]:before:mt-[-1px] [&>li]:before:content-['⦁'] [&>li]:before:self-center";
+const OL_CLASS = "mb-3 list-outside list-decimal [&_ol]:!list-[lower-roman]";
+
 function renderMarkdown(markdown: string): string {
   if (typeof markdown !== "string") return "";
   let html = markdown
+    .replace(/<br\s*\/?>/gi, "\u0000BR\u0000")
     .replace(/&/g, "&amp;")
     .replace(/</g, "&lt;")
     .replace(/>/g, "&gt;")
-    .replace(/\[([^\]]+)\]\(([^)]+)\)/g, (_, text, url) =>
-      isSafeUrl(url)
-        ? `<a href="${escapeHtml(url)}">${text}</a>`
-        : escapeHtml(`[${text}](${url})`)
-    )
-    .replace(/\*\*(.*?)\*\*/g, "<strong>$1</strong>")
+    .replace(/\u0000BR\u0000/g, "<br/>")
+    .replace(/\[([^\]]+)\]\(([^)]+)\)/g, (_, text, url) => {
+      if (!isSafeUrl(url)) return escapeHtml(`[${text}](${url})`);
+      const isInternal = url.startsWith("/") || url.startsWith("#");
+      const attrs = isInternal
+        ? `href="${escapeHtml(url)}" class="fern-mdx-link"`
+        : `href="${escapeHtml(url)}" target="_blank" rel="noopener noreferrer" class="fern-mdx-link"`;
+      const icon = isInternal
+        ? ""
+        : '';
+      return `<a ${attrs}>${text}${icon}</a>`;
+    })
+    .replace(/\*\*(.*?)\*\*/g, '<strong>$1</strong>')
     .replace(/\*(.*?)\*/g, "<em>$1</em>")
     .replace(/`([^`]+)`/g, "<code>$1</code>");
   html = html
@@ -110,13 +129,209 @@
       if (/^### (.*)$/.test(line)) return `<h3>${line.slice(4)}</h3>`;
       if (/^## (.*)$/.test(line)) return `<h2>${line.slice(3)}</h2>`;
       if (/^# (.*)$/.test(line)) return `<h1>${line.slice(2)}</h1>`;
-      if (/^- (.*)$/.test(line)) return `<li>${line.slice(2)}</li>`;
-      if (/^\d+\. (.*)$/.test(line)) return `<li>${line.replace(/^\d+\. /, "")}</li>`;
+      if (/^- (.*)$/.test(line)) return `<li data-ul>${line.slice(2)}</li>`;
+      if (/^\d+\. (.*)$/.test(line)) return `<li data-ol>${line.replace(/^\d+\. /, "")}</li>`;
       if (line.trim() === "") return "";
       return `<p>${line}</p>`;
     })
     .join("\n");
-  return html.replace(/(<li>.*?<\/li>\n?)+/gs, (m) => `<ul>${m}</ul>`);
+  html = html.replace(
+    /(<li data-ol>.*?<\/li>\s*)+/gs,
+    (m) => `<ol class="${OL_CLASS}">${m.replace(/ data-ol/g, "").trim()}</ol>`
+  );
+  html = html.replace(
+    /(<li data-ul>.*?<\/li>\s*)+/gs,
+    (m) => `<ul class="${UL_CLASS}">${m.replace(/ data-ul/g, "").trim()}</ul>`
+  );
+  return html;
+}
+
+function handleCopy(content: string, button: HTMLButtonElement) {
+  navigator.clipboard.writeText(content).catch(() => {});
+  const originalHtml = button.innerHTML;
+  const originalLabel = button.getAttribute("aria-label") ?? "Copy code";
+  button.innerHTML = "Copied!";
+  button.setAttribute("aria-label", "Copied to clipboard");
+  setTimeout(() => {
+    button.innerHTML = originalHtml;
+    button.setAttribute("aria-label", originalLabel);
+  }, 1500);
+}
+
+const FLAG_ICON = (
+
+
+
+);
+
+const SCROLL_AREA_STYLE = `[data-radix-scroll-area-viewport]{scrollbar-width:none;-ms-overflow-style:none;-webkit-overflow-scrolling:touch;}[data-radix-scroll-area-viewport]::-webkit-scrollbar{display:none}`;
+
+const BUTTON_BASE_CLASS =
+  "focus-visible:ring-(color:--accent) rounded-2 inline-flex items-center justify-center gap-2 whitespace-nowrap text-sm font-medium transition-colors hover:transition-none focus-visible:outline-none focus-visible:ring-1 disabled:pointer-events-none disabled:opacity-50 [&_svg]:pointer-events-none [&_svg]:size-4 [&_svg]:shrink-0 text-(color:--grayscale-a11) hover:bg-(color:--accent-a3) hover:text-(color:--accent-11) pointer-coarse:size-9 size-7";
+
+/** Fern code block structure – matches Fern docs (header with language + buttons, pre with scroll area). */
+function FernCodeBlock({
+  title,
+  children,
+  className = "",
+  asPre = true,
+  copyContent,
+  showLineNumbers = false,
+  codeHtml,
+}: {
+  title: string;
+  children: ReactNode;
+  className?: string;
+  /** Use div instead of pre for content (needed when children include block elements like img/div). */
+  asPre?: boolean;
+  /** Raw text to copy when copy button is clicked. When provided, shows a copy button. */
+  copyContent?: string;
+  /** Show line numbers in a table layout (matches Fern's code block structure). */
+  showLineNumbers?: boolean;
+  /** Pre-rendered HTML for each line when showLineNumbers is true. Lines are split by newline. */
+  codeHtml?: string;
+}) {
+  const headerLabel = title === "Output" ? "Output" : title.charAt(0).toUpperCase() + title.slice(1);
+  const wrapperClasses =
+    "fern-code fern-code-block bg-card-background border-card-border rounded-3 shadow-card-grayscale relative mb-6 mt-4 flex w-full min-w-0 max-w-full flex-col border first:mt-0";
+  const preStyle = {
+    backgroundColor: "rgb(255, 255, 255)",
+    ["--shiki-dark-bg" as string]: "#212121",
+    color: "rgb(36, 41, 46)",
+    ["--shiki-dark" as string]: "#EEFFFF",
+  };
+
+  const scrollAreaContent = () => {
+    if (codeHtml == null) return null;
+    const lines = codeHtml.split("\n");
+    return (
    +