diff --git a/app/about-us/page.tsx b/app/about-us/page.tsx
index 2adb84e..6133141 100644
--- a/app/about-us/page.tsx
+++ b/app/about-us/page.tsx
@@ -136,7 +136,7 @@ const milestones = [
     year: "2024",
     title: "AI Engineering Focus",
     description:
-      "Deepened investment in AI engineering, AI security, and MLOps. Now shipping AI systems that reach production.",
+      "Deepened investment in AI engineering and MLOps. Now shipping AI systems that reach production.",
   },
   {
     year: "2025",
diff --git a/app/ai-safety/page.tsx b/app/ai-safety/page.tsx
index 5db0011..c124721 100644
--- a/app/ai-safety/page.tsx
+++ b/app/ai-safety/page.tsx
@@ -685,10 +685,10 @@ export default function AISafetyPage() {
-                View AI Security Services
+                View AI Engineering Services
@@ -1090,10 +1090,10 @@ export default function AISafetyPage() {
-                View AI Security Services
+                View AI Engineering Services
diff --git a/app/layout.tsx b/app/layout.tsx
index fa9d824..ef07838 100644
--- a/app/layout.tsx
+++ b/app/layout.tsx
@@ -176,7 +176,6 @@ export default function RootLayout({
   knowsAbout: [
     "Artificial Intelligence",
     "Machine Learning",
-    "AI Security",
     "LLM Applications",
     "Software Engineering",
     "Cloud Computing",
diff --git a/app/page.tsx b/app/page.tsx
index 02ca83c..e601713 100644
--- a/app/page.tsx
+++ b/app/page.tsx
@@ -81,10 +81,10 @@ const faqSchema = {
     },
     {
       "@type": "Question",
-      name: "How do you handle AI security and compliance?",
+      name: "How do you handle security and compliance?",
       acceptedAnswer: {
         "@type": "Answer",
-        text: "Security is built into every AI system we develop. Not bolted on later. We implement secure LLM architectures, prevent prompt injection, ensure data privacy, and meet compliance requirements (SOC 2, HIPAA, GDPR). Our AI security expertise comes from building production systems that enterprises trust.",
+        text: "Security is built into every system we develop. Not bolted on later. We implement secure architectures, ensure data privacy, and meet compliance requirements (SOC 2, HIPAA, GDPR). Our security practices come from building production systems that enterprises trust.",
       },
     },
   ],
diff --git a/app/services/[slug]/AISecurityPageClient.tsx b/app/services/[slug]/AISecurityPageClient.tsx
deleted file mode 100644
index 76a994d..0000000
--- a/app/services/[slug]/AISecurityPageClient.tsx
+++ /dev/null
@@ -1,402 +0,0 @@
-"use client";
-
-import { motion } from "framer-motion";
-import { PageHero } from "@/components/ui";
-import { CalInline } from "@/components/CalInline";
-import { RelatedExpertise, FAQSection } from "@/components/expertise";
-
-interface RelatedPage {
-  slug: string;
-  title: string;
-  description: string;
-  badge: string;
-}
-
-interface RiskItem {
-  title: string;
-  description: string;
-  icon: string;
-}
-
-interface ServiceItem {
-  title: string;
-  description: string;
-  features: string[];
-  output: string;
-  icon: string;
-}
-
-interface ProcessStep {
-  number: number;
-  title: string;
-  description: string;
-}
-
-interface FitItem {
-  text: string;
-}
-
-interface FAQItem {
-  question: string;
-  answer: string;
-}
-
-interface AISecurityPageData {
-  hero: {
-    badge: string;
-    headline: string;
-    headlineAccent: string;
-    description: string;
-  };
-  risks: RiskItem[];
-  services: ServiceItem[];
-  process: ProcessStep[];
-  goodFit: FitItem[];
-  notFit: FitItem[];
-  faqs: FAQItem[];
-  compliance: string[];
-}
-
-interface Props {
-  data: AISecurityPageData;
-  relatedPages: RelatedPage[];
-}
-
-// Icon components
-const RiskIcons: Record<string, React.FC<{ className?: string }>> = {
-  warning: ({ className }) => (
-    <svg className={className}>{/* … */}</svg>
-  ),
-  lock: ({ className }) => (
-    <svg className={className}>{/* … */}</svg>
-  ),
-  eye: ({ className }) => (
-    <svg className={className}>{/* … */}</svg>
-  ),
-  ban: ({ className }) => (
-    <svg className={className}>{/* … */}</svg>
-  ),
-  shield: ({ className }) => (
-    <svg className={className}>{/* … */}</svg>
-  ),
-  currency: ({ className }) => (
-    <svg className={className}>{/* … */}</svg>
-  ),
-};
-
-const ServiceIcons: Record<string, React.FC<{ className?: string }>> = {
-  monitor: ({ className }) => (
-    <svg className={className}>{/* … */}</svg>
-  ),
-  building: ({ className }) => (
-    <svg className={className}>{/* … */}</svg>
-  ),
-  map: ({ className }) => (
-    <svg className={className}>{/* … */}</svg>
-  ),
-  database: ({ className }) => (
-    <svg className={className}>{/* … */}</svg>
-  ),
-  checkCircle: ({ className }) => (
-    <svg className={className}>{/* … */}</svg>
-  ),
-  users: ({ className }) => (
-    <svg className={className}>{/* … */}</svg>
-  ),
-};
-
-export default function AISecurityPageClient({ data, relatedPages }: Props) {
-  return (
-    <div>
-      {/* Hero */}
-      <PageHero
-        badge={data.hero.badge}
-        headline={data.hero.headline}
-        headlineAccent={data.hero.headlineAccent}
-        description={data.hero.description}
-      />
-
-      {/* Risks Section */}
-      <section>
-        <span>The Risks</span>
-        <h2>What Can Go Wrong With Unsecured AI?</h2>
-        <div>
-          {data.risks.map((risk, idx) => {
-            const IconComponent = RiskIcons[risk.icon] || RiskIcons.warning;
-            return (
-              <motion.div key={idx}>
-                <IconComponent />
-                <h3>{risk.title}</h3>
-                <p>{risk.description}</p>
-              </motion.div>
-            );
-          })}
-        </div>
-      </section>
-
-      {/* Services Section */}
-      <section>
-        <span>What We Do</span>
-        <h2>AI Security Services</h2>
-        <div>
-          {data.services.map((service, idx) => {
-            const IconComponent = ServiceIcons[service.icon] || ServiceIcons.monitor;
-            return (
-              <motion.div key={idx}>
-                <IconComponent />
-                <h3>{service.title}</h3>
-                <p>{service.description}</p>
-                <ul>
-                  {service.features.map((feature, fIdx) => (
-                    <li key={fIdx}>{feature}</li>
-                  ))}
-                </ul>
-                <p>
-                  <span>Output:</span>{" "}
-                  {service.output}
-                </p>
-              </motion.div>
-            );
-          })}
-        </div>
-      </section>
-
-      {/* Process Section */}
-      <section>
-        <span>How We Work</span>
-        <h2>From Assessment to Remediation</h2>
-        <div>
-          {data.process.map((step, idx) => (
-            <motion.div key={idx}>
-              <span>{step.number}</span>
-              <h3>{step.title}</h3>
-              <p>{step.description}</p>
-            </motion.div>
-          ))}
-        </div>
-      </section>
-
-      {/* Fit Section */}
-      <section>
-        <span>Is This For You?</span>
-        <h2>Who We Work With</h2>
-        <div>
-          {/* Good Fit */}
-          <div>
-            <h3>Good Fit</h3>
-            <ul>
-              {data.goodFit.map((item, idx) => (
-                <li key={idx}>{item.text}</li>
-              ))}
-            </ul>
-          </div>
-
-          {/* Not a Fit */}
-          <div>
-            <h3>Not a Fit</h3>
-            <ul>
-              {data.notFit.map((item, idx) => (
-                <li key={idx}>{item.text}</li>
-              ))}
-            </ul>
-          </div>
-        </div>
-      </section>
-
-      {/* FAQ Section */}
-      {data.faqs.length > 0 && <FAQSection faqs={data.faqs} />}
-
-      {/* Related Expertise */}
-      {relatedPages.length > 0 && (
-        <RelatedExpertise pages={relatedPages} />
-      )}
-
-      {/* Book a Call Section */}
-      <section>
-        <h2>
-          Let's Find the Gaps{" "}
-          <span>Before Attackers Do</span>
-        </h2>
-        <p>Book a 30-minute call to scope your AI security assessment.</p>
-        <CalInline />
-      </section>
-
-      {/* Compliance Badges */}
-      <section>
-        <div>
-          {data.compliance.map((item, idx) => (
-            <span key={idx}>{item}</span>
-          ))}
-        </div>
-      </section>
-    </div>
-  );
-}
diff --git a/app/services/[slug]/ExpertisePageClient.tsx b/app/services/[slug]/ExpertisePageClient.tsx
index 6fdf408..34e1d48 100644
--- a/app/services/[slug]/ExpertisePageClient.tsx
+++ b/app/services/[slug]/ExpertisePageClient.tsx
@@ -43,10 +43,6 @@ const bookingSubtext: Record<string, string> = {
     "Share your AI project requirements, from model architecture to MLOps infrastructure. We'll outline a practical path from prototype to production-ready deployment.",
   "ai-agents":
     "Tell us about the workflows you want to automate with AI agents. We'll discuss tool integrations, safety guardrails, and a realistic timeline to deployment.",
-  "ai-security":
-    "Describe your AI system's architecture and security concerns. We'll assess prompt injection risks, output vulnerabilities, and recommend a defense strategy.",
-  "ai-privacy":
-    "Share your compliance requirements and data handling challenges. We'll discuss privacy-preserving architectures and how to meet GDPR, HIPAA, or SOC 2 standards.",
   "mobile-development":
     "Walk us through your mobile app vision, native or cross-platform. We'll help you choose the right approach and plan for a smooth App Store launch.",
   "frontend-development":
@@ -95,8 +91,6 @@ export default function ExpertisePageClient({
     serviceType:
       expertise.slug === "software-testing-and-qa"
         ? ["Software Testing", "Quality Assurance", "Test Automation", "Performance Testing", "API Testing"]
-        : expertise.slug === "ai-security"
-          ? ["AI Security", "LLM Security", "AI Threat Protection", "Secure AI Development"]
         : expertise.slug === "mobile-development"
           ? ["iOS App Development", "Android App Development", "Cross-Platform App Development", "Mobile Backend Development", "Mobile App Modernization"]
         : expertise.slug === "ai-engineering"
@@ -127,8 +121,6 @@ export default function ExpertisePageClient({
     category:
       expertise.slug === "software-testing-and-qa"
         ? ["Software Testing", "Quality Assurance", "Test Automation"]
-        : expertise.slug === "ai-security"
-          ? ["AI Security", "LLM Security", "AI Threat Modeling", "Secure AI Engineering"]
         : expertise.slug === "mobile-development"
           ? ["Mobile App Development", "iOS Development", "Android Development", "Cross-Platform Development"]
         : expertise.slug === "ai-engineering"
@@ -218,8 +210,6 @@ export default function ExpertisePageClient({
       [
         "ai-engineering",
         "ai-agents",
-        "ai-security",
-        "ai-privacy",
         "dotnet",
       ].includes(expertise.slug)
         ? undefined
diff --git a/app/services/[slug]/page.tsx b/app/services/[slug]/page.tsx
index d1ace81..fbfa195 100644
--- a/app/services/[slug]/page.tsx
+++ b/app/services/[slug]/page.tsx
@@ -10,7 +10,6 @@ import {
 import { JsonLd } from "@/components/seo";
 import ServicePageClient from "./ServicePageClient";
 import ExpertisePageClient from "./ExpertisePageClient";
-import AISecurityPageClient from "./AISecurityPageClient";
 
 interface Props {
   params: Promise<{ slug: string }>;
@@ -107,38 +106,6 @@ export default async function ServicePage({ params }: Props) {
     unknown
   >;
 
-  // Special handling for AI Security page with custom layout
-  if (slug === "ai-security" && frontmatter.aiSecurityData) {
-    const aiSecurityData = frontmatter.aiSecurityData as {
-      hero: {
-        badge: string;
-        headline: string;
-        headlineAccent: string;
-        description: string;
-      };
-      risks: Array<{ title: string; description: string; icon: string }>;
-      services: Array<{
-        title: string;
-        description: string;
-        features: string[];
-        output: string;
-        icon: string;
-      }>;
-      process: Array<{ number: number; title: string; description: string }>;
-      goodFit: Array<{ text: string }>;
-      notFit: Array<{ text: string }>;
-      faqs: Array<{ question: string; answer: string }>;
-      compliance: string[];
-    };
-
-    const relatedExpertise = (frontmatter.relatedExpertise as string[]) || [];
-    const relatedPages = getRelatedExpertiseForListing(relatedExpertise);
-
-    return (
-      <AISecurityPageClient data={aiSecurityData} relatedPages={relatedPages} />
-    );
-  }
-
   // Generate schema markup for the page
   const generateSchemas = (pageSlug: string, data: Record<string, unknown>) => {
     const schemas: Array<Record<string, unknown>> = [];
diff --git a/app/why-us/page.tsx b/app/why-us/page.tsx
index 5505d59..724440d 100644
--- a/app/why-us/page.tsx
+++ b/app/why-us/page.tsx
@@ -30,9 +30,9 @@ const differentiators = [
     id: "security",
     number: "03",
     headline: "Security Built In",
-    title: "AI Security From Day One",
+    title: "Security From Day One",
     description:
-      "Your AI systems are protected from the start. Prompt injection defense, data leakage prevention, and AI-specific security measures are built into every system we deliver. No security theater. Real protection for production AI.",
+      "Your systems are protected from the start. Data leakage prevention, access controls, and security measures are built into every system we deliver. No security theater. Real protection for production software.",
     stats: { value: "100%", label: "Security review pass rate" },
     accentColor: "teal" as const,
   },
@@ -66,7 +66,7 @@ const comparisonData = {
     "3-6 month discovery phases",
     "Junior developers learning AI on your dime",
     "Strategy decks and POCs that never ship",
-    "No AI security considerations",
+    "No security considerations",
     "Prototypes that need complete rebuilds",
     "Ongoing dependency on vendor",
   ],
@@ -78,7 +78,7 @@ const comparisonData = {
     "AI engineers deployed in 2-5 days",
     "Senior engineers from a certified best workplace",
     "Production AI from week one",
-    "AI security built in from day one",
+    "Security built in from day one",
     "Production-grade from first commit",
     "Full knowledge transfer, no lock-in",
   ],
diff --git a/app/work/[slug]/CaseStudyDetailClient.tsx b/app/work/[slug]/CaseStudyDetailClient.tsx
index 2beee20..c0fe83e 100644
--- a/app/work/[slug]/CaseStudyDetailClient.tsx
+++ b/app/work/[slug]/CaseStudyDetailClient.tsx
@@ -28,9 +28,6 @@ const clientLogos: Record<string, string> = {
   "Aster": "/logos/client/aster.svg",
   "Workshop Ventures": "/logos/client/workshopventure.svg",
   "WorkshopVentures": "/logos/client/workshopventure.svg",
-  "Fellou AI": "/assets/case-studies/Fellou.webp",
-  "Fellou": "/assets/case-studies/Fellou.webp",
-  "FellouAI": "/assets/case-studies/Fellou.webp",
   "MCLabs": "/assets/case-studies/mclabs.png",
   "MC Labs": "/assets/case-studies/mclabs.png",
   "MC labs": "/assets/case-studies/mclabs.png",
diff --git a/components/blog/BlogCTA.tsx b/components/blog/BlogCTA.tsx
index 7271fd7..f52b700 100644
--- a/components/blog/BlogCTA.tsx
+++ b/components/blog/BlogCTA.tsx
@@ -52,14 +52,6 @@ const categoryCTAMap: Record = {
     secondaryLabel: "View Our Work",
     secondaryHref: "/work",
   },
-  "ai-security": {
-    headingLine1: "Need to Secure Your",
-    headingLine2: "AI Systems?",
-    description:
-      "From prompt injection defense to model security, we help teams ship AI systems that are secure by design.",
-    secondaryLabel: "Explore AI Security Services",
-    secondaryHref: "/services/ai-security",
-  },
   "cloud-infrastructure": {
     headingLine1: "Ready to Scale Your",
     headingLine2: "Cloud Infrastructure?",
diff --git a/components/footer.tsx b/components/footer.tsx
index a17f488..34318a2 100644
--- a/components/footer.tsx
+++ b/components/footer.tsx
@@ -8,8 +8,6 @@ const footerLinks = {
   aiServices: [
     { label: "LLM Applications", href: "/services/ai-engineering" },
     { label: "AI Agents", href: "/services/ai-agents" },
-    { label: "Threat Protection", href: "/services/ai-security" },
-    { label: "Data & Privacy", href: "/services/ai-privacy" },
   ],
   productEngineering: [
     { label: "Frontend Development", href: "/services/frontend-development" },
diff --git a/components/sections/FAQ.tsx b/components/sections/FAQ.tsx
index 6bc73c8..fc535f2 100644
--- a/components/sections/FAQ.tsx
+++ b/components/sections/FAQ.tsx
@@ -24,11 +24,6 @@ const faqs = [
     answer:
       "Timeline depends on scope and approach. AI Sprints deliver working prototypes in 2-4 weeks. Full product builds with embedded teams typically reach MVP in 8-12 weeks. Enterprise AI systems requiring compliance and security review take 17-20 weeks from architecture to production deployment.",
   },
-  {
-    question: "How do you handle AI security?",
-    answer:
-      "AI security is built into every system we deliver. We implement prompt injection defense, data leakage prevention, access controls, and AI-specific security measures from day one. Your AI systems pass enterprise security review.",
-  },
   {
     question: "What makes you different from AI consultancies?",
     answer:
diff --git a/components/sections/Services.tsx b/components/sections/Services.tsx
index d5318d3..47d7682 100644
--- a/components/sections/Services.tsx
+++ b/components/sections/Services.tsx
@@ -32,28 +32,6 @@ const services = [
       "MLOps",
     ],
   },
-  {
-    name: "AI Security",
-    description:
-      "Protect your AI systems from emerging threats. Your models stay safe from prompt injection, jailbreaks, and data leakage with security measures designed specifically for AI workloads.",
-    href: "/services/ai-security",
-    icon: (
-      <svg>{/* … */}</svg>
-    ),
-    tags: ["Threat Protection", "Prompt Defense", "Data Privacy", "Compliance"],
-  },
   {
     name: "Product Engineering",
     description:
diff --git a/content/blog/_authors.yaml b/content/blog/_authors.yaml
index 3b86039..d6b25ad 100644
--- a/content/blog/_authors.yaml
+++ b/content/blog/_authors.yaml
@@ -15,10 +15,3 @@ engineering-team:
   avatar: "/team/engineering.jpg"
   bio: "Insights from Procedure's team of AI and product engineers building production systems for enterprise clients."
   linkedin: "https://in.linkedin.com/company/procedurehq"
-
-ai-security-team:
-  name: "AI Security Team"
-  role: "Security Engineering"
-  avatar: "/team/security.jpg"
-  bio: "Procedure's dedicated AI security specialists focused on protecting production AI systems from emerging threats."
-  linkedin: "https://in.linkedin.com/company/procedurehq"
diff --git a/content/blog/_categories.yaml b/content/blog/_categories.yaml
index cc9f764..044fb07 100644
--- a/content/blog/_categories.yaml
+++ b/content/blog/_categories.yaml
@@ -7,11 +7,6 @@ llm-engineering:
   description: "Building reliable LLM applications at scale—prompting strategies, fine-tuning approaches, and inference optimization for production workloads."
   color: "teal"
 
-ai-security:
-  name: "AI Security"
-  slug: "ai-security"
-  description: "Securing AI systems against adversarial attacks, prompt injection, model theft, and emerging threat vectors in production environments."
-  color: "blue"
 
 production-systems:
   name: "Production Systems"
diff --git a/content/services/ai-agents.mdx b/content/services/ai-agents.mdx
index e40cad0..4c94ccc 100644
--- a/content/services/ai-agents.mdx
+++ b/content/services/ai-agents.mdx
@@ -36,7 +36,6 @@ technologies:
 relatedExpertise:
   - ai-engineering
   - backend-development
-  - ai-security
 faqs:
   - question: "What is the difference between an AI agent and a chatbot?"
     answer: "Chatbots respond to queries. Agents take action. An AI agent can research information, update databases, send emails, schedule meetings, and execute multi-step workflows autonomously, not just suggest what you should do."
diff --git a/content/services/ai-engineering.mdx b/content/services/ai-engineering.mdx
index 4913c25..6da0539 100644
--- a/content/services/ai-engineering.mdx
+++ b/content/services/ai-engineering.mdx
@@ -34,8 +34,6 @@ technologies:
   - MLflow
   - Airflow
 relatedExpertise:
-  - ai-security
-  - ai-privacy
   - backend-development
 whoWeWorkWith:
   audiences:
diff --git a/content/services/ai-privacy.mdx b/content/services/ai-privacy.mdx
deleted file mode 100644
index 7e7e6d0..0000000
--- a/content/services/ai-privacy.mdx
+++ /dev/null
@@ -1,63 +0,0 @@
----
-title: "AI Privacy"
-headline: "AI Privacy"
-headlineAccent: "Without Compromising Performance"
-tagline: "Protect sensitive data without sacrificing AI capabilities."
-description: |
-  AI systems can memorize training data, leak PII in outputs, and send sensitive information to third-party APIs. We build AI applications that protect data privacy by design, meeting compliance requirements while delivering the AI capabilities your business needs.
-capabilities:
-  - title: "Data Leakage Prevention"
-    description: "Implement output scanning, PII detection, and content filtering to prevent sensitive data from appearing in AI responses."
-    icon: "shield"
-  - title: "Privacy-Preserving Architectures"
-    description: "Design systems that minimize data exposure: on-premise deployments, data anonymization pipelines, and zero-trust API patterns."
-    icon: "lock"
-  - title: "GDPR & HIPAA Compliance"
-    description: "Build AI applications that meet regulatory requirements from day one. Data subject rights, audit trails, and consent management built in."
-    icon: "document"
-  - title: "Model Privacy Audits"
-    description: "Assess your models for memorization risks and data leakage vulnerabilities. Know what data your AI might expose."
-    icon: "eye"
-  - title: "Secure Data Pipelines"
-    description: "Build encrypted, access-controlled data pipelines for AI training and inference. Protect data at rest and in transit."
-    icon: "database"
-  - title: "Third-Party Risk Management"
-    description: "Evaluate and mitigate risks from LLM API providers. Understand what data leaves your environment and implement appropriate controls."
-    icon: "globe"
-technologies:
-  - Private AI
-  - Presidio
-  - Microsoft Purview
-  - AWS Macie
-  - Vault
-  - SOPS
-  - Age
-relatedExpertise:
-  - ai-security
-  - cloud
-  - backend-development
-faqs:
-  - question: "Can we use LLM APIs while maintaining data privacy?"
-    answer: "Yes, with the right architecture. Options include using enterprise API tiers with data processing agreements, on-premise deployments, or hybrid approaches that keep sensitive data local while leveraging cloud AI for non-sensitive tasks."
-  - question: "How do you prevent AI from exposing PII?"
-    answer: "We implement multiple layers: input sanitization to remove PII before it reaches models, output scanning to catch leaked data, and architectural patterns that minimize PII exposure throughout the system."
-  - question: "What about AI and GDPR data subject rights?"
-    answer: "We build systems that support right to access, erasure, and portability for data used in AI systems, including mechanisms to track which data influenced model training."
-  - question: "Can you make existing AI systems compliant?"
-    answer: "Usually, yes. We audit current systems, identify gaps, and implement technical controls to meet compliance requirements. Some cases require architectural changes, which we can phase in without disrupting operations."
-cta:
-  title: "Build AI That Protects Data"
-  description: "Talk to our engineers about building privacy-first AI systems. We'll help you meet compliance requirements without sacrificing AI capabilities."
-  buttonText: "Schedule a Call"
-  buttonLink: "/contact"
-seo:
-  title: "AI Privacy & Data Protection | Compliance | Procedure"
-  description: "Build privacy-first AI systems that prevent data leakage and meet GDPR, HIPAA, and SOC 2 requirements. Enterprise AI privacy engineering."
----
-
-## Why Choose Procedure for AI Privacy
-
-- **Compliance expertise**: We've built HIPAA-compliant and GDPR-ready AI systems
-- **Technical depth**: Privacy isn't just policy. We implement the technical controls
-- **Privacy by design**: We architect for privacy from the start, not as a patch
-- **Pragmatic approach**: We balance privacy requirements with business needs
diff --git a/content/services/ai-security.mdx b/content/services/ai-security.mdx
deleted file mode 100644
index 840ec2d..0000000
--- a/content/services/ai-security.mdx
+++ /dev/null
@@ -1,144 +0,0 @@
----
-title: "AI Security Services"
-relatedExpertise:
-  - ai-engineering
-  - ai-privacy
-  - backend-development
-aiSecurityData:
-  hero:
-    badge: "AI Security Services"
-    headline: "Secure Your AI Systems Before They"
-    headlineAccent: "Become Liabilities"
-    description: "AI red teaming, security assessments, and architecture reviews for teams shipping LLMs to production."
-  risks:
-    - title: "Prompt Injection Attacks"
-      description: "Attackers manipulate your AI to leak data, bypass controls, or execute unintended actions."
-      icon: "warning"
-    - title: "Sensitive Data Exposure"
-      description: "Your LLM reveals customer PII, internal documents, API keys, or proprietary information."
-      icon: "lock"
-    - title: "System Prompt Leakage"
-      description: "Competitors or attackers extract your proprietary prompts, revealing business logic."
-      icon: "eye"
-    - title: "Jailbreaks & Safety Bypass"
-      description: "Users bypass safety controls to generate harmful, illegal, or reputation-damaging content."
-      icon: "ban"
-    - title: "Compliance Failures"
-      description: "EU AI Act violations, SOC 2 gaps, or industry-specific regulations breached."
-      icon: "shield"
-    - title: "Uncontrolled Costs"
-      description: "Attackers or bugs cause runaway API bills through resource exhaustion attacks."
-      icon: "currency"
-  services:
-    - title: "AI Red Teaming & Penetration Testing"
-      description: "We attack your AI systems before real attackers do."
-      features:
-        - "Prompt injection testing (direct & indirect)"
-        - "Jailbreak and safety bypass attempts"
-        - "System prompt extraction attacks"
-        - "Data exfiltration scenarios"
-        - "Abuse vector identification"
-      output: "Vulnerability report with severity ratings and fixes"
-      icon: "monitor"
-    - title: "LLM Security Architecture Review"
-      description: "Security review of your AI system design, before or after launch."
-      features:
-        - "Model access control & isolation"
-        - "API security & credential management"
-        - "Third-party model integration risks"
-        - "Input validation & output filtering"
-        - "Logging, monitoring & audit trails"
-      output: "Architecture recommendations with implementation guidance"
-      icon: "building"
-    - title: "AI Threat Modeling"
-      description: "Map every way your AI system can be attacked."
-      features:
-        - "Attack surface identification"
-        - "Threat actor profiling"
-        - "Risk prioritization by business impact"
-        - "Security control gap analysis"
-        - "Mitigation roadmap"
-      output: "Threat model document + prioritized risk register"
-      icon: "map"
-    - title: "AI Data Security & Privacy"
-      description: "Prevent your AI from leaking what it shouldn't."
-      features:
-        - "PII leakage detection & prevention"
-        - "Training data exposure risks"
-        - "Model memorization assessment"
-        - "Data extraction attack testing"
-        - "Privacy-preserving design guidance"
-      output: "Data security assessment + remediation plan"
-      icon: "database"
-    - title: "Compliance & Framework Alignment"
-      description: "Get your AI systems audit-ready."
-      features:
-        - "OWASP Top 10 for LLMs (2025)"
-        - "EU AI Act compliance assessment"
-        - "NIST AI Risk Management Framework"
-        - "ISO/IEC 42001 alignment"
-        - "Industry-specific: Healthcare, Finance"
-      output: "Compliance gap analysis + remediation roadmap"
-      icon: "checkCircle"
-    - title: "Ongoing AI Security Support"
-      description: "Security isn't one-time. We stay with you."
-      features:
-        - "Embedded security for AI teams"
-        - "Security review before releases"
-        - "AI incident response"
-        - "Continuous monitoring setup"
-        - "Team training on secure AI dev"
-      output: "Retainer-based support with SLAs"
-      icon: "users"
-  process:
-    - number: 1
-      title: "Scope"
-      description: "Understand your AI system, tech stack, and threat model. Define assessment boundaries."
-    - number: 2
-      title: "Assess"
-      description: "Red team tests, architecture review, code analysis. Find vulnerabilities before attackers do."
-    - number: 3
-      title: "Report"
-      description: "Clear findings with severity ratings, proof-of-concept exploits, and remediation guidance."
-    - number: 4
-      title: "Fix"
-      description: "Help implement fixes or verify your team's remediations. Retest to confirm closure."
-  goodFit:
-    - text: "Teams deploying LLMs to production (not just experimenting)"
-    - text: "Companies with compliance requirements (healthcare, finance, enterprise)"
-    - text: "Startups about to raise or facing security due diligence"
-    - text: "Teams that got burned by an AI security incident"
-    - text: "Engineering teams building AI-powered products"
-  notFit:
-    - text: "Just exploring AI with no production plans"
-    - text: "Looking for a checkbox audit (we do real testing)"
-    - text: "Need generic cybersecurity (we specialize in AI)"
-    - text: "Want theoretical consulting without hands-on work"
-  faqs:
-    - question: "What's the difference between AI security and regular application security?"
-      answer: "AI systems have unique attack vectors, including prompt injection, jailbreaks, data leakage through model outputs, and system prompt extraction, that traditional security testing doesn't cover. We specialize in these AI-specific risks."
-    - question: "Do you only work with companies using OpenAI/ChatGPT?"
-      answer: "No. We secure systems built on any LLM: OpenAI, Anthropic Claude, open-source models (Llama, Mistral), or custom fine-tuned models. The attack vectors are similar across providers."
-    - question: "How long does an AI security assessment take?"
-      answer: "Most assessments complete in 2-4 weeks depending on scope. A focused red team engagement on a single AI feature can be faster. Full architecture reviews of complex systems take longer."
-    - question: "Do you help with EU AI Act compliance?"
-      answer: "Yes. We assess your AI systems against EU AI Act requirements, help determine risk classification, and document compliance. We also align with ISO/IEC 42001 and NIST AI RMF."
-    - question: "What do we get at the end of an engagement?"
-      answer: "A detailed report with findings, severity ratings, proof-of-concept demonstrations where applicable, and specific remediation guidance. We don't just find problems. We help you fix them."
-  compliance:
-    - "OWASP LLM Top 10"
-    - "NIST AI RMF"
-    - "EU AI Act"
-    - "ISO/IEC 42001"
-seo:
-  title: "AI Security Services | LLM Security | AI Red Teaming | Procedure"
-  description: "AI red teaming, security assessments, and architecture reviews for teams shipping LLMs to production. Prompt injection testing, compliance alignment, and ongoing security support."
----
-
-## Get Started With Procedure
-
-Whether you need AI red teaming, architecture reviews, or ongoing security support, we're here to help.
-
-**→ [Schedule a call with our AI security team](/contact-us)**
-
-AI security for teams shipping LLMs to production.
diff --git a/content/services/cloud.mdx b/content/services/cloud.mdx
index 727b0b4..f1e8916 100644
--- a/content/services/cloud.mdx
+++ b/content/services/cloud.mdx
@@ -36,7 +36,6 @@ technologies:
 relatedExpertise:
   - kubernetes
   - backend-development
-  - ai-privacy
 faqs:
   - question: "Which cloud provider should we use?"
     answer: "It depends on your existing investments, team expertise, and specific workloads. AWS offers the broadest services, GCP excels at data and ML, Azure integrates well with Microsoft ecosystems. We help you choose, or design for multi-cloud when that makes sense."
diff --git a/lib/content.ts b/lib/content.ts
index 47250bd..80078f7 100644
--- a/lib/content.ts
+++ b/lib/content.ts
@@ -816,7 +816,7 @@ export function getExpertiseForListing(
   const { frontmatter, content } = expertise;
 
   // Return null if required fields for expertise pages are missing
-  // (e.g., ai-security uses custom layout without capabilities/technologies)
+  // (e.g., pages with custom layout without capabilities/technologies)
   if (!frontmatter.capabilities || !frontmatter.technologies) {
     return null;
   }
diff --git a/lib/expertise-data.tsx b/lib/expertise-data.tsx
index bffd834..87d9019 100644
--- a/lib/expertise-data.tsx
+++ b/lib/expertise-data.tsx
@@ -544,7 +544,7 @@ export const expertisePages: Record = {
         "Yes. We've integrated LLM applications with legacy systems, on-premise deployments, and every major cloud provider. Our engineers embed with your team and work in your codebase.",
       },
     ],
-    relatedExpertise: ["ai-agents", "ai-security", "ai-privacy"],
+    relatedExpertise: ["ai-agents", "backend"],
   },
 
   "ai-agents": {
@@ -644,206 +644,7 @@ export const expertisePages: Record = {
         "Most clients see a working prototype within the first week. Production deployment typically takes 2-4 weeks, depending on the complexity of your workflows and integration requirements.",
       },
     ],
-    relatedExpertise: ["llm-applications", "backend", "ai-security"],
-  },
-
-  "ai-security": {
-    slug: "ai-security",
-    category: "ai-ml",
-    meta: {
-      title: "AI Security & Threat Protection | LLM Safety | Procedure",
-      description:
-        "Protect your AI systems from prompt injection, jailbreaks, and adversarial attacks. Enterprise-grade LLM security from architecture to deployment.",
-    },
-    hero: {
-      badge: "AI Security",
-      headline: "Secure AI Systems From",
-      headlineAccent: "Architecture to Production",
-      tagline: "LLM security isn't an afterthought. It's foundational.",
-      description:
-        "Prompt injection. Jailbreaks. Data exfiltration. The attack surface for AI systems is different from traditional software, and most teams aren't prepared. We build AI security into your systems from the ground up, protecting against both known threats and emerging attack vectors.",
-    },
-    capabilities: [
-      {
-        icon: "shield",
-        title: "Prompt Injection Defense",
-        description:
-          "Implement input sanitization, prompt isolation, and detection systems that identify and block injection attacks before they reach your models.",
-      },
-      {
-        icon: "lock",
-        title: "Jailbreak Prevention",
-        description:
-          "Build multi-layer defenses against attempts to bypass safety guidelines. Our systems detect creative attacks that rule-based filters miss.",
-      },
-      {
-        icon: "eye",
-        title: "Output Filtering & Validation",
-        description:
-          "Scan LLM outputs for sensitive data, harmful content, and policy violations before they reach users. Defense in depth for AI systems.",
-      },
-      {
-        icon: "terminal",
-        title: "Red Team Testing",
-        description:
-          "Proactively attack your AI systems to find vulnerabilities before adversaries do. We bring deep expertise in AI-specific attack techniques.",
-      },
-      {
-        icon: "layers",
-        title: "Security Architecture Review",
-        description:
-          "Audit your AI system architecture for security gaps. From data pipelines to model serving, we identify risks across your AI stack.",
-      },
-      {
-        icon: "document",
-        title: "Incident Response Planning",
-        description:
-          "Develop playbooks for AI security incidents. When attacks happen, know exactly how to detect, contain, and recover.",
-      },
-    ],
-    technologies: [
-      "OWASP LLM",
-      "Guardrails AI",
-      "NeMo Guardrails",
-      "Rebuff",
-      "LLM Guard",
-      "Lakera",
-      "Prompt Armor",
-    ],
-    whyProcedure: [
-      "AI-native security expertise: We understand attacks unique to LLM systems",
-      "Offensive + defensive: We've red-teamed AI systems and built the defenses",
-      "Production-tested: Our security patterns protect systems handling real data",
-      "Compliance-ready: We help you meet SOC 2, HIPAA, and emerging AI regulations",
-    ],
-    cta: {
-      headline: "Secure Your AI Before You Ship It",
-      description:
-        "Get a security assessment of your AI systems from engineers who understand both AI and security. Find vulnerabilities before attackers do.",
-    },
-    faqs: [
-      {
-        question: "What is prompt injection and why should I care?",
-        answer:
-          "Prompt injection is when attackers craft inputs that manipulate your LLM into ignoring its instructions. It can lead to data leakage, unauthorized actions, and reputation damage. It's the SQL injection of the AI era, and just as dangerous.",
-      },
-      {
-        question:
-          "How is AI security different from traditional application security?",
-        answer:
-          "AI systems have unique attack surfaces: prompts can be manipulated, models can be confused by adversarial inputs, and outputs need filtering for harmful content. Traditional WAFs and security tools don't catch these AI-specific threats.",
-      },
-      {
-        question: "Can you secure an existing AI application?",
-        answer:
-          "Yes. We can add security layers to deployed systems, though building security in from the start is more effective. We'll assess your current state and prioritize improvements based on risk.",
-      },
-      {
-        question: "How do you stay current with AI security threats?",
-        answer:
-          "Our team actively researches emerging attack techniques, participates in red team exercises, and monitors the AI security community. We update our defense patterns as the threat landscape evolves.",
-      },
-    ],
-    relatedExpertise: ["ai-privacy", "llm-applications", "cloud"],
-  },
-
-  "ai-privacy": {
-    slug: "ai-privacy",
-    category: "ai-ml",
-    meta: {
-      title: "AI Privacy & Data Protection | Compliance | Procedure",
-      description:
-        "Build privacy-first AI systems that prevent data leakage and meet GDPR, HIPAA, and SOC 2 requirements. Enterprise AI privacy engineering.",
-    },
-    hero: {
-      badge: "AI Privacy",
-      headline: "Privacy-First AI Development That",
-      headlineAccent: "Doesn't Compromise Results",
-      tagline: "Protect sensitive data without sacrificing AI capabilities.",
-      description:
-        "AI systems can memorize training data, leak PII in outputs, and send sensitive information to third-party APIs. We build AI applications that protect data privacy by design, meeting compliance requirements while delivering the AI capabilities your business needs.",
-    },
-    capabilities: [
-      {
-        icon: "shield",
-        title: "Data Leakage Prevention",
-        description:
-          "Implement output scanning, PII detection, and content filtering to prevent sensitive data from appearing in AI responses.",
-      },
-      {
-        icon: "lock",
-        title: "Privacy-Preserving Architectures",
-        description:
-          "Design systems that minimize data exposure: on-premise deployments, data anonymization pipelines, and zero-trust API patterns.",
-      },
-      {
-        icon: "document",
-        title: "GDPR & HIPAA Compliance",
-        description:
-          "Build AI applications that meet regulatory requirements from day one. Data subject rights, audit trails, and consent management built in.",
-      },
-      {
-        icon: "eye",
-        title: "Model Privacy Audits",
-        description:
-          "Assess your models for memorization risks and data leakage vulnerabilities. Know what data your AI might expose.",
-      },
-      {
-        icon: "database",
-        title: "Secure Data Pipelines",
-        description:
-          "Build encrypted, access-controlled data pipelines for AI training and inference. Protect data at rest and in transit.",
-      },
-      {
-        icon: "globe",
-        title: "Third-Party Risk Management",
-        description:
-          "Evaluate and mitigate risks from LLM API providers. Understand what data leaves your environment and implement appropriate controls.",
-      },
-    ],
-    technologies: [
-      "Private AI",
-      "Presidio",
-      "Microsoft Purview",
-      "AWS Macie",
-      "Vault",
-      "SOPS",
-      "Age",
-    ],
-    whyProcedure: [
-      "Compliance expertise: We've built HIPAA-compliant and GDPR-ready AI systems",
-      "Technical depth: Privacy isn't just policy. We implement the technical controls",
-      "Privacy by design: We architect for privacy from the start, not as a patch",
-      "Pragmatic approach: We balance privacy requirements with business needs",
-    ],
-    cta: {
-      headline: "Build AI That Protects Data",
-      description:
-        "Talk to our engineers about building privacy-first AI systems. We'll help you meet compliance requirements without sacrificing AI capabilities.",
-    },
-    faqs: [
-      {
-        question: "Can we use LLM APIs while maintaining data privacy?",
-        answer:
-          "Yes, with the right architecture. Options include using enterprise API tiers with data processing agreements, on-premise deployments, or hybrid approaches that keep sensitive data local while leveraging cloud AI for non-sensitive tasks.",
-      },
-      {
-        question: "How do you prevent AI from exposing PII?",
-        answer:
-          "We implement multiple layers: input sanitization to remove PII before it reaches models, output scanning to catch leaked data, and architectural patterns that minimize PII exposure throughout the system.",
-      },
-      {
-        question: "What about AI and GDPR data subject rights?",
-        answer:
-          "We build systems that support right to access, erasure, and portability for data used in AI systems, including mechanisms to track which data influenced model training.",
-      },
-      {
-        question: "Can you make existing AI systems compliant?",
-        answer:
-          "Usually, yes. We audit current systems, identify gaps, and implement technical controls to meet compliance requirements. Some cases require architectural changes, which we can phase in without disrupting operations.",
-      },
-    ],
-    relatedExpertise: ["ai-security", "cloud", "backend"],
+    relatedExpertise: ["llm-applications", "backend", "cloud"],
   },
 
   frontend: {
@@ -1343,7 +1144,7 @@ export const expertisePages: Record = {
         "We can. Beyond initial architecture, we offer ongoing optimization, security monitoring, and infrastructure support as part of our engagement models.",
       },
     ],
-    relatedExpertise: ["kubernetes", "backend", "ai-privacy"],
+    relatedExpertise: ["kubernetes", "backend", "cloud"],
   },
 
   kubernetes: {
diff --git a/lib/navigation-data.ts b/lib/navigation-data.ts
index e6b421b..9838a9d 100644
--- a/lib/navigation-data.ts
+++ b/lib/navigation-data.ts
@@ -91,26 +91,7 @@ export const navigationData = {
       icon: "cpu",
     },
   ],
-  sections: [
-    {
-      title: "AI Security",
-      items: [
-        {
-          label: "Threat Protection",
-          description:
-            "Guard against prompt injection, jailbreaks, and adversarial attacks.",
-          href: "/services/ai-security",
-          icon: "shield",
-        },
-        {
-          label: "Data & Privacy",
-          description: "Prevent data leakage and ensure compliance.",
-          href: "/services/ai-privacy",
-          icon: "lock",
-        },
-      ],
-    },
-  ],
+  sections: [],
 },
 {
   title: "Product Engineering",
@@ -263,7 +244,7 @@ export const navigationData = {
   {
     label: "About Procedure",
     description:
-      "AI engineering and security with proven delivery excellence. Learn about our team.",
+      "AI engineering with proven delivery excellence. Learn about our team.",
     href: "/about-us",
     icon: "info",
   },
diff --git a/public/llms.txt b/public/llms.txt
index f7186e6..f1d6e94 100644
--- a/public/llms.txt
+++ b/public/llms.txt
@@ -22,7 +22,6 @@ Procedure is an AI engineering company that builds production-ready AI products
 ### AI & Machine Learning
 - **LLM Applications**: ChatGPT, Claude, and open-source LLM integrations. RAG, fine-tuning, and prompt engineering
 - **AI Agents & Automation**: Autonomous agents, workflow automation, and AI-powered business processes
-- **AI Security**: Guard against prompt injection, jailbreaks, and adversarial attacks
 - **AI Privacy**: Prevent data leakage and ensure compliance
 
 ### Product Engineering
diff --git a/scripts/auto-interlink-blogs.ts b/scripts/auto-interlink-blogs.ts
index a468e02..0ca984b 100644
--- a/scripts/auto-interlink-blogs.ts
+++ b/scripts/auto-interlink-blogs.ts
@@ -36,7 +36,6 @@ interface LinkSuggestion {
 // Service page mapping: keywords → service page
 const SERVICE_MAPPINGS: Record<string, { url: string; title: string }> = {
   'ai engineering': { url: '/services/ai-engineering', title: 'AI Engineering Services' },
-  'ai security': { url: '/services/ai-security', title: 'AI Security Services' },
   'ai agents': { url: '/services/ai-engineering', title: 'AI Engineering Services' },
   'llm': { url: '/services/ai-engineering', title: 'AI Engineering Services' },
   'machine learning': { url: '/services/ai-engineering', title: 'AI Engineering Services' },
@@ -58,10 +57,6 @@ const CONTENT_CLUSTERS: Record<string, string[]> = {
     'model-context-protocol-mcp-guide',
     'notion-mcp-connect-ai-agents'
   ],
-  'ai-security': [
-    'breaking-docker-llm-ai-security',
-    'ai-security-best-practices'
-  ],
   'llm-systems': [
     'server-sent-events-sse-llm-streaming',
     'model-context-protocol-mcp-guide'