πŸ›‘οΈ SafeAgentSkills

Prompt Shield Publish

· v3.0.6

High Risk

Prompt Injection Firewall for AI agents. 113 detection patterns, 14 threat categories, zero dependencies. Protects against fake authority, command injection, memory poisoning, skill malware, crypto spam, and more. Hash-chain tamper-proof whitelist with mandatory peer review. Claude Code hook integration.

H:4 D:2 A:1 C:1

⚠️ Hazard Flags

EXEC FS_READ_WORKSPACE FS_WRITE_WORKSPACE FS_WRITE_USER FS_WRITE_SYSTEM NET_EGRESS_ANY PI_WEB PI_DOCUMENTS

📋 Capabilities

Execution

  • ✅ Shell execution
  • ❌ Code execution
  • ❌ Install dependencies
  • ❌ Persistence
  • Privilege: user

Filesystem

  • ✅ Read workspace
  • ✅ Write workspace
  • ❌ Read home
  • ✅ Write home
  • ❌ Read system
  • ✅ Write system
  • ❌ Delete

Network

  • Egress: any
  • ❌ Ingress

Credentials

  • ❌ Environment vars
  • ❌ Credential files
  • ❌ Browser data
  • ❌ Keychain

Actions

  • ❌ send messages
  • ❌ post public
  • ❌ purchase
  • ❌ transfer money
  • ❌ deploy
  • ❌ delete external

🔒 Containment

Level: maximum

Required:
  • SANDBOX_CONTAINER: Shell execution capability
Recommended:
  • LOG_ACTIONS: Audit trail for all actions

⚡ Risks

Prompt injection patterns detected in: shield.py — Severity: high

Mitigation: Review SKILL.md for hidden instructions. Do not use with untrusted input.

Unauthorized tool use: MCP_SYS_CRITICAL_ACCESS, INSTRUCTED_BINARY_INSTALL — Severity: high

Mitigation: Avoid accessing system directories unless absolutely necessary.

Command injection risk: COMMAND_INJECTION_USER_INPUT — Severity: medium

Mitigation: Validate and sanitize all user inputs before using in commands

Social engineering indicators: SOCIAL_ENG_VAGUE_DESCRIPTION — Severity: low

Mitigation: Provide clear, detailed description of skill functionality

Want a deeper analysis?

This report was generated by static analysis. Get an LLM-powered deep review with behavioral reasoning and attack surface mapping.

🧠 Deep Analysis — $5.00

🚨 Incident Response

Kill switch: Stop the agent process

Containment: Review logs for unexpected actions

Recovery: Depends on skill capabilities

📄 Raw SSDS JSON (click to expand)
{
  "meta": {
    "document_id": "ssds:auto:prompt-shield:3.0.6",
    "ssds_version": "0.2.0",
    "scanner_version": "0.4.0+fe6fd9123d50",
    "created_at": "2026-03-05T03:19:54.982Z",
    "created_by": {
      "agent": "safeagentskills-cli/generate-ssds"
    },
    "language": "en",
    "notes": "Auto-generated SSDS. Manual review recommended."
  },
  "skill": {
    "name": "Prompt Shield Publish",
    "version": "3.0.6",
    "format": "agent_skill",
    "description": "Prompt Injection Firewall for AI agents. 113 detection patterns, 14 threat categories, zero dependencies. Protects against fake authority, command injection, memory poisoning, skill malware, crypto spam, and more. Hash-chain tamper-proof whitelist with mandatory peer review. Claude Code hook integration.",
    "publisher": "unknown",
    "source": {
      "channel": "local"
    },
    "artifact": {
      "sha256": "c3d16f61c1d9c046e391fef8953581e3bacbd42b4d53ace404fa3a73c23e91d5",
      "hash_method": "files_sorted"
    }
  },
  "capabilities": {
    "execution": {
      "can_exec_shell": true,
      "can_exec_code": false,
      "privilege_level": "user",
      "can_install_deps": false,
      "can_persist": false
    },
    "filesystem": {
      "reads_workspace": true,
      "reads_user_home": false,
      "reads_system": false,
      "writes_workspace": true,
      "writes_user_home": true,
      "writes_system": true,
      "can_delete": false
    },
    "network": {
      "egress": "any",
      "ingress": false
    },
    "credentials": {
      "reads_env_vars": false,
      "reads_credential_files": false,
      "reads_browser_data": false,
      "reads_keychain": false
    },
    "services": [],
    "actions": {
      "can_send_messages": false,
      "can_post_public": false,
      "can_purchase": false,
      "can_transfer_money": false,
      "can_deploy": false,
      "can_delete_external": false
    },
    "prompt_injection_surfaces": [
      "web",
      "documents"
    ],
    "content_types": [
      "general"
    ]
  },
  "hazards": {
    "hdac": {
      "H": 4,
      "D": 2,
      "A": 1,
      "C": 1
    },
    "flags": [
      "EXEC",
      "FS_READ_WORKSPACE",
      "FS_WRITE_WORKSPACE",
      "FS_WRITE_USER",
      "FS_WRITE_SYSTEM",
      "NET_EGRESS_ANY",
      "PI_WEB",
      "PI_DOCUMENTS"
    ],
    "custom_flags": [
      {
        "code": "PROMPT_INJECTION",
        "name": "Prompt Injection Risk",
        "description": "Contains prompt injection patterns in: shield.py"
      },
      {
        "code": "TOOL_ABUSE",
        "name": "Unauthorized Tool Use",
        "description": "MCP_SYS_CRITICAL_ACCESS, INSTRUCTED_BINARY_INSTALL: Access to critical system directories"
      },
      {
        "code": "SOCIAL_ENGINEERING",
        "name": "Social Engineering Risk",
        "description": "SOCIAL_ENG_VAGUE_DESCRIPTION: Skill description is too vague or missing"
      },
      {
        "code": "COMMAND_INJECTION",
        "name": "Command Injection Risk",
        "description": "COMMAND_INJECTION_USER_INPUT: User input used in command substitution - potential injection risk"
      }
    ],
    "confidence": {
      "level": "medium",
      "basis": [
        "static_analysis"
      ],
      "notes": "Detected 4 security patterns (6 vendored rule hits). Review recommended."
    },
    "rationale": {
      "H": "H4: Critical: Privilege escalation or malware detected",
      "D": "D2: Network + file access",
      "A": "A1: Local side effects only",
      "C": "C1: General content"
    }
  },
  "containment": {
    "level": "maximum",
    "required": [
      {
        "control": "SANDBOX_CONTAINER",
        "reason": "Shell execution capability"
      }
    ],
    "recommended": [
      {
        "control": "LOG_ACTIONS",
        "reason": "Audit trail for all actions"
      }
    ],
    "uncontained_risk": "Risk level depends on manual review of actual capabilities."
  },
  "risks": {
    "risks": [
      {
        "risk": "Prompt injection patterns detected in: shield.py",
        "severity": "high",
        "mitigation": "Review SKILL.md for hidden instructions. Do not use with untrusted input."
      },
      {
        "risk": "Unauthorized tool use: MCP_SYS_CRITICAL_ACCESS, INSTRUCTED_BINARY_INSTALL",
        "severity": "high",
        "mitigation": "Avoid accessing system directories unless absolutely necessary."
      },
      {
        "risk": "Command injection risk: COMMAND_INJECTION_USER_INPUT",
        "severity": "medium",
        "mitigation": "Validate and sanitize all user inputs before using in commands"
      },
      {
        "risk": "Social engineering indicators: SOCIAL_ENG_VAGUE_DESCRIPTION",
        "severity": "low",
        "mitigation": "Provide clear, detailed description of skill functionality"
      }
    ],
    "limitations": [
      "Static analysis only - runtime behavior not verified"
    ]
  },
  "incident_response": {
    "kill_switch": [
      "Stop the agent process"
    ],
    "containment": [
      "Review logs for unexpected actions"
    ],
    "recovery": [
      "Depends on skill capabilities"
    ]
  },
  "evidence": [
    {
      "evidence_id": "EV:file-1",
      "type": "file_excerpt",
      "title": "prompt-shield-hook.sh",
      "file_path": "prompt-shield-hook.sh"
    },
    {
      "evidence_id": "EV:file-2",
      "type": "file_excerpt",
      "title": "shield.py",
      "file_path": "shield.py"
    },
    {
      "evidence_id": "EV:file-3",
      "type": "file_excerpt",
      "title": "SKILL.md",
      "file_path": "SKILL.md"
    },
    {
      "evidence_id": "EV:file-4",
      "type": "file_excerpt",
      "title": "_meta.json",
      "file_path": "_meta.json"
    },
    {
      "evidence_id": "EV:cisco-1",
      "type": "file_excerpt",
      "title": "COMMAND_INJECTION_USER_INPUT [MEDIUM] prompt-shield-hook.sh:9: SHIELD_PATH=\"$(dirname \"$(readlink -f \"$0\")\")/shield.py\"",
      "file_path": "prompt-shield-hook.sh"
    },
    {
      "evidence_id": "EV:cisco-2",
      "type": "file_excerpt",
      "title": "MCP_SYS_CRITICAL_ACCESS [HIGH] prompt-shield-hook.sh:1: #!/bin/bash",
      "file_path": "prompt-shield-hook.sh"
    },
    {
      "evidence_id": "EV:cisco-3",
      "type": "file_excerpt",
      "title": "MCP_SCRIPT_ANSI_DECEPTION [HIGH] shield.py:725: \"CLEAN\": \"\\033[92m\",   # Grün",
      "file_path": "shield.py"
    },
    {
      "evidence_id": "EV:cisco-4",
      "type": "file_excerpt",
      "title": "TIRITH_ANSI_ESCAPE_IN_STRING [HIGH] shield.py:725: \"CLEAN\": \"\\033[92m\",   # Grün",
      "file_path": "shield.py"
    },
    {
      "evidence_id": "EV:cisco-5",
      "type": "file_excerpt",
      "title": "INSTRUCTED_BINARY_INSTALL [HIGH] SKILL.md:19: **Dependencies:** PyYAML (`pip install pyyaml`)",
      "file_path": "SKILL.md"
    },
    {
      "evidence_id": "EV:cisco-6",
      "type": "file_excerpt",
      "title": "SOCIAL_ENG_VAGUE_DESCRIPTION [LOW] SKILL.md:149: ",
      "file_path": "SKILL.md"
    }
  ]
}