input-guard
· v1.0.1
Scan untrusted external text (web pages, tweets, search results, API responses) for prompt injection attacks. Returns severity levels and alerts on dangerous content. Use BEFORE processing any text from untrusted sources.
⚠️ Hazard Flags
Capabilities
Execution
- ✅ Shell execution
- ❌ Code execution
- ❌ Install dependencies
- ❌ Persistence
- Privilege: admin
Filesystem
- ✅ Read workspace
- ✅ Write workspace
- ✅ Read home
- ✅ Write home
- ❌ Read system
- ✅ Delete
Network
- Egress: any
- ❌ Ingress
Credentials
- ✅ Environment vars
- ✅ Credential files
- ❌ Browser data
- ❌ Keychain
Actions
Containment
Level: maximum
- SANDBOX_CONTAINER: Code execution capability
- LOG_ACTIONS: Audit trail for all actions
⚡ Risks
Mitigation: Review SKILL.md for hidden instructions. Do not use with untrusted input.
Mitigation: Use user-level installs without sudo
Mitigation: Remove embedded script tags and encoded payloads.
Mitigation: Provide clear, detailed description of skill functionality
Mitigation: Remove references to sensitive data collection.
Mitigation: Ensure network access is necessary and documented
Want a deeper analysis?
This report was generated by static analysis. Get an LLM-powered deep review with behavioral reasoning and attack surface mapping.
🧠 Deep Analysis — $5.00
🚨 Incident Response
Kill switch: Stop the agent process
Containment: Review logs for unexpected actions
Recovery: Depends on skill capabilities
Raw SSDS JSON (click to expand)
{
"meta": {
"document_id": "ssds:auto:input-guard:1.0.1",
"ssds_version": "0.2.0",
"scanner_version": "0.4.0+fe6fd9123d50",
"created_at": "2026-03-05T02:07:17.885Z",
"created_by": {
"agent": "safeagentskills-cli/generate-ssds"
},
"language": "en",
"notes": "Auto-generated SSDS. Manual review recommended."
},
"skill": {
"name": "input-guard",
"version": "1.0.1",
"format": "agent_skill",
"description": "Scan untrusted external text (web pages, tweets, search results, API responses) for prompt injection attacks. Returns severity levels and alerts on dangerous content. Use BEFORE processing any text from untrusted sources.",
"publisher": "ClawHub",
"source": {
"channel": "local"
},
"artifact": {
"sha256": "41762e278f93aae99910e0e88f0464a5eb3401dd5e37372bc5d6ccd388b16689",
"hash_method": "files_sorted"
}
},
"capabilities": {
"execution": {
"can_exec_shell": true,
"can_exec_code": false,
"privilege_level": "admin",
"can_install_deps": false,
"can_persist": false
},
"filesystem": {
"reads_workspace": true,
"reads_user_home": true,
"reads_system": false,
"writes_workspace": true,
"writes_user_home": true,
"writes_system": false,
"can_delete": true
},
"network": {
"egress": "any",
"ingress": false
},
"credentials": {
"reads_env_vars": true,
"reads_credential_files": true,
"reads_browser_data": false,
"reads_keychain": false
},
"services": [],
"actions": {
"can_send_messages": false,
"can_post_public": false,
"can_purchase": false,
"can_transfer_money": false,
"can_deploy": false,
"can_delete_external": false
},
"prompt_injection_surfaces": [
"web",
"documents"
],
"content_types": [
"general"
]
},
"hazards": {
"hdac": {
"H": 4,
"D": 4,
"A": 1,
"C": 1
},
"flags": [
"EXEC",
"PRIVILEGED",
"FS_READ_WORKSPACE",
"FS_READ_USER",
"FS_WRITE_WORKSPACE",
"FS_WRITE_USER",
"FS_DELETE",
"NET_EGRESS_ANY",
"CREDS_ENV",
"CREDS_FILES",
"PI_WEB",
"PI_DOCUMENTS"
],
"custom_flags": [
{
"code": "PROMPT_INJECTION",
"name": "Prompt Injection Risk",
"description": "Contains prompt injection patterns in: SKILL.md"
},
{
"code": "PRIVILEGE_ESCALATION",
"name": "Privilege Escalation",
"description": "Uses elevated privileges (sudo/root) in: README.md, scripts/scan.py"
},
{
"code": "FILE_DELETE",
"name": "File Deletion",
"description": "Can delete files in: README.md, scripts/get_taxonomy.py, SKILL.md"
},
{
"code": "TOOL_ABUSE",
"name": "Unauthorized Tool Use",
"description": "TOOL_ABUSE_SYSTEM_PACKAGE_INSTALL, INSTRUCTED_BINARY_INSTALL, MCP_SYS_FILE_DESTRUCTION, MCP_SYS_CRITICAL_ACCESS: Attempting to install system packages with elevated privileges"
},
{
"code": "SOCIAL_ENGINEERING",
"name": "Social Engineering Risk",
"description": "SOCIAL_ENG_VAGUE_DESCRIPTION, SOCIAL_ENG_ANTHROPIC_IMPERSONATION: Skill description is too vague or missing"
},
{
"code": "COMMAND_INJECTION",
"name": "Command Injection Risk",
"description": "MCP_SCRIPT_TAGS: Script tags, VBScript, or encoded script data URIs"
},
{
"code": "TOOL_POISONING",
"name": "Tool Poisoning",
"description": "Hidden secondary behavior detected: MCP_TOOL_POISONING_SENSITIVE_DATA"
},
{
"code": "DATA_EXFILTRATION",
"name": "Data Exfiltration Risk",
"description": "DATA_EXFIL_NETWORK_REQUESTS, DATA_EXFIL_ENV_VARS, DATA_EXFIL_HTTP_POST: HTTP client library imports that enable external communication"
}
],
"confidence": {
"level": "medium",
"basis": [
"static_analysis"
],
"notes": "Detected 8 security patterns (17 vendored rule hits). Review recommended."
},
"rationale": {
"H": "H4: Critical: Privilege escalation or malware detected",
"D": "D4: Critical: Credential theft or data exfiltration",
"A": "A1: Local side effects only",
"C": "C1: General content"
}
},
"containment": {
"level": "maximum",
"required": [
{
"control": "SANDBOX_CONTAINER",
"reason": "Code execution capability"
}
],
"recommended": [
{
"control": "LOG_ACTIONS",
"reason": "Audit trail for all actions"
}
],
"uncontained_risk": "Risk level depends on manual review of actual capabilities."
},
"risks": {
"risks": [
{
"risk": "Prompt injection patterns detected in: SKILL.md",
"severity": "high",
"mitigation": "Review SKILL.md for hidden instructions. Do not use with untrusted input."
},
{
"risk": "Unauthorized tool use: TOOL_ABUSE_SYSTEM_PACKAGE_INSTALL, INSTRUCTED_BINARY_INSTALL, MCP_SYS_FILE_DESTRUCTION, MCP_SYS_CRITICAL_ACCESS",
"severity": "critical",
"mitigation": "Use user-level installs without sudo"
},
{
"risk": "Command injection risk: MCP_SCRIPT_TAGS",
"severity": "high",
"mitigation": "Remove embedded script tags and encoded payloads."
},
{
"risk": "Social engineering indicators: SOCIAL_ENG_VAGUE_DESCRIPTION, SOCIAL_ENG_ANTHROPIC_IMPERSONATION",
"severity": "low",
"mitigation": "Provide clear, detailed description of skill functionality"
},
{
"risk": "Tool poisoning: hidden behaviors detected (MCP_TOOL_POISONING_SENSITIVE_DATA)",
"severity": "high",
"mitigation": "Remove references to sensitive data collection."
},
{
"risk": "Data exfiltration patterns: DATA_EXFIL_NETWORK_REQUESTS, DATA_EXFIL_ENV_VARS, DATA_EXFIL_HTTP_POST",
"severity": "critical",
"mitigation": "Ensure network access is necessary and documented"
}
],
"limitations": [
"Static analysis only - runtime behavior not verified"
]
},
"incident_response": {
"kill_switch": [
"Stop the agent process"
],
"containment": [
"Review logs for unexpected actions"
],
"recovery": [
"Depends on skill capabilities"
]
},
"evidence": [
{
"evidence_id": "EV:file-1",
"type": "file_excerpt",
"title": "evals/run.py",
"file_path": "evals/run.py"
},
{
"evidence_id": "EV:file-2",
"type": "file_excerpt",
"title": "_meta.json",
"file_path": "_meta.json"
},
{
"evidence_id": "EV:file-3",
"type": "file_excerpt",
"title": "README.md",
"file_path": "README.md"
},
{
"evidence_id": "EV:file-4",
"type": "file_excerpt",
"title": "scripts/get_taxonomy.py",
"file_path": "scripts/get_taxonomy.py"
},
{
"evidence_id": "EV:file-5",
"type": "file_excerpt",
"title": "scripts/llm_scanner.py",
"file_path": "scripts/llm_scanner.py"
},
{
"evidence_id": "EV:file-6",
"type": "file_excerpt",
"title": "scripts/report-to-molthreats.sh",
"file_path": "scripts/report-to-molthreats.sh"
},
{
"evidence_id": "EV:file-7",
"type": "file_excerpt",
"title": "scripts/scan.py",
"file_path": "scripts/scan.py"
},
{
"evidence_id": "EV:file-8",
"type": "file_excerpt",
"title": "scripts/scan.sh",
"file_path": "scripts/scan.sh"
},
{
"evidence_id": "EV:file-9",
"type": "file_excerpt",
"title": "SKILL.md",
"file_path": "SKILL.md"
},
{
"evidence_id": "EV:cisco-1",
"type": "file_excerpt",
"title": "TOOL_ABUSE_SYSTEM_PACKAGE_INSTALL [MEDIUM] README.md:25: sudo apt-get install python3-pip # Debian/Ubuntu",
"file_path": "README.md"
},
{
"evidence_id": "EV:cisco-2",
"type": "file_excerpt",
"title": "INSTRUCTED_BINARY_INSTALL [HIGH] README.md:26: brew install python3 # macOS (includes pip)",
"file_path": "README.md"
},
{
"evidence_id": "EV:cisco-3",
"type": "file_excerpt",
"title": "MCP_SYS_FILE_DESTRUCTION [CRITICAL] README.md:241: rm -rf skills/input-guard",
"file_path": "README.md"
},
{
"evidence_id": "EV:cisco-4",
"type": "file_excerpt",
"title": "DATA_EXFIL_NETWORK_REQUESTS [MEDIUM] scripts/get_taxonomy.py:15: import requests",
"file_path": "scripts/get_taxonomy.py"
},
{
"evidence_id": "EV:cisco-5",
"type": "file_excerpt",
"title": "DATA_EXFIL_ENV_VARS [MEDIUM] scripts/get_taxonomy.py:28: return os.environ.get(\"PROMPTINTEL_API_KEY\")",
"file_path": "scripts/get_taxonomy.py"
},
{
"evidence_id": "EV:cisco-6",
"type": "file_excerpt",
"title": "DATA_EXFIL_NETWORK_REQUESTS [MEDIUM] scripts/llm_scanner.py:25: import requests",
"file_path": "scripts/llm_scanner.py"
},
{
"evidence_id": "EV:cisco-7",
"type": "file_excerpt",
"title": "DATA_EXFIL_HTTP_POST [CRITICAL] scripts/llm_scanner.py:179: response = requests.post(",
"file_path": "scripts/llm_scanner.py"
},
{
"evidence_id": "EV:cisco-8",
"type": "file_excerpt",
"title": "DATA_EXFIL_ENV_VARS [MEDIUM] scripts/llm_scanner.py:144: openai_key = os.environ.get(\"OPENAI_API_KEY\")",
"file_path": "scripts/llm_scanner.py"
},
{
"evidence_id": "EV:cisco-9",
"type": "file_excerpt",
"title": "MCP_SYS_CRITICAL_ACCESS [HIGH] scripts/report-to-molthreats.sh:1: #!/bin/bash",
"file_path": "scripts/report-to-molthreats.sh"
},
{
"evidence_id": "EV:cisco-10",
"type": "file_excerpt",
"title": "MCP_SCRIPT_TAGS [HIGH] scripts/scan.py:204: (r\"<script[^>]*>\", \"xss_attempt\", Severity.HIGH),",
"file_path": "scripts/scan.py"
}
]
}