๐Ÿ›ก๏ธ SafeAgentSkills

Safe Memory Manager

View on ClawHub ↗ · v1.0.7

⬇ 167 downloads

Medium Risk

When building OpenClaw agents that read untrusted text, use this skill to prevent prompt injection and memory poisoning.

H:3 D:2 A:0 C:1

โš ๏ธ Hazard Flags

EXEC FS_READ_WORKSPACE FS_DELETE NET_EGRESS_ANY PI_WEB PI_DOCUMENTS

📋 Capabilities

Execution

  • ✅ Shell execution
  • ❌ Code execution
  • ❌ Install dependencies
  • ❌ Persistence
  • Privilege: user

Filesystem

  • ✅ Read workspace
  • ❌ Write workspace
  • ❌ Read home
  • ❌ Write home
  • ❌ Read system
  • ✅ Delete

Network

  • Egress: any
  • โŒ Ingress

Credentials

  • โŒ Environment vars
  • โŒ Credential files
  • โŒ Browser data
  • โŒ Keychain

Actions

โŒ send messagesโŒ post publicโŒ purchaseโŒ transfer moneyโŒ deployโŒ delete external

🔒 Containment

Level: elevated

Required:
  • SANDBOX_CONTAINER: Code execution capability
Recommended:
  • LOG_ACTIONS: Audit trail for all actions

⚡ Risks

Prompt injection patterns detected in: SKILL.md, safe_memory.py — severity: high

Mitigation: Review SKILL.md for hidden instructions. Do not use with untrusted input.

Unauthorized tool use: MCP_SYS_FILE_DESTRUCTION — severity: critical

Mitigation: Remove destructive file operations.

Social engineering indicators: SOCIAL_ENG_VAGUE_DESCRIPTION — severity: low

Mitigation: Provide clear, detailed description of skill functionality

Want a deeper analysis?

This report was generated by static analysis. Get an LLM-powered deep review with behavioral reasoning and attack surface mapping.

🧠 Deep Analysis — $5.00

🚨 Incident Response

Kill switch: Stop the agent process

Containment: Review logs for unexpected actions

Recovery: Depends on skill capabilities

📄 Raw SSDS JSON (click to expand)
{
  "meta": {
    "document_id": "ssds:auto:safe-memory-manager:1.0.7",
    "ssds_version": "0.2.0",
    "scanner_version": "0.4.0+fe6fd9123d50",
    "created_at": "2026-03-28T09:35:51.657Z",
    "created_by": {
      "agent": "safeagentskills-cli/generate-ssds"
    },
    "language": "en",
    "notes": "Auto-generated SSDS. Manual review recommended."
  },
  "skill": {
    "name": "Safe Memory Manager",
    "version": "1.0.7",
    "format": "agent_skill",
    "description": "When building OpenClaw agents that read untrusted text, use this skill to prevent prompt injection and memory poisoning.",
    "publisher": "ClawHub",
    "source": {
      "channel": "clawhub",
      "slug": "safe-memory-manager",
      "owner": "horn111",
      "downloads": 167,
      "stars": 0
    },
    "artifact": {
      "sha256": "775b44e7a81efb70c73a0f1655d8840603273d8dfa66b481facd2317f4656c24",
      "hash_method": "files_sorted"
    }
  },
  "capabilities": {
    "execution": {
      "can_exec_shell": true,
      "can_exec_code": false,
      "privilege_level": "user",
      "can_install_deps": false,
      "can_persist": false
    },
    "filesystem": {
      "reads_workspace": true,
      "reads_user_home": false,
      "reads_system": false,
      "writes_workspace": false,
      "writes_user_home": false,
      "writes_system": false,
      "can_delete": true
    },
    "network": {
      "egress": "any",
      "ingress": false
    },
    "credentials": {
      "reads_env_vars": false,
      "reads_credential_files": false,
      "reads_browser_data": false,
      "reads_keychain": false
    },
    "services": [],
    "actions": {
      "can_send_messages": false,
      "can_post_public": false,
      "can_purchase": false,
      "can_transfer_money": false,
      "can_deploy": false,
      "can_delete_external": false
    },
    "prompt_injection_surfaces": [
      "web",
      "documents"
    ],
    "content_types": [
      "general"
    ]
  },
  "hazards": {
    "hdac": {
      "H": 3,
      "D": 2,
      "A": 0,
      "C": 1
    },
    "flags": [
      "EXEC",
      "FS_READ_WORKSPACE",
      "FS_DELETE",
      "NET_EGRESS_ANY",
      "PI_WEB",
      "PI_DOCUMENTS"
    ],
    "custom_flags": [
      {
        "code": "PROMPT_INJECTION",
        "name": "Prompt Injection Risk",
        "description": "Contains prompt injection patterns in: SKILL.md, safe_memory.py"
      },
      {
        "code": "FILE_DELETE",
        "name": "File Deletion",
        "description": "Can delete files in: safe_memory.py"
      },
      {
        "code": "TOOL_ABUSE",
        "name": "Unauthorized Tool Use",
        "description": "MCP_SYS_FILE_DESTRUCTION: File destruction or wiping commands"
      },
      {
        "code": "SOCIAL_ENGINEERING",
        "name": "Social Engineering Risk",
        "description": "SOCIAL_ENG_VAGUE_DESCRIPTION: Skill description is too vague or missing"
      }
    ],
    "confidence": {
      "level": "medium",
      "basis": [
        "static_analysis"
      ],
      "notes": "Detected 4 security patterns (3 vendored rule hits). Review recommended."
    },
    "rationale": {
      "H": "H3: Shell/code execution or persistence detected",
      "D": "D2: Network + file access",
      "A": "A0: No side effects detected",
      "C": "C1: General content"
    }
  },
  "containment": {
    "level": "elevated",
    "required": [
      {
        "control": "SANDBOX_CONTAINER",
        "reason": "Code execution capability"
      }
    ],
    "recommended": [
      {
        "control": "LOG_ACTIONS",
        "reason": "Audit trail for all actions"
      }
    ],
    "uncontained_risk": "Risk level depends on manual review of actual capabilities."
  },
  "risks": {
    "risks": [
      {
        "risk": "Prompt injection patterns detected in: SKILL.md, safe_memory.py",
        "severity": "high",
        "mitigation": "Review SKILL.md for hidden instructions. Do not use with untrusted input."
      },
      {
        "risk": "Unauthorized tool use: MCP_SYS_FILE_DESTRUCTION",
        "severity": "critical",
        "mitigation": "Remove destructive file operations."
      },
      {
        "risk": "Social engineering indicators: SOCIAL_ENG_VAGUE_DESCRIPTION",
        "severity": "low",
        "mitigation": "Provide clear, detailed description of skill functionality"
      }
    ],
    "limitations": [
      "Static analysis only - runtime behavior not verified"
    ]
  },
  "incident_response": {
    "kill_switch": [
      "Stop the agent process"
    ],
    "containment": [
      "Review logs for unexpected actions"
    ],
    "recovery": [
      "Depends on skill capabilities"
    ]
  },
  "evidence": [
    {
      "evidence_id": "EV:file-1",
      "type": "file_excerpt",
      "title": "_meta.json",
      "file_path": "_meta.json"
    },
    {
      "evidence_id": "EV:file-2",
      "type": "file_excerpt",
      "title": "SKILL.md",
      "file_path": "SKILL.md"
    },
    {
      "evidence_id": "EV:file-3",
      "type": "file_excerpt",
      "title": "safe_memory.py",
      "file_path": "safe_memory.py"
    },
    {
      "evidence_id": "EV:file-4",
      "type": "file_excerpt",
      "title": "package.json",
      "file_path": "package.json"
    },
    {
      "evidence_id": "EV:cisco-1",
      "type": "file_excerpt",
      "title": "PROMPT_INJECTION_IGNORE_INSTRUCTIONS [HIGH] SKILL.md:18: 2. **Input Sanitization:** Automatically detects and neutralizes common prompt i",
      "file_path": "SKILL.md"
    },
    {
      "evidence_id": "EV:cisco-2",
      "type": "file_excerpt",
      "title": "SOCIAL_ENG_VAGUE_DESCRIPTION [LOW] SKILL.md:1: ---",
      "file_path": "SKILL.md"
    },
    {
      "evidence_id": "EV:cisco-3",
      "type": "file_excerpt",
      "title": "MCP_SYS_FILE_DESTRUCTION [CRITICAL] safe_memory.py:27: re.compile(r\"```(bash|sh|python)\\n[\\s\\S]*?(rm -rf|wget|curl)[\\s\\S]*?```\"),",
      "file_path": "safe_memory.py"
    }
  ]
}