Compare commits
37 Commits
37b154bc94
...
mathieu/ma
| Author | SHA1 | Date | |
|---|---|---|---|
| 473f1620d5 | |||
| 58626c64e5 | |||
| f05b4a6b4c | |||
| 0e3dc23639 | |||
| 20ee23c1c3 | |||
| 796c22f736 | |||
| 562f9bb65e | |||
| 9d5d852860 | |||
| e05b54ec1b | |||
| cb474b2d99 | |||
| cfe29d2c0b | |||
| 4ea4592d75 | |||
| 62bf37d481 | |||
| ed6517cc24 | |||
| 2061dfe63b | |||
| 5aeb9c86c0 | |||
| 626eaa1895 | |||
| 18c18ec3a8 | |||
| f4c9422f77 | |||
| c19370f8b3 | |||
| b55b3364af | |||
| 70d0685c97 | |||
| a470f86ee4 | |||
| 7e85083c38 | |||
| 267c82f4bd | |||
| 3dd772d35a | |||
| 631db40665 | |||
| 5bb42db57a | |||
| dc5487c965 | |||
| da3a2ac3a4 | |||
| 481616455a | |||
| 9cb65151ee | |||
|
|
da5f61e390 | ||
|
|
b8b12ebe31 | ||
|
|
9db1ae8b54 | ||
|
|
7cc9fb3427 | ||
|
|
8630789c39 |
101
.gitea/workflows/benchmark.yml
Normal file
101
.gitea/workflows/benchmark.yml
Normal file
@@ -0,0 +1,101 @@
|
||||
name: Benchmarks
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
min_runs:
|
||||
description: "Minimum benchmark runs"
|
||||
required: false
|
||||
default: "30"
|
||||
quick:
|
||||
description: "Quick mode (fewer runs)"
|
||||
required: false
|
||||
default: "false"
|
||||
type: boolean
|
||||
|
||||
jobs:
|
||||
benchmark-linux:
|
||||
name: Benchmark (Linux)
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version-file: go.mod
|
||||
cache: true
|
||||
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: "3.12"
|
||||
|
||||
- name: Set up Node
|
||||
uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: "20"
|
||||
|
||||
- name: Download dependencies
|
||||
run: go mod download
|
||||
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y \
|
||||
bubblewrap \
|
||||
socat \
|
||||
uidmap \
|
||||
curl \
|
||||
netcat-openbsd \
|
||||
ripgrep \
|
||||
hyperfine \
|
||||
jq \
|
||||
bc
|
||||
# Configure subuid/subgid
|
||||
echo "$(whoami):100000:65536" | sudo tee -a /etc/subuid
|
||||
echo "$(whoami):100000:65536" | sudo tee -a /etc/subgid
|
||||
sudo chmod u+s $(which bwrap)
|
||||
|
||||
- name: Install benchstat
|
||||
run: go install golang.org/x/perf/cmd/benchstat@latest
|
||||
|
||||
- name: Build greywall
|
||||
run: make build-ci
|
||||
|
||||
- name: Run Go microbenchmarks
|
||||
run: |
|
||||
mkdir -p benchmarks
|
||||
go test -run=^$ -bench=. -benchmem -count=10 ./internal/sandbox/... | tee benchmarks/go-bench-linux.txt
|
||||
|
||||
- name: Run CLI benchmarks
|
||||
run: |
|
||||
MIN_RUNS="${{ github.event.inputs.min_runs || '30' }}"
|
||||
QUICK="${{ github.event.inputs.quick || 'false' }}"
|
||||
|
||||
if [[ "$QUICK" == "true" ]]; then
|
||||
./scripts/benchmark.sh -q -o benchmarks
|
||||
else
|
||||
./scripts/benchmark.sh -n "$MIN_RUNS" -o benchmarks
|
||||
fi
|
||||
|
||||
- name: Upload benchmark results
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: benchmark-results-linux
|
||||
path: benchmarks/
|
||||
retention-days: 30
|
||||
|
||||
- name: Display results
|
||||
run: |
|
||||
echo "=== Linux Benchmark Results ==="
|
||||
echo ""
|
||||
|
||||
for f in benchmarks/*.md; do
|
||||
[[ -f "$f" ]] && cat "$f"
|
||||
done
|
||||
|
||||
echo ""
|
||||
echo "=== Go Microbenchmarks ==="
|
||||
grep -E '^Benchmark|^ok|^PASS' benchmarks/go-bench-linux.txt | head -50 || true
|
||||
115
.gitea/workflows/main.yml
Normal file
115
.gitea/workflows/main.yml
Normal file
@@ -0,0 +1,115 @@
|
||||
name: Build and test
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [main]
|
||||
pull_request:
|
||||
branches: [main]
|
||||
|
||||
jobs:
|
||||
build:
|
||||
name: Build
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version-file: go.mod
|
||||
cache: true
|
||||
|
||||
- name: Download dependencies
|
||||
run: go mod download
|
||||
|
||||
- name: Build
|
||||
run: make build-ci
|
||||
|
||||
lint:
|
||||
name: Lint
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version-file: go.mod
|
||||
cache: true
|
||||
|
||||
- name: Download dependencies
|
||||
run: go mod download
|
||||
|
||||
- name: Lint
|
||||
uses: golangci/golangci-lint-action@v6
|
||||
with:
|
||||
install-mode: goinstall
|
||||
version: v1.64.8
|
||||
|
||||
test-linux:
|
||||
name: Test (Linux)
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version-file: go.mod
|
||||
cache: true
|
||||
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: "3.12"
|
||||
|
||||
- name: Set up Node
|
||||
uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: "20"
|
||||
|
||||
- name: Download dependencies
|
||||
run: go mod download
|
||||
|
||||
- name: Install Linux sandbox dependencies
|
||||
run: |
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y \
|
||||
bubblewrap \
|
||||
socat \
|
||||
uidmap \
|
||||
curl \
|
||||
netcat-openbsd \
|
||||
ripgrep
|
||||
# Configure subuid/subgid for the runner user (required for unprivileged user namespaces)
|
||||
echo "$(whoami):100000:65536" | sudo tee -a /etc/subuid
|
||||
echo "$(whoami):100000:65536" | sudo tee -a /etc/subgid
|
||||
# Make bwrap setuid so it can create namespaces as non-root user
|
||||
sudo chmod u+s $(which bwrap)
|
||||
|
||||
- name: Verify sandbox dependencies
|
||||
run: |
|
||||
echo "=== Checking sandbox dependencies ==="
|
||||
bwrap --version
|
||||
socat -V | head -1
|
||||
echo "User namespaces enabled: $(cat /proc/sys/kernel/unprivileged_userns_clone 2>/dev/null || echo 'check not available')"
|
||||
echo "Kernel version: $(uname -r)"
|
||||
echo "uidmap installed: $(which newuidmap 2>/dev/null && echo yes || echo no)"
|
||||
echo "subuid configured: $(grep $(whoami) /etc/subuid 2>/dev/null || echo 'not configured')"
|
||||
echo "bwrap setuid: $(ls -la $(which bwrap) | grep -q '^-rws' && echo yes || echo no)"
|
||||
echo "=== Testing bwrap basic functionality ==="
|
||||
bwrap --ro-bind / / -- /bin/echo "bwrap works!"
|
||||
echo "=== Testing bwrap with user namespace ==="
|
||||
bwrap --ro-bind / / --unshare-user --uid 0 --gid 0 -- /bin/echo "bwrap user namespace works!"
|
||||
|
||||
- name: Run unit and integration tests
|
||||
run: make test-ci
|
||||
|
||||
- name: Build binary for smoke tests
|
||||
run: make build-ci
|
||||
|
||||
- name: Run smoke tests
|
||||
run: GREYWALL_TEST_NETWORK=1 ./scripts/smoke_test.sh ./greywall
|
||||
62
.gitea/workflows/release.yml
Normal file
62
.gitea/workflows/release.yml
Normal file
@@ -0,0 +1,62 @@
|
||||
name: Release
|
||||
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- "v*"
|
||||
|
||||
run-name: "Release ${{ github.ref_name }}"
|
||||
|
||||
jobs:
|
||||
goreleaser:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version-file: go.mod
|
||||
cache: true
|
||||
|
||||
- name: Run GoReleaser
|
||||
uses: goreleaser/goreleaser-action@v6
|
||||
with:
|
||||
distribution: goreleaser
|
||||
version: "~> v2"
|
||||
args: release --clean
|
||||
env:
|
||||
GITEA_TOKEN: ${{ secrets.GITEA_TOKEN }}
|
||||
GORELEASER_FORCE_TOKEN: gitea
|
||||
|
||||
publish-version:
|
||||
needs: [goreleaser]
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout gh-pages
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
ref: gh-pages
|
||||
|
||||
- name: Update latest version
|
||||
run: |
|
||||
echo "${{ github.ref_name }}" > latest.txt
|
||||
|
||||
cat > latest.json << EOF
|
||||
{
|
||||
"version": "${{ github.ref_name }}",
|
||||
"published_at": "$(date -u +%Y-%m-%dT%H:%M:%SZ)",
|
||||
"url": "https://gitea.app.monadical.io/monadical/greywall/releases/tag/${{ github.ref_name }}"
|
||||
}
|
||||
EOF
|
||||
|
||||
- name: Commit and push to gh-pages
|
||||
run: |
|
||||
git config user.name "gitea-actions[bot]"
|
||||
git config user.email "gitea-actions[bot]@noreply.gitea.app.monadical.io"
|
||||
git add latest.txt latest.json
|
||||
git commit -m "Update latest version to ${{ github.ref_name }}" || echo "No changes to commit"
|
||||
git push origin gh-pages
|
||||
6
.github/workflows/benchmark.yml
vendored
6
.github/workflows/benchmark.yml
vendored
@@ -19,7 +19,7 @@ on:
|
||||
# paths:
|
||||
# - "internal/sandbox/**"
|
||||
# - "internal/proxy/**"
|
||||
# - "cmd/fence/**"
|
||||
# - "cmd/greywall/**"
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
@@ -72,7 +72,7 @@ jobs:
|
||||
- name: Install benchstat
|
||||
run: go install golang.org/x/perf/cmd/benchstat@latest
|
||||
|
||||
- name: Build fence
|
||||
- name: Build greywall
|
||||
run: make build-ci
|
||||
|
||||
- name: Run Go microbenchmarks
|
||||
@@ -146,7 +146,7 @@ jobs:
|
||||
- name: Install benchstat
|
||||
run: go install golang.org/x/perf/cmd/benchstat@latest
|
||||
|
||||
- name: Build fence
|
||||
- name: Build greywall
|
||||
run: make build-ci
|
||||
|
||||
- name: Run Go microbenchmarks
|
||||
|
||||
4
.github/workflows/main.yml
vendored
4
.github/workflows/main.yml
vendored
@@ -115,7 +115,7 @@ jobs:
|
||||
run: make build-ci
|
||||
|
||||
- name: Run smoke tests
|
||||
run: FENCE_TEST_NETWORK=1 ./scripts/smoke_test.sh ./fence
|
||||
run: GREYWALL_TEST_NETWORK=1 ./scripts/smoke_test.sh ./greywall
|
||||
|
||||
test-macos:
|
||||
name: Test (macOS)
|
||||
@@ -160,4 +160,4 @@ jobs:
|
||||
run: make build-ci
|
||||
|
||||
- name: Run smoke tests
|
||||
run: FENCE_TEST_NETWORK=1 ./scripts/smoke_test.sh ./fence
|
||||
run: GREYWALL_TEST_NETWORK=1 ./scripts/smoke_test.sh ./greywall
|
||||
|
||||
2
.github/workflows/release.yml
vendored
2
.github/workflows/release.yml
vendored
@@ -75,7 +75,7 @@ jobs:
|
||||
{
|
||||
"version": "${{ github.ref_name }}",
|
||||
"published_at": "$(date -u +%Y-%m-%dT%H:%M:%SZ)",
|
||||
"url": "https://github.com/Use-Tusk/fence/releases/tag/${{ github.ref_name }}"
|
||||
"url": "https://gitea.app.monadical.io/monadical/greywall/releases/tag/${{ github.ref_name }}"
|
||||
}
|
||||
EOF
|
||||
|
||||
|
||||
14
.gitignore
vendored
14
.gitignore
vendored
@@ -1,7 +1,7 @@
|
||||
# Binary (only at root, not cmd/fence or pkg/fence)
|
||||
/fence
|
||||
/fence_*
|
||||
/fence-*
|
||||
# Binary (only at root, not cmd/greywall or pkg/greywall)
|
||||
/greywall
|
||||
/greywall_*
|
||||
/greywall-*
|
||||
|
||||
# Tar archives
|
||||
*.tar.gz
|
||||
@@ -29,3 +29,9 @@ coverage.out
|
||||
cpu.out
|
||||
mem.out
|
||||
|
||||
# Embedded binaries (downloaded at build time)
|
||||
internal/sandbox/bin/tun2socks-*
|
||||
|
||||
# tun2socks source/build directory
|
||||
/tun2socks/
|
||||
|
||||
|
||||
@@ -1,40 +1,49 @@
|
||||
version: "2"
|
||||
run:
|
||||
timeout: 5m
|
||||
modules-download-mode: readonly
|
||||
|
||||
linters-settings:
|
||||
gci:
|
||||
sections:
|
||||
- standard
|
||||
- default
|
||||
- prefix(github.com/Use-Tusk/fence)
|
||||
gofmt:
|
||||
simplify: true
|
||||
goimports:
|
||||
local-prefixes: github.com/Use-Tusk/fence
|
||||
gocritic:
|
||||
disabled-checks:
|
||||
- singleCaseSwitch
|
||||
revive:
|
||||
rules:
|
||||
- name: exported
|
||||
disabled: true
|
||||
|
||||
linters:
|
||||
enable-all: false
|
||||
disable-all: true
|
||||
default: none
|
||||
enable:
|
||||
- staticcheck
|
||||
- errcheck
|
||||
- gosimple
|
||||
- govet
|
||||
- unused
|
||||
- ineffassign
|
||||
- gosec
|
||||
- gocritic
|
||||
- revive
|
||||
- gofumpt
|
||||
- gosec
|
||||
- govet
|
||||
- ineffassign
|
||||
- misspell
|
||||
|
||||
issues:
|
||||
exclude-use-default: false
|
||||
- revive
|
||||
- staticcheck
|
||||
- unused
|
||||
settings:
|
||||
gocritic:
|
||||
disabled-checks:
|
||||
- singleCaseSwitch
|
||||
revive:
|
||||
rules:
|
||||
- name: exported
|
||||
disabled: true
|
||||
exclusions:
|
||||
generated: lax
|
||||
paths:
|
||||
- third_party$
|
||||
- builtin$
|
||||
- examples$
|
||||
formatters:
|
||||
enable:
|
||||
- gofumpt
|
||||
settings:
|
||||
gci:
|
||||
sections:
|
||||
- standard
|
||||
- default
|
||||
- prefix(gitea.app.monadical.io/monadical/greywall)
|
||||
gofmt:
|
||||
simplify: true
|
||||
goimports:
|
||||
local-prefixes:
|
||||
- gitea.app.monadical.io/monadical/greywall
|
||||
exclusions:
|
||||
generated: lax
|
||||
paths:
|
||||
- third_party$
|
||||
- builtin$
|
||||
- examples$
|
||||
|
||||
@@ -1,5 +1,10 @@
|
||||
version: 2
|
||||
|
||||
gitea_urls:
|
||||
api: https://gitea.app.monadical.io/api/v1
|
||||
download: https://gitea.app.monadical.io
|
||||
skip_tls_verify: false
|
||||
|
||||
before:
|
||||
hooks:
|
||||
- go mod tidy
|
||||
@@ -18,8 +23,8 @@ builds:
|
||||
- -X main.version={{.Version}}
|
||||
- -X main.buildTime={{.Date}}
|
||||
- -X main.gitCommit={{.Commit}}
|
||||
binary: fence
|
||||
main: ./cmd/fence
|
||||
binary: greywall
|
||||
main: ./cmd/greywall
|
||||
|
||||
archives:
|
||||
- formats: [tar.gz]
|
||||
@@ -42,7 +47,7 @@ checksum:
|
||||
|
||||
changelog:
|
||||
sort: asc
|
||||
use: github
|
||||
use: gitea
|
||||
format: "{{ .SHA }}: {{ .Message }}{{ with .AuthorUsername }} (@{{ . }}){{ end }}"
|
||||
filters:
|
||||
exclude:
|
||||
@@ -76,8 +81,8 @@ changelog:
|
||||
order: 9999
|
||||
|
||||
release:
|
||||
github:
|
||||
owner: Use-Tusk
|
||||
name: fence
|
||||
gitea:
|
||||
owner: monadical
|
||||
name: greywall
|
||||
draft: false
|
||||
prerelease: auto
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# Architecture
|
||||
|
||||
Fence restricts network, filesystem, and command access for arbitrary commands. It works by:
|
||||
Greywall restricts network, filesystem, and command access for arbitrary commands. It works by:
|
||||
|
||||
1. **Blocking commands** via configurable deny/allow lists before execution
|
||||
2. **Intercepting network traffic** via HTTP/SOCKS5 proxies that filter by domain
|
||||
@@ -9,7 +9,7 @@ Fence restricts network, filesystem, and command access for arbitrary commands.
|
||||
|
||||
```mermaid
|
||||
flowchart TB
|
||||
subgraph Fence
|
||||
subgraph Greywall
|
||||
Config["Config<br/>(JSON)"]
|
||||
Manager
|
||||
CmdCheck["Command<br/>Blocking"]
|
||||
@@ -30,8 +30,8 @@ flowchart TB
|
||||
## Project Structure
|
||||
|
||||
```text
|
||||
fence/
|
||||
├── cmd/fence/ # CLI entry point
|
||||
greywall/
|
||||
├── cmd/greywall/ # CLI entry point
|
||||
│ └── main.go # Includes --landlock-apply wrapper mode
|
||||
├── internal/ # Private implementation
|
||||
│ ├── config/ # Configuration loading/validation
|
||||
@@ -52,8 +52,8 @@ fence/
|
||||
│ ├── dangerous.go # Protected file/directory lists
|
||||
│ ├── shell.go # Shell quoting utilities
|
||||
│ └── utils.go # Path normalization
|
||||
└── pkg/fence/ # Public Go API
|
||||
└── fence.go
|
||||
└── pkg/greywall/ # Public Go API
|
||||
└── greywall.go
|
||||
```
|
||||
|
||||
## Core Components
|
||||
@@ -71,7 +71,7 @@ type Config struct {
|
||||
}
|
||||
```
|
||||
|
||||
- Loads from XDG config dir (`~/.config/fence/fence.json` or `~/Library/Application Support/fence/fence.json`) or custom path
|
||||
- Loads from XDG config dir (`~/.config/greywall/greywall.json` or `~/Library/Application Support/greywall/greywall.json`) or custom path
|
||||
- Falls back to restrictive defaults (block all network, default command deny list)
|
||||
- Validates paths and normalizes them
|
||||
|
||||
@@ -181,7 +181,7 @@ flowchart TB
|
||||
SOCKS["SOCKS Proxy<br/>:random"]
|
||||
HSOCAT["socat<br/>(HTTP bridge)"]
|
||||
SSOCAT["socat<br/>(SOCKS bridge)"]
|
||||
USOCK["Unix Sockets<br/>/tmp/fence-*.sock"]
|
||||
USOCK["Unix Sockets<br/>/tmp/greywall-*.sock"]
|
||||
end
|
||||
|
||||
subgraph Sandbox ["Sandbox (bwrap --unshare-net)"]
|
||||
@@ -221,7 +221,7 @@ flowchart TB
|
||||
|
||||
subgraph Host
|
||||
HSOCAT["socat<br/>TCP-LISTEN:8888"]
|
||||
USOCK["Unix Socket<br/>/tmp/fence-rev-8888-*.sock"]
|
||||
USOCK["Unix Socket<br/>/tmp/greywall-rev-8888-*.sock"]
|
||||
end
|
||||
|
||||
subgraph Sandbox
|
||||
@@ -286,7 +286,7 @@ flowchart TD
|
||||
|
||||
### Linux Security Layers
|
||||
|
||||
On Linux, fence uses multiple security layers with graceful fallback:
|
||||
On Linux, greywall uses multiple security layers with graceful fallback:
|
||||
|
||||
1. bubblewrap (core isolation via Linux namespaces)
|
||||
2. seccomp (syscall filtering)
|
||||
@@ -306,15 +306,15 @@ The `-m` (monitor) flag enables real-time visibility into blocked operations. Th
|
||||
|
||||
| Prefix | Source | Description |
|
||||
|--------|--------|-------------|
|
||||
| `[fence:http]` | Both | HTTP/HTTPS proxy (blocked requests only in monitor mode) |
|
||||
| `[fence:socks]` | Both | SOCKS5 proxy (blocked requests only in monitor mode) |
|
||||
| `[fence:logstream]` | macOS only | Kernel-level sandbox violations from `log stream` |
|
||||
| `[fence:ebpf]` | Linux only | Filesystem/syscall failures (requires CAP_BPF or root) |
|
||||
| `[fence:filter]` | Both | Domain filter rule matches (debug mode only) |
|
||||
| `[greywall:http]` | Both | HTTP/HTTPS proxy (blocked requests only in monitor mode) |
|
||||
| `[greywall:socks]` | Both | SOCKS5 proxy (blocked requests only in monitor mode) |
|
||||
| `[greywall:logstream]` | macOS only | Kernel-level sandbox violations from `log stream` |
|
||||
| `[greywall:ebpf]` | Linux only | Filesystem/syscall failures (requires CAP_BPF or root) |
|
||||
| `[greywall:filter]` | Both | Domain filter rule matches (debug mode only) |
|
||||
|
||||
### macOS Log Stream
|
||||
|
||||
On macOS, fence spawns `log stream` with a predicate to capture sandbox violations:
|
||||
On macOS, greywall spawns `log stream` with a predicate to capture sandbox violations:
|
||||
|
||||
```bash
|
||||
log stream --predicate 'eventMessage ENDSWITH "_SBX"' --style compact
|
||||
@@ -344,4 +344,4 @@ Filtered out (too noisy):
|
||||
|
||||
## Security Model
|
||||
|
||||
See [`docs/security-model.md`](docs/security-model.md) for Fence's threat model, guarantees, and limitations.
|
||||
See [`docs/security-model.md`](docs/security-model.md) for Greywall's threat model, guarantees, and limitations.
|
||||
|
||||
79
CLAUDE.md
Normal file
79
CLAUDE.md
Normal file
@@ -0,0 +1,79 @@
|
||||
# Greywall
|
||||
|
||||
Sandboxing layer for GreyHaven that wraps commands in restrictive sandbox environments. Blocks network access by default (allowlist-based), restricts filesystem operations, and controls command execution. Supports macOS (sandbox-exec/Seatbelt) and Linux (bubblewrap + seccomp/Landlock/eBPF).
|
||||
|
||||
## Build & Run
|
||||
|
||||
```bash
|
||||
make setup # install deps + lint tools (first time)
|
||||
make build # compile binary (downloads tun2socks)
|
||||
make run # build and run
|
||||
./greywall --help # CLI usage
|
||||
```
|
||||
|
||||
## Test
|
||||
|
||||
```bash
|
||||
make test # all unit + integration tests
|
||||
make test-ci # with coverage and race detection (-race -coverprofile)
|
||||
GREYWALL_TEST_NETWORK=1 ./scripts/smoke_test.sh ./greywall # smoke tests
|
||||
```
|
||||
|
||||
## Lint & Format
|
||||
|
||||
```bash
|
||||
make fmt # format with gofumpt
|
||||
make lint # golangci-lint (staticcheck, errcheck, gosec, govet, revive, gofumpt, misspell, etc.)
|
||||
```
|
||||
|
||||
Always run `make fmt && make lint` before committing.
|
||||
|
||||
## Project Structure
|
||||
|
||||
```
|
||||
cmd/greywall/ CLI entry point
|
||||
internal/
|
||||
config/ Configuration loading & validation
|
||||
platform/ OS detection
|
||||
sandbox/ Platform-specific sandboxing (~7k lines)
|
||||
manager.go Sandbox lifecycle orchestration
|
||||
command.go Command blocking/allow lists
|
||||
linux.go bubblewrap + bridges (ProxyBridge, DnsBridge)
|
||||
macos.go sandbox-exec Seatbelt profiles
|
||||
linux_seccomp.go Seccomp BPF syscall filtering
|
||||
linux_landlock.go Landlock filesystem control
|
||||
linux_ebpf.go eBPF violation monitoring
|
||||
sanitize.go Environment variable hardening
|
||||
dangerous.go Protected files/dirs lists
|
||||
pkg/greywall/ Public Go API
|
||||
docs/ Full documentation
|
||||
scripts/ Smoke tests, benchmarks, release
|
||||
```
|
||||
|
||||
## Code Conventions
|
||||
|
||||
- **Language:** Go 1.25+
|
||||
- **Formatter:** `gofumpt` (enforced in CI)
|
||||
- **Linter:** `golangci-lint` v2 (config in `.golangci.yml`)
|
||||
- **Import order:** stdlib, third-party, local (`gitea.app.monadical.io/monadical/greywall`)
|
||||
- **Platform code:** build tags (`//go:build linux`, `//go:build darwin`) with `*_stub.go` for unsupported platforms
|
||||
- **Error handling:** custom error types (e.g., `CommandBlockedError`)
|
||||
- **Logging:** stderr with `[greywall:component]` prefixes
|
||||
- **Config:** JSON with comments (via `tidwall/jsonc`), optional pointer fields for three-state booleans
|
||||
|
||||
## Dependencies
|
||||
|
||||
4 direct deps: `doublestar` (glob matching), `cobra` (CLI), `jsonc` (config parsing), `golang.org/x/sys`.
|
||||
|
||||
Runtime (Linux): `bubblewrap`, `socat`, embedded `tun2socks` v2.5.2.
|
||||
|
||||
## CI
|
||||
|
||||
GitHub Actions workflows: `main.yml` (build/lint/test on Linux+macOS), `release.yml` (GoReleaser + SLSA provenance), `benchmark.yml`.
|
||||
|
||||
## Release
|
||||
|
||||
```bash
|
||||
make release # patch (v0.0.X)
|
||||
make release-minor # minor (v0.X.0)
|
||||
```
|
||||
@@ -1,6 +1,6 @@
|
||||
# Contributing
|
||||
|
||||
Thanks for helping improve `fence`!
|
||||
Thanks for helping improve `greywall`!
|
||||
|
||||
If you have any questions, feel free to open an issue.
|
||||
|
||||
@@ -12,11 +12,11 @@ If you have any questions, feel free to open an issue.
|
||||
- Clone and prepare:
|
||||
|
||||
```bash
|
||||
git clone https://github.com/Use-Tusk/fence
|
||||
cd fence
|
||||
git clone https://gitea.app.monadical.io/monadical/greywall
|
||||
cd greywall
|
||||
make setup # Install deps and lint tools
|
||||
make build # Build the binary
|
||||
./fence --help
|
||||
./greywall --help
|
||||
```
|
||||
|
||||
## Dev workflow
|
||||
@@ -25,7 +25,7 @@ Common targets:
|
||||
|
||||
| Command | Description |
|
||||
|---------|-------------|
|
||||
| `make build` | Build the binary (`./fence`) |
|
||||
| `make build` | Build the binary (`./greywall`) |
|
||||
| `make run` | Build and run |
|
||||
| `make test` | Run tests |
|
||||
| `make test-ci` | Run tests with coverage |
|
||||
@@ -63,14 +63,14 @@ make test-ci
|
||||
|
||||
```bash
|
||||
# Test blocked network request
|
||||
./fence curl https://example.com
|
||||
./greywall curl https://example.com
|
||||
|
||||
# Test with allowed domain
|
||||
echo '{"network":{"allowedDomains":["example.com"]}}' > /tmp/test.json
|
||||
./fence -s /tmp/test.json curl https://example.com
|
||||
./greywall -s /tmp/test.json curl https://example.com
|
||||
|
||||
# Test monitor mode
|
||||
./fence -m -c "touch /etc/test"
|
||||
./greywall -m -c "touch /etc/test"
|
||||
```
|
||||
|
||||
### Testing on Linux
|
||||
@@ -82,7 +82,7 @@ Requires `bubblewrap` and `socat`:
|
||||
sudo apt install bubblewrap socat
|
||||
|
||||
# Test in Colima or VM
|
||||
./fence curl https://example.com
|
||||
./greywall curl https://example.com
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
1
LICENSE
1
LICENSE
@@ -187,6 +187,7 @@
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright 2025 Tusk AI, Inc
|
||||
Copyright 2026 GreyHaven
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
|
||||
77
Makefile
77
Makefile
@@ -3,90 +3,110 @@ GOBUILD=$(GOCMD) build
|
||||
GOCLEAN=$(GOCMD) clean
|
||||
GOTEST=$(GOCMD) test
|
||||
GOMOD=$(GOCMD) mod
|
||||
BINARY_NAME=fence
|
||||
BINARY_NAME=greywall
|
||||
BINARY_UNIX=$(BINARY_NAME)_unix
|
||||
TUN2SOCKS_VERSION=v2.5.2
|
||||
TUN2SOCKS_BIN_DIR=internal/sandbox/bin
|
||||
|
||||
.PHONY: all build build-ci build-linux test test-ci clean deps install-lint-tools setup setup-ci run fmt lint release release-minor help
|
||||
.PHONY: all build build-ci build-linux build-darwin test test-ci clean deps install-lint-tools setup setup-ci run fmt lint release release-minor download-tun2socks help
|
||||
|
||||
all: build
|
||||
|
||||
build:
|
||||
@echo "🔨 Building $(BINARY_NAME)..."
|
||||
$(GOBUILD) -o $(BINARY_NAME) -v ./cmd/fence
|
||||
TUN2SOCKS_PLATFORMS=linux-amd64 linux-arm64 darwin-amd64 darwin-arm64
|
||||
|
||||
build-ci:
|
||||
@echo "🏗️ CI: Building $(BINARY_NAME) with version info..."
|
||||
download-tun2socks:
|
||||
@mkdir -p $(TUN2SOCKS_BIN_DIR)
|
||||
@for platform in $(TUN2SOCKS_PLATFORMS); do \
|
||||
if [ ! -f $(TUN2SOCKS_BIN_DIR)/tun2socks-$$platform ]; then \
|
||||
echo "Downloading tun2socks-$$platform $(TUN2SOCKS_VERSION)..."; \
|
||||
curl -sL "https://github.com/xjasonlyu/tun2socks/releases/download/$(TUN2SOCKS_VERSION)/tun2socks-$$platform.zip" -o /tmp/tun2socks-$$platform.zip; \
|
||||
unzip -o -q /tmp/tun2socks-$$platform.zip -d /tmp/tun2socks-$$platform; \
|
||||
mv /tmp/tun2socks-$$platform/tun2socks-$$platform $(TUN2SOCKS_BIN_DIR)/tun2socks-$$platform; \
|
||||
chmod +x $(TUN2SOCKS_BIN_DIR)/tun2socks-$$platform; \
|
||||
rm -rf /tmp/tun2socks-$$platform.zip /tmp/tun2socks-$$platform; \
|
||||
fi; \
|
||||
done
|
||||
|
||||
build: download-tun2socks
|
||||
@echo "Building $(BINARY_NAME)..."
|
||||
$(GOBUILD) -o $(BINARY_NAME) -v ./cmd/greywall
|
||||
|
||||
build-ci: download-tun2socks
|
||||
@echo "CI: Building $(BINARY_NAME) with version info..."
|
||||
$(eval VERSION := $(shell git describe --tags --always --dirty 2>/dev/null || echo "dev"))
|
||||
$(eval BUILD_TIME := $(shell date -u '+%Y-%m-%dT%H:%M:%SZ'))
|
||||
$(eval GIT_COMMIT := $(shell git rev-parse HEAD 2>/dev/null || echo "unknown"))
|
||||
$(GOBUILD) -ldflags "-s -w -X main.version=$(VERSION) -X main.buildTime=$(BUILD_TIME) -X main.gitCommit=$(GIT_COMMIT)" -o $(BINARY_NAME) -v ./cmd/fence
|
||||
$(GOBUILD) -ldflags "-s -w -X main.version=$(VERSION) -X main.buildTime=$(BUILD_TIME) -X main.gitCommit=$(GIT_COMMIT)" -o $(BINARY_NAME) -v ./cmd/greywall
|
||||
|
||||
test:
|
||||
@echo "🧪 Running tests..."
|
||||
@echo "Running tests..."
|
||||
$(GOTEST) -v ./...
|
||||
|
||||
test-ci:
|
||||
@echo "🧪 CI: Running tests with coverage..."
|
||||
@echo "CI: Running tests with coverage..."
|
||||
$(GOTEST) -v -race -coverprofile=coverage.out ./...
|
||||
|
||||
clean:
|
||||
@echo "🧹 Cleaning..."
|
||||
@echo "Cleaning..."
|
||||
$(GOCLEAN)
|
||||
rm -f $(BINARY_NAME)
|
||||
rm -f $(BINARY_UNIX)
|
||||
rm -f coverage.out
|
||||
rm -f $(TUN2SOCKS_BIN_DIR)/tun2socks-linux-*
|
||||
rm -f $(TUN2SOCKS_BIN_DIR)/tun2socks-darwin-*
|
||||
|
||||
deps:
|
||||
@echo "📦 Downloading dependencies..."
|
||||
@echo "Downloading dependencies..."
|
||||
$(GOMOD) download
|
||||
$(GOMOD) tidy
|
||||
|
||||
build-linux:
|
||||
@echo "🐧 Building for Linux..."
|
||||
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 $(GOBUILD) -o $(BINARY_UNIX) -v ./cmd/fence
|
||||
build-linux: download-tun2socks
|
||||
@echo "Building for Linux..."
|
||||
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 $(GOBUILD) -o $(BINARY_UNIX) -v ./cmd/greywall
|
||||
|
||||
build-darwin:
|
||||
@echo "🍎 Building for macOS..."
|
||||
CGO_ENABLED=0 GOOS=darwin GOARCH=arm64 $(GOBUILD) -o $(BINARY_NAME)_darwin -v ./cmd/fence
|
||||
build-darwin: download-tun2socks
|
||||
@echo "Building for macOS..."
|
||||
CGO_ENABLED=0 GOOS=darwin GOARCH=arm64 $(GOBUILD) -o $(BINARY_NAME)_darwin -v ./cmd/greywall
|
||||
|
||||
install-lint-tools:
|
||||
@echo "📦 Installing linting tools..."
|
||||
@echo "Installing linting tools..."
|
||||
go install mvdan.cc/gofumpt@latest
|
||||
go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest
|
||||
@echo "✅ Linting tools installed"
|
||||
go install github.com/golangci/golangci-lint/v2/cmd/golangci-lint@latest
|
||||
@echo "Linting tools installed"
|
||||
|
||||
setup: deps install-lint-tools
|
||||
@echo "✅ Development environment ready"
|
||||
@echo "Development environment ready"
|
||||
|
||||
setup-ci: deps install-lint-tools
|
||||
@echo "✅ CI environment ready"
|
||||
@echo "CI environment ready"
|
||||
|
||||
run: build
|
||||
./$(BINARY_NAME)
|
||||
|
||||
fmt:
|
||||
@echo "📝 Formatting code..."
|
||||
@echo "Formatting code..."
|
||||
gofumpt -w .
|
||||
|
||||
lint:
|
||||
@echo "🔍 Linting code..."
|
||||
@echo "Linting code..."
|
||||
golangci-lint run --allow-parallel-runners
|
||||
|
||||
release:
|
||||
@echo "🚀 Creating patch release..."
|
||||
@echo "Creating patch release..."
|
||||
./scripts/release.sh patch
|
||||
|
||||
release-minor:
|
||||
@echo "🚀 Creating minor release..."
|
||||
@echo "Creating minor release..."
|
||||
./scripts/release.sh minor
|
||||
|
||||
help:
|
||||
@echo "Available targets:"
|
||||
@echo " all - build (default)"
|
||||
@echo " build - Build the binary"
|
||||
@echo " build - Build the binary (downloads tun2socks if needed)"
|
||||
@echo " build-ci - Build for CI with version info"
|
||||
@echo " build-linux - Build for Linux"
|
||||
@echo " build-darwin - Build for macOS"
|
||||
@echo " download-tun2socks - Download tun2socks binaries for embedding"
|
||||
@echo " test - Run tests"
|
||||
@echo " test-ci - Run tests for CI with coverage"
|
||||
@echo " clean - Clean build artifacts"
|
||||
@@ -100,4 +120,3 @@ help:
|
||||
@echo " release - Create patch release (v0.0.X)"
|
||||
@echo " release-minor - Create minor release (v0.X.0)"
|
||||
@echo " help - Show this help"
|
||||
|
||||
|
||||
148
README.md
148
README.md
@@ -1,32 +1,28 @@
|
||||

|
||||
# Greywall
|
||||
|
||||
<div align="center">
|
||||
**The sandboxing layer of the GreyHaven platform.**
|
||||
|
||||

|
||||
|
||||
</div>
|
||||
|
||||
Fence wraps commands in a sandbox that blocks network access by default and restricts filesystem operations based on configurable rules. It's most useful for running semi-trusted code (package installs, build scripts, CI jobs, unfamiliar repos) with controlled side effects, and it can also complement AI coding agents as defense-in-depth.
|
||||
Greywall wraps commands in a sandbox that blocks network access by default and restricts filesystem operations. On Linux, it uses tun2socks for truly transparent proxying: all TCP/UDP traffic is captured at the kernel level via a TUN device and forwarded through an external SOCKS5 proxy. No application awareness needed.
|
||||
|
||||
```bash
|
||||
# Block all network access (default)
|
||||
fence curl https://example.com # → 403 Forbidden
|
||||
# Block all network access (default — no proxy running = no connectivity)
|
||||
greywall -- curl https://example.com
|
||||
|
||||
# Allow specific domains
|
||||
fence -t code npm install # → uses 'code' template with npm/pypi/etc allowed
|
||||
# Route traffic through an external SOCKS5 proxy
|
||||
greywall --proxy socks5://localhost:1080 -- curl https://example.com
|
||||
|
||||
# Block dangerous commands
|
||||
fence -c "rm -rf /" # → blocked by command deny rules
|
||||
greywall -c "rm -rf /" # → blocked by command deny rules
|
||||
```
|
||||
|
||||
You can also think of Fence as a permission manager for your CLI agents. **Fence works with popular coding agents like Claude Code, Codex, Gemini CLI, Cursor Agent, OpenCode, Factory (Droid) CLI, etc.** See [agents.md](./docs/agents.md) for more details.
|
||||
Greywall also works as a permission manager for CLI agents. See [agents.md](./docs/agents.md) for integration with Claude Code, Codex, Gemini CLI, OpenCode, and others.
|
||||
|
||||
## Install
|
||||
|
||||
**macOS / Linux:**
|
||||
|
||||
```bash
|
||||
curl -fsSL https://raw.githubusercontent.com/Use-Tusk/fence/main/install.sh | sh
|
||||
curl -fsSL https://gitea.app.monadical.io/monadical/greywall/raw/branch/main/install.sh | sh
|
||||
```
|
||||
|
||||
<details>
|
||||
@@ -35,92 +31,134 @@ curl -fsSL https://raw.githubusercontent.com/Use-Tusk/fence/main/install.sh | sh
|
||||
**Go install:**
|
||||
|
||||
```bash
|
||||
go install github.com/Use-Tusk/fence/cmd/fence@latest
|
||||
go install gitea.app.monadical.io/monadical/greywall/cmd/greywall@latest
|
||||
```
|
||||
|
||||
**Build from source:**
|
||||
|
||||
```bash
|
||||
git clone https://github.com/Use-Tusk/fence
|
||||
cd fence
|
||||
go build -o fence ./cmd/fence
|
||||
git clone https://gitea.app.monadical.io/monadical/greywall
|
||||
cd greywall
|
||||
make setup && make build
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
**Additional requirements for Linux:**
|
||||
**Linux dependencies:**
|
||||
|
||||
- `bubblewrap` (for sandboxing)
|
||||
- `socat` (for network bridging)
|
||||
- `bpftrace` (optional, for filesystem violation visibility when monitoring with `-m`)
|
||||
- `bubblewrap` — container-free sandboxing (required)
|
||||
- `socat` — network bridging (required)
|
||||
|
||||
Check dependency status with `greywall --version`.
|
||||
|
||||
## Usage
|
||||
|
||||
### Basic
|
||||
### Basic commands
|
||||
|
||||
```bash
|
||||
# Run command with all network blocked (no domains allowed by default)
|
||||
fence curl https://example.com
|
||||
# Run with all network blocked (default)
|
||||
greywall -- curl https://example.com
|
||||
|
||||
# Run with shell expansion
|
||||
fence -c "echo hello && ls"
|
||||
greywall -c "echo hello && ls"
|
||||
|
||||
# Route through a SOCKS5 proxy
|
||||
greywall --proxy socks5://localhost:1080 -- npm install
|
||||
|
||||
# Expose a port for inbound connections (e.g., dev servers)
|
||||
greywall -p 3000 -c "npm run dev"
|
||||
|
||||
# Enable debug logging
|
||||
fence -d curl https://example.com
|
||||
greywall -d -- curl https://example.com
|
||||
|
||||
# Use a template
|
||||
fence -t code -- claude # Runs Claude Code using `code` template config
|
||||
# Monitor sandbox violations
|
||||
greywall -m -- npm install
|
||||
|
||||
# Monitor mode (shows violations)
|
||||
fence -m npm install
|
||||
# Show available Linux security features
|
||||
greywall --linux-features
|
||||
|
||||
# Show all commands and options
|
||||
fence --help
|
||||
# Show version and dependency status
|
||||
greywall --version
|
||||
```
|
||||
|
||||
### Learning mode
|
||||
|
||||
Greywall can trace a command's filesystem access and generate a config template automatically:
|
||||
|
||||
```bash
|
||||
# Run in learning mode — traces file access via strace
|
||||
greywall --learning -- opencode
|
||||
|
||||
# List generated templates
|
||||
greywall templates list
|
||||
|
||||
# Show a template's content
|
||||
greywall templates show opencode
|
||||
|
||||
# Next run auto-loads the learned template
|
||||
greywall -- opencode
|
||||
```
|
||||
|
||||
### Configuration
|
||||
|
||||
Fence reads from `~/.config/fence/fence.json` by default (or `~/Library/Application Support/fence/fence.json` on macOS).
|
||||
Greywall reads from `~/.config/greywall/greywall.json` by default (or `~/Library/Application Support/greywall/greywall.json` on macOS).
|
||||
|
||||
```json
|
||||
```jsonc
|
||||
{
|
||||
"extends": "code",
|
||||
"network": { "allowedDomains": ["private.company.com"] },
|
||||
"filesystem": { "allowWrite": ["."] },
|
||||
"command": { "deny": ["git push", "npm publish"] }
|
||||
// Route traffic through an external SOCKS5 proxy
|
||||
"network": {
|
||||
"proxyUrl": "socks5://localhost:1080",
|
||||
"dnsAddr": "localhost:5353"
|
||||
},
|
||||
// Control filesystem access
|
||||
"filesystem": {
|
||||
"defaultDenyRead": true,
|
||||
"allowRead": ["~/.config/myapp"],
|
||||
"allowWrite": ["."],
|
||||
"denyWrite": ["~/.ssh/**"],
|
||||
"denyRead": ["~/.ssh/id_*", ".env"]
|
||||
},
|
||||
// Block dangerous commands
|
||||
"command": {
|
||||
"deny": ["git push", "npm publish"]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Use `fence --settings ./custom.json` to specify a different config.
|
||||
Use `greywall --settings ./custom.json` to specify a different config file.
|
||||
|
||||
### Import from Claude Code
|
||||
|
||||
```bash
|
||||
fence import --claude --save
|
||||
```
|
||||
By default (when connected to GreyHaven), traffic routes through the GreyHaven SOCKS5 proxy at `localhost:42052` with DNS via `localhost:42053`.
|
||||
|
||||
## Features
|
||||
|
||||
- **Network isolation** - All outbound blocked by default; allowlist domains via config
|
||||
- **Filesystem restrictions** - Control read/write access paths
|
||||
- **Command blocking** - Deny dangerous commands like `rm -rf /`, `git push`
|
||||
- **SSH Command Filtering** - Control which hosts and commands are allowed over SSH
|
||||
- **Built-in templates** - Pre-configured rulesets for common workflows
|
||||
- **Violation monitoring** - Real-time logging of blocked requests (`-m`)
|
||||
- **Cross-platform** - macOS (sandbox-exec) + Linux (bubblewrap)
|
||||
- **Transparent proxy** — All TCP/UDP traffic captured at the kernel level via tun2socks and routed through an external SOCKS5 proxy (Linux)
|
||||
- **Network isolation** — All outbound blocked by default; traffic only flows when a proxy is available
|
||||
- **Filesystem restrictions** — Deny-by-default read mode, controlled write paths, sensitive file protection
|
||||
- **Learning mode** — Trace filesystem access with strace and auto-generate config templates
|
||||
- **Command blocking** — Deny dangerous commands (`rm -rf /`, `git push`, `shutdown`, etc.)
|
||||
- **SSH filtering** — Control which hosts and commands are allowed over SSH
|
||||
- **Environment hardening** — Strips dangerous env vars (`LD_PRELOAD`, `DYLD_*`, etc.)
|
||||
- **Violation monitoring** — Real-time logging of sandbox violations (`-m`)
|
||||
- **Shell completions** — `greywall completion bash|zsh|fish|powershell`
|
||||
- **Cross-platform** — Linux (bubblewrap + seccomp + Landlock + eBPF) and macOS (sandbox-exec)
|
||||
|
||||
Fence can be used as a Go package or CLI tool.
|
||||
Greywall can also be used as a [Go package](docs/library.md).
|
||||
|
||||
## Documentation
|
||||
|
||||
- [Index](/docs/README.md)
|
||||
- [Documentation Index](docs/README.md)
|
||||
- [Quickstart Guide](docs/quickstart.md)
|
||||
- [Why Greywall](docs/why-greywall.md)
|
||||
- [Configuration Reference](docs/configuration.md)
|
||||
- [Security Model](docs/security-model.md)
|
||||
- [Architecture](ARCHITECTURE.md)
|
||||
- [Linux Security Features](docs/linux-security-features.md)
|
||||
- [AI Agent Integration](docs/agents.md)
|
||||
- [Library Usage (Go)](docs/library.md)
|
||||
- [Examples](examples/)
|
||||
- [Troubleshooting](docs/troubleshooting.md)
|
||||
|
||||
## Attribution
|
||||
|
||||
Greywall is based on [Fence](https://github.com/Use-Tusk/fence) by Use-Tusk.
|
||||
|
||||
Inspired by Anthropic's [sandbox-runtime](https://github.com/anthropic-experimental/sandbox-runtime).
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
|
||||
## Reporting a Vulnerability
|
||||
|
||||
To report a security issue, please email [support@usetusk.ai](mailto:support@usetusk.ai) with a description of the issue, the steps you took to create the issue, affected versions, and, if known, mitigations for the issue. This project follows a 90 day disclosure timeline.
|
||||
To report a security issue, please email [support@greyhaven.co](mailto:support@greyhaven.co) with a description of the issue, the steps you took to create the issue, affected versions, and, if known, mitigations for the issue. This project follows a 90 day disclosure timeline.
|
||||
|
||||
This security policy applies to the latest version of our main branch.
|
||||
|
||||
|
||||
1151
analysis.md
Normal file
1151
analysis.md
Normal file
File diff suppressed because it is too large
Load Diff
Binary file not shown.
|
Before Width: | Height: | Size: 407 KiB |
@@ -1,577 +0,0 @@
|
||||
// Package main implements the fence CLI.
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"os/signal"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
|
||||
"github.com/Use-Tusk/fence/internal/config"
|
||||
"github.com/Use-Tusk/fence/internal/importer"
|
||||
"github.com/Use-Tusk/fence/internal/platform"
|
||||
"github.com/Use-Tusk/fence/internal/sandbox"
|
||||
"github.com/Use-Tusk/fence/internal/templates"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// Build-time variables, injected by the linker via -ldflags
// (e.g. -X main.version=... -X main.buildTime=... -X main.gitCommit=...).
var (
	version   = "dev"     // release version; "dev" for local builds
	buildTime = "unknown" // build timestamp
	gitCommit = "unknown" // git commit hash the binary was built from
)
|
||||
|
||||
// Root-command flag values, bound to cobra flags in main().
var (
	debug         bool     // -d/--debug: verbose diagnostics on stderr
	monitor       bool     // -m/--monitor: log sandbox violations
	settingsPath  string   // -s/--settings: explicit config file path
	templateName  string   // -t/--template: built-in template name
	listTemplates bool     // --list-templates: print templates and exit
	cmdString     string   // -c: command string run via the shell (like sh -c)
	exposePorts   []string // -p/--port: ports exposed for inbound connections
	exitCode      int      // final process exit code, passed to os.Exit in main
	showVersion   bool     // -v/--version: print version info and exit
	linuxFeatures bool     // --linux-features: print Linux security features and exit
)
|
||||
|
||||
// main is the CLI entry point. It special-cases the internal
// --landlock-apply wrapper mode (which must bypass cobra), builds the
// cobra root command with its flags and subcommands, executes it, and
// finally exits with the exit code recorded by runCommand.
func main() {
	// Check for internal --landlock-apply mode (used inside sandbox)
	// This must be checked before cobra to avoid flag conflicts
	if len(os.Args) >= 2 && os.Args[1] == "--landlock-apply" {
		runLandlockWrapper()
		return
	}

	rootCmd := &cobra.Command{
		Use:   "fence [flags] -- [command...]",
		Short: "Run commands in a sandbox with network and filesystem restrictions",
		Long: `fence is a command-line tool that runs commands in a sandboxed environment
with network and filesystem restrictions.

By default, all network access is blocked. Configure allowed domains in
~/.config/fence/fence.json (or ~/Library/Application Support/fence/fence.json on macOS)
or pass a settings file with --settings, or use a built-in template with --template.

Examples:
  fence curl https://example.com          # Will be blocked (no domains allowed)
  fence -- curl -s https://example.com    # Use -- to separate fence flags from command
  fence -c "echo hello && ls"             # Run with shell expansion
  fence --settings config.json npm install
  fence -t npm-install npm install        # Use built-in npm-install template
  fence -t ai-coding-agents -- agent-cmd  # Use AI coding agents template
  fence -p 3000 -c "npm run dev"          # Expose port 3000 for inbound connections
  fence --list-templates                  # Show available built-in templates

Configuration file format:
  {
    "network": {
      "allowedDomains": ["github.com", "*.npmjs.org"],
      "deniedDomains": []
    },
    "filesystem": {
      "denyRead": [],
      "allowWrite": ["."],
      "denyWrite": []
    },
    "command": {
      "deny": ["git push", "npm publish"]
    }
  }`,
		RunE:          runCommand,
		SilenceUsage:  true,
		SilenceErrors: true,
		Args:          cobra.ArbitraryArgs,
	}

	rootCmd.Flags().BoolVarP(&debug, "debug", "d", false, "Enable debug logging")
	rootCmd.Flags().BoolVarP(&monitor, "monitor", "m", false, "Monitor and log sandbox violations (macOS: log stream, all: proxy denials)")
	rootCmd.Flags().StringVarP(&settingsPath, "settings", "s", "", "Path to settings file (default: OS config directory)")
	rootCmd.Flags().StringVarP(&templateName, "template", "t", "", "Use built-in template (e.g., ai-coding-agents, npm-install)")
	rootCmd.Flags().BoolVar(&listTemplates, "list-templates", false, "List available templates")
	rootCmd.Flags().StringVarP(&cmdString, "c", "c", "", "Run command string directly (like sh -c)")
	rootCmd.Flags().StringArrayVarP(&exposePorts, "port", "p", nil, "Expose port for inbound connections (can be used multiple times)")
	rootCmd.Flags().BoolVarP(&showVersion, "version", "v", false, "Show version information")
	rootCmd.Flags().BoolVar(&linuxFeatures, "linux-features", false, "Show available Linux security features and exit")

	// Allow fence flags to appear before or after the wrapped command.
	rootCmd.Flags().SetInterspersed(true)

	rootCmd.AddCommand(newImportCmd())
	rootCmd.AddCommand(newCompletionCmd(rootCmd))

	if err := rootCmd.Execute(); err != nil {
		fmt.Fprintf(os.Stderr, "Error: %v\n", err)
		exitCode = 1
	}
	// Exit here (not inside runCommand) so deferred cleanup has already run.
	os.Exit(exitCode)
}
|
||||
|
||||
// runCommand is the RunE handler for the root command. It resolves the
// effective config (template > --settings file > default path), initializes
// the sandbox manager, wraps the user command, and runs it with a hardened
// environment, forwarding SIGINT/SIGTERM to the child and propagating the
// child's exit code via the package-level exitCode variable so that
// deferred cleanup still runs before main calls os.Exit.
func runCommand(cmd *cobra.Command, args []string) error {
	// --version: print build metadata and return without sandboxing anything.
	if showVersion {
		fmt.Printf("fence - lightweight, container-free sandbox for running untrusted commands\n")
		fmt.Printf(" Version: %s\n", version)
		fmt.Printf(" Built: %s\n", buildTime)
		fmt.Printf(" Commit: %s\n", gitCommit)
		return nil
	}

	// --linux-features: report kernel feature availability and exit.
	if linuxFeatures {
		sandbox.PrintLinuxFeatures()
		return nil
	}

	// --list-templates: print the built-in template catalog and exit.
	if listTemplates {
		printTemplates()
		return nil
	}

	// Resolve the command to run: -c takes precedence over positional args.
	var command string
	switch {
	case cmdString != "":
		command = cmdString
	case len(args) > 0:
		command = strings.Join(args, " ")
	default:
		return fmt.Errorf("no command specified. Use -c <command> or provide command arguments")
	}

	if debug {
		fmt.Fprintf(os.Stderr, "[fence] Command: %s\n", command)
	}

	// Validate the -p port list; each entry must be a number in 1-65535.
	var ports []int
	for _, p := range exposePorts {
		port, err := strconv.Atoi(p)
		if err != nil || port < 1 || port > 65535 {
			return fmt.Errorf("invalid port: %s", p)
		}
		ports = append(ports, port)
	}

	if debug && len(ports) > 0 {
		fmt.Fprintf(os.Stderr, "[fence] Exposing ports: %v\n", ports)
	}

	// Load config: template > settings file > default path
	var cfg *config.Config
	var err error

	switch {
	case templateName != "":
		cfg, err = templates.Load(templateName)
		if err != nil {
			return fmt.Errorf("failed to load template: %w\nUse --list-templates to see available templates", err)
		}
		if debug {
			fmt.Fprintf(os.Stderr, "[fence] Using template: %s\n", templateName)
		}
	case settingsPath != "":
		cfg, err = config.Load(settingsPath)
		if err != nil {
			return fmt.Errorf("failed to load config: %w", err)
		}
		// "extends" references are resolved relative to the settings file.
		absPath, _ := filepath.Abs(settingsPath)
		cfg, err = templates.ResolveExtendsWithBaseDir(cfg, filepath.Dir(absPath))
		if err != nil {
			return fmt.Errorf("failed to resolve extends: %w", err)
		}
	default:
		configPath := config.DefaultConfigPath()
		cfg, err = config.Load(configPath)
		if err != nil {
			return fmt.Errorf("failed to load config: %w", err)
		}
		if cfg == nil {
			// No config file on disk: fall back to the built-in default,
			// which blocks all network access.
			if debug {
				fmt.Fprintf(os.Stderr, "[fence] No config found at %s, using default (block all network)\n", configPath)
			}
			cfg = config.Default()
		} else {
			cfg, err = templates.ResolveExtendsWithBaseDir(cfg, filepath.Dir(configPath))
			if err != nil {
				return fmt.Errorf("failed to resolve extends: %w", err)
			}
		}
	}

	manager := sandbox.NewManager(cfg, debug, monitor)
	manager.SetExposedPorts(ports)
	defer manager.Cleanup()

	if err := manager.Initialize(); err != nil {
		return fmt.Errorf("failed to initialize sandbox: %w", err)
	}

	// Optional violation monitor (-m). Best-effort: a failure to start it
	// only warns; it never aborts the sandboxed run.
	var logMonitor *sandbox.LogMonitor
	if monitor {
		logMonitor = sandbox.NewLogMonitor(sandbox.GetSessionSuffix())
		if logMonitor != nil {
			if err := logMonitor.Start(); err != nil {
				fmt.Fprintf(os.Stderr, "[fence] Warning: failed to start log monitor: %v\n", err)
			} else {
				defer logMonitor.Stop()
			}
		}
	}

	sandboxedCommand, err := manager.WrapCommand(command)
	if err != nil {
		return fmt.Errorf("failed to wrap command: %w", err)
	}

	if debug {
		fmt.Fprintf(os.Stderr, "[fence] Sandboxed command: %s\n", sandboxedCommand)
	}

	// Build a child environment with dangerous variables stripped
	// (LD_PRELOAD and similar — see sandbox.GetHardenedEnv).
	hardenedEnv := sandbox.GetHardenedEnv()
	if debug {
		if stripped := sandbox.GetStrippedEnvVars(os.Environ()); len(stripped) > 0 {
			fmt.Fprintf(os.Stderr, "[fence] Stripped dangerous env vars: %v\n", stripped)
		}
	}

	execCmd := exec.Command("sh", "-c", sandboxedCommand) //nolint:gosec // sandboxedCommand is constructed from user input - intentional
	execCmd.Env = hardenedEnv
	execCmd.Stdin = os.Stdin
	execCmd.Stdout = os.Stdout
	execCmd.Stderr = os.Stderr

	sigChan := make(chan os.Signal, 1)
	signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)

	// Start the command (non-blocking) so we can get the PID
	if err := execCmd.Start(); err != nil {
		return fmt.Errorf("failed to start command: %w", err)
	}

	// Start Linux monitors (eBPF tracing for filesystem violations)
	var linuxMonitors *sandbox.LinuxMonitors
	if monitor && execCmd.Process != nil {
		linuxMonitors, _ = sandbox.StartLinuxMonitor(execCmd.Process.Pid, sandbox.LinuxSandboxOptions{
			Monitor: true,
			Debug:   debug,
			UseEBPF: true,
		})
		if linuxMonitors != nil {
			defer linuxMonitors.Stop()
		}
	}

	// Note: Landlock is NOT applied here because:
	// 1. The sandboxed command is already running (Landlock only affects future children)
	// 2. Proper Landlock integration requires applying restrictions inside the sandbox
	// For now, filesystem isolation relies on bwrap mount namespaces.
	// Landlock code exists for future integration (e.g., via a wrapper binary).

	// Forward caught signals to the child: the first SIGINT/SIGTERM is
	// passed through for graceful shutdown, any subsequent one force-kills.
	go func() {
		sigCount := 0
		for sig := range sigChan {
			sigCount++
			if execCmd.Process == nil {
				continue
			}
			// First signal: graceful termination; second signal: force kill
			if sigCount >= 2 {
				_ = execCmd.Process.Kill()
			} else {
				_ = execCmd.Process.Signal(sig)
			}
		}
	}()

	// Wait for command to finish
	if err := execCmd.Wait(); err != nil {
		if exitErr, ok := err.(*exec.ExitError); ok {
			// Set exit code but don't os.Exit() here - let deferred cleanup run
			exitCode = exitErr.ExitCode()
			return nil
		}
		return fmt.Errorf("command failed: %w", err)
	}

	return nil
}
|
||||
|
||||
// newImportCmd creates the "import" subcommand, which converts permission
// settings from other tools (currently Claude Code) into a fence config.
// Depending on flags, the result is printed to stdout as JSON, written to
// the default config path (--save), or written to an explicit file (-o).
func newImportCmd() *cobra.Command {
	// Flag storage local to this subcommand.
	var (
		claudeMode bool   // --claude: import from Claude Code settings
		inputFile  string // -f: source settings file override
		outputFile string // -o: explicit output path
		saveFlag   bool   // --save: write to the default config path
		forceFlag  bool   // -y: overwrite without prompting
		extendTmpl string // --extend: template the imported config extends
		noExtend   bool   // --no-extend: emit a minimal config
	)

	cmd := &cobra.Command{
		Use:   "import",
		Short: "Import settings from other tools",
		Long: `Import permission settings from other tools and convert them to fence config.

Currently supported sources:
  --claude    Import from Claude Code settings

By default, imports extend the "code" template which provides sensible defaults
for network access (npm, GitHub, LLM providers) and filesystem protections.
Use --no-extend for a minimal config, or --extend to choose a different template.

Examples:
  # Preview import (prints JSON to stdout)
  fence import --claude

  # Save to the default config path
  #   Linux: ~/.config/fence/fence.json
  #   macOS: ~/Library/Application Support/fence/fence.json
  fence import --claude --save

  # Save to a specific output file
  fence import --claude -o ./fence.json

  # Import from a specific Claude Code settings file
  fence import --claude -f ~/.claude/settings.json --save

  # Import without extending any template (minimal config)
  fence import --claude --no-extend --save

  # Import and extend a different template
  fence import --claude --extend local-dev-server --save`,
		RunE: func(cmd *cobra.Command, args []string) error {
			if !claudeMode {
				return fmt.Errorf("no import source specified. Use --claude to import from Claude Code")
			}

			// Apply template-extension choices on top of the defaults.
			opts := importer.DefaultImportOptions()
			if noExtend {
				opts.Extends = ""
			} else if extendTmpl != "" {
				opts.Extends = extendTmpl
			}

			result, err := importer.ImportFromClaude(inputFile, opts)
			if err != nil {
				return fmt.Errorf("failed to import Claude settings: %w", err)
			}

			// Surface non-fatal conversion warnings before any output.
			for _, warning := range result.Warnings {
				fmt.Fprintf(os.Stderr, "Warning: %s\n", warning)
			}
			if len(result.Warnings) > 0 {
				fmt.Fprintln(os.Stderr)
			}

			// Determine output destination
			var destPath string
			if saveFlag {
				destPath = config.DefaultConfigPath()
			} else if outputFile != "" {
				destPath = outputFile
			}

			if destPath != "" {
				// Confirm before clobbering an existing file unless -y was given.
				if !forceFlag {
					if _, err := os.Stat(destPath); err == nil {
						fmt.Printf("File %q already exists. Overwrite? [y/N] ", destPath)
						reader := bufio.NewReader(os.Stdin)
						response, _ := reader.ReadString('\n')
						response = strings.TrimSpace(strings.ToLower(response))
						if response != "y" && response != "yes" {
							fmt.Println("Aborted.")
							return nil
						}
					}
				}

				if err := os.MkdirAll(filepath.Dir(destPath), 0o750); err != nil {
					return fmt.Errorf("failed to create config directory: %w", err)
				}

				if err := importer.WriteConfig(result.Config, destPath); err != nil {
					return err
				}
				fmt.Printf("Imported %d rules from %s\n", result.RulesImported, result.SourcePath)
				fmt.Printf("Written to %q\n", destPath)
			} else {
				// Print clean JSON to stdout, helpful info to stderr (don't interfere with piping)
				data, err := importer.MarshalConfigJSON(result.Config)
				if err != nil {
					return fmt.Errorf("failed to marshal config: %w", err)
				}
				fmt.Println(string(data))
				if result.Config.Extends != "" {
					fmt.Fprintf(os.Stderr, "\n# Extends %q - inherited rules not shown\n", result.Config.Extends)
				}
				fmt.Fprintf(os.Stderr, "# Imported %d rules from %s\n", result.RulesImported, result.SourcePath)
				fmt.Fprintf(os.Stderr, "# Use --save to write to the default config path\n")
			}

			return nil
		},
	}

	cmd.Flags().BoolVar(&claudeMode, "claude", false, "Import from Claude Code settings")
	cmd.Flags().StringVarP(&inputFile, "file", "f", "", "Path to settings file (default: ~/.claude/settings.json for --claude)")
	cmd.Flags().StringVarP(&outputFile, "output", "o", "", "Output file path")
	cmd.Flags().BoolVar(&saveFlag, "save", false, "Save to the default config path")
	cmd.Flags().BoolVarP(&forceFlag, "force", "y", false, "Overwrite existing file without prompting")
	cmd.Flags().StringVar(&extendTmpl, "extend", "", "Template to extend (default: code)")
	cmd.Flags().BoolVar(&noExtend, "no-extend", false, "Don't extend any template (minimal config)")
	cmd.MarkFlagsMutuallyExclusive("extend", "no-extend")
	cmd.MarkFlagsMutuallyExclusive("save", "output")

	return cmd
}
|
||||
|
||||
// newCompletionCmd creates the "completion" subcommand, which generates a
// shell completion script for the given shell on stdout. It takes the root
// command so the generated script covers the full command tree.
func newCompletionCmd(rootCmd *cobra.Command) *cobra.Command {
	cmd := &cobra.Command{
		Use:   "completion [bash|zsh|fish|powershell]",
		Short: "Generate shell completion scripts",
		Long: `Generate shell completion scripts for fence.

Examples:
  # Bash (load in current session)
  source <(fence completion bash)

  # Zsh (load in current session)
  source <(fence completion zsh)

  # Fish (load in current session)
  fence completion fish | source

  # PowerShell (load in current session)
  fence completion powershell | Out-String | Invoke-Expression

To persist completions, redirect output to the appropriate completions
directory for your shell (e.g., /etc/bash_completion.d/ for bash,
${fpath[1]}/_fence for zsh, ~/.config/fish/completions/fence.fish for fish).
`,
		DisableFlagsInUseLine: true,
		ValidArgs:             []string{"bash", "zsh", "fish", "powershell"},
		Args:                  cobra.ExactArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			// Dispatch on the requested shell; each Gen* writes the script
			// to stdout. The default arm guards against values not in
			// ValidArgs (ExactArgs alone does not restrict the value).
			switch args[0] {
			case "bash":
				return rootCmd.GenBashCompletionV2(os.Stdout, true)
			case "zsh":
				return rootCmd.GenZshCompletion(os.Stdout)
			case "fish":
				return rootCmd.GenFishCompletion(os.Stdout, true)
			case "powershell":
				return rootCmd.GenPowerShellCompletionWithDesc(os.Stdout)
			default:
				return fmt.Errorf("unsupported shell: %s", args[0])
			}
		},
	}
	return cmd
}
|
||||
|
||||
// printTemplates prints all available templates to stdout.
|
||||
func printTemplates() {
|
||||
fmt.Println("Available templates:")
|
||||
fmt.Println()
|
||||
for _, t := range templates.List() {
|
||||
fmt.Printf(" %-20s %s\n", t.Name, t.Description)
|
||||
}
|
||||
fmt.Println()
|
||||
fmt.Println("Usage: fence -t <template> <command>")
|
||||
fmt.Println("Example: fence -t code -- code")
|
||||
}
|
||||
|
||||
// runLandlockWrapper runs in "wrapper mode" inside the sandbox.
|
||||
// It applies Landlock restrictions and then execs the user command.
|
||||
// Usage: fence --landlock-apply [--debug] -- <command...>
|
||||
// Config is passed via FENCE_CONFIG_JSON environment variable.
|
||||
func runLandlockWrapper() {
|
||||
// Parse arguments: --landlock-apply [--debug] -- <command...>
|
||||
args := os.Args[2:] // Skip "fence" and "--landlock-apply"
|
||||
|
||||
var debugMode bool
|
||||
var cmdStart int
|
||||
|
||||
for i := 0; i < len(args); i++ {
|
||||
switch args[i] {
|
||||
case "--debug":
|
||||
debugMode = true
|
||||
case "--":
|
||||
cmdStart = i + 1
|
||||
goto parseCommand
|
||||
default:
|
||||
// Assume rest is the command
|
||||
cmdStart = i
|
||||
goto parseCommand
|
||||
}
|
||||
}
|
||||
|
||||
parseCommand:
|
||||
if cmdStart >= len(args) {
|
||||
fmt.Fprintf(os.Stderr, "[fence:landlock-wrapper] Error: no command specified\n")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
command := args[cmdStart:]
|
||||
|
||||
if debugMode {
|
||||
fmt.Fprintf(os.Stderr, "[fence:landlock-wrapper] Applying Landlock restrictions\n")
|
||||
}
|
||||
|
||||
// Only apply Landlock on Linux
|
||||
if platform.Detect() == platform.Linux {
|
||||
// Load config from environment variable (passed by parent fence process)
|
||||
var cfg *config.Config
|
||||
if configJSON := os.Getenv("FENCE_CONFIG_JSON"); configJSON != "" {
|
||||
cfg = &config.Config{}
|
||||
if err := json.Unmarshal([]byte(configJSON), cfg); err != nil {
|
||||
if debugMode {
|
||||
fmt.Fprintf(os.Stderr, "[fence:landlock-wrapper] Warning: failed to parse config: %v\n", err)
|
||||
}
|
||||
cfg = nil
|
||||
}
|
||||
}
|
||||
if cfg == nil {
|
||||
cfg = config.Default()
|
||||
}
|
||||
|
||||
// Get current working directory for relative path resolution
|
||||
cwd, _ := os.Getwd()
|
||||
|
||||
// Apply Landlock restrictions
|
||||
err := sandbox.ApplyLandlockFromConfig(cfg, cwd, nil, debugMode)
|
||||
if err != nil {
|
||||
if debugMode {
|
||||
fmt.Fprintf(os.Stderr, "[fence:landlock-wrapper] Warning: Landlock not applied: %v\n", err)
|
||||
}
|
||||
// Continue without Landlock - bwrap still provides isolation
|
||||
} else if debugMode {
|
||||
fmt.Fprintf(os.Stderr, "[fence:landlock-wrapper] Landlock restrictions applied\n")
|
||||
}
|
||||
}
|
||||
|
||||
// Find the executable
|
||||
execPath, err := exec.LookPath(command[0])
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "[fence:landlock-wrapper] Error: command not found: %s\n", command[0])
|
||||
os.Exit(127)
|
||||
}
|
||||
|
||||
if debugMode {
|
||||
fmt.Fprintf(os.Stderr, "[fence:landlock-wrapper] Exec: %s %v\n", execPath, command[1:])
|
||||
}
|
||||
|
||||
// Sanitize environment (strips LD_PRELOAD, etc.)
|
||||
hardenedEnv := sandbox.FilterDangerousEnv(os.Environ())
|
||||
|
||||
// Exec the command (replaces this process)
|
||||
err = syscall.Exec(execPath, command, hardenedEnv) //nolint:gosec
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "[fence:landlock-wrapper] Exec failed: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
301
cmd/greywall/daemon.go
Normal file
301
cmd/greywall/daemon.go
Normal file
@@ -0,0 +1,301 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/kardianos/service"
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"gitea.app.monadical.io/monadical/greywall/internal/daemon"
|
||||
"gitea.app.monadical.io/monadical/greywall/internal/sandbox"
|
||||
)
|
||||
|
||||
// newDaemonCmd creates the daemon subcommand tree:
//
//	greywall daemon
//	  install   - Install the LaunchDaemon (requires root)
//	  uninstall - Uninstall the LaunchDaemon (requires root)
//	  run       - Run the daemon (called by LaunchDaemon plist)
//	  start     - Start the daemon service
//	  stop      - Stop the daemon service
//	  restart   - Restart the daemon service
//	  status    - Show daemon status
//
// The parent command carries only documentation; all behavior lives in
// the subcommands registered below.
func newDaemonCmd() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "daemon",
		Short: "Manage the greywall background daemon",
		Long: `Manage the greywall LaunchDaemon for transparent network sandboxing on macOS.

The daemon runs as a system service and manages the tun2socks tunnel, DNS relay,
and pf rules that enable transparent proxy routing for sandboxed processes.

Commands:
  sudo greywall daemon install     Install and start the daemon
  sudo greywall daemon uninstall   Stop and remove the daemon
  sudo greywall daemon start       Start the daemon service
  sudo greywall daemon stop        Stop the daemon service
  sudo greywall daemon restart     Restart the daemon service
  greywall daemon status           Check daemon status
  greywall daemon run              Run the daemon (used by LaunchDaemon)`,
	}

	// Register the lifecycle subcommands; each is built by its own
	// constructor so they can hold their own flag state.
	cmd.AddCommand(
		newDaemonInstallCmd(),
		newDaemonUninstallCmd(),
		newDaemonRunCmd(),
		newDaemonStartCmd(),
		newDaemonStopCmd(),
		newDaemonRestartCmd(),
		newDaemonStatusCmd(),
	)

	return cmd
}
|
||||
|
||||
// newDaemonInstallCmd creates the "daemon install" subcommand. It resolves
// the running binary's real path, extracts the embedded tun2socks helper to
// a temp file, and delegates the actual installation to daemon.Install.
func newDaemonInstallCmd() *cobra.Command {
	return &cobra.Command{
		Use:   "install",
		Short: "Install the greywall LaunchDaemon (requires root)",
		Long: `Install greywall as a macOS LaunchDaemon. This command:
  1. Creates a system user (_greywall) for sandboxed process isolation
  2. Copies the greywall binary to /usr/local/bin/greywall
  3. Extracts and installs the tun2socks binary
  4. Installs a LaunchDaemon plist for automatic startup
  5. Loads and starts the daemon

Requires root privileges: sudo greywall daemon install`,
		RunE: func(cmd *cobra.Command, args []string) error {
			// Resolve the path of the currently-running binary so it can
			// be copied into place; EvalSymlinks gets the real file, not
			// a symlink that might later dangle.
			exePath, err := os.Executable()
			if err != nil {
				return fmt.Errorf("failed to determine executable path: %w", err)
			}
			exePath, err = filepath.EvalSymlinks(exePath)
			if err != nil {
				return fmt.Errorf("failed to resolve executable path: %w", err)
			}

			// Extract embedded tun2socks binary to a temp file.
			tun2socksPath, err := sandbox.ExtractTun2Socks()
			if err != nil {
				return fmt.Errorf("failed to extract tun2socks: %w", err)
			}
			// The installer copies the file; the temp extraction is
			// removed once Install returns.
			defer os.Remove(tun2socksPath) //nolint:errcheck // temp file cleanup

			if err := daemon.Install(exePath, tun2socksPath, debug); err != nil {
				return err
			}

			fmt.Println()
			fmt.Println("To check status: greywall daemon status")
			fmt.Println("To uninstall: sudo greywall daemon uninstall")
			return nil
		},
	}
}
|
||||
|
||||
// newDaemonUninstallCmd creates the "daemon uninstall" subcommand.
//
// Unless -f/--force is given, the command prints everything that will be
// removed and asks for interactive confirmation before delegating to
// daemon.Uninstall.
func newDaemonUninstallCmd() *cobra.Command {
	// force skips the interactive confirmation prompt (-f/--force).
	var force bool

	cmd := &cobra.Command{
		Use:   "uninstall",
		Short: "Uninstall the greywall LaunchDaemon (requires root)",
		Long: `Uninstall the greywall LaunchDaemon. This command:
1. Stops and unloads the daemon
2. Removes the LaunchDaemon plist
3. Removes installed files
4. Removes the _greywall system user and group

Requires root privileges: sudo greywall daemon uninstall`,
		RunE: func(cmd *cobra.Command, args []string) error {
			if !force {
				// Show exactly what will be removed before prompting.
				fmt.Println("The following will be removed:")
				fmt.Printf(" - LaunchDaemon plist: %s\n", daemon.LaunchDaemonPlistPath)
				fmt.Printf(" - Binary: %s\n", daemon.InstallBinaryPath)
				fmt.Printf(" - Lib directory: %s\n", daemon.InstallLibDir)
				fmt.Printf(" - Socket: %s\n", daemon.DefaultSocketPath)
				fmt.Printf(" - Sudoers file: %s\n", daemon.SudoersFilePath)
				fmt.Printf(" - System user/group: %s\n", daemon.SandboxUserName)
				fmt.Println()
				fmt.Print("Proceed with uninstall? [y/N] ")

				// Read error is deliberately ignored: a failed/EOF read
				// yields a non-"y" answer and falls through to cancel.
				reader := bufio.NewReader(os.Stdin)
				answer, _ := reader.ReadString('\n')
				answer = strings.TrimSpace(strings.ToLower(answer))
				if answer != "y" && answer != "yes" {
					fmt.Println("Uninstall cancelled.")
					return nil
				}
			}

			if err := daemon.Uninstall(debug); err != nil {
				return err
			}

			fmt.Println()
			fmt.Println("The greywall daemon has been uninstalled.")
			return nil
		},
	}

	cmd.Flags().BoolVarP(&force, "force", "f", false, "Skip confirmation prompt")
	return cmd
}
|
||||
|
||||
// newDaemonRunCmd creates the "daemon run" subcommand. This is invoked by
|
||||
// the LaunchDaemon plist and should not normally be called manually.
|
||||
func newDaemonRunCmd() *cobra.Command {
|
||||
return &cobra.Command{
|
||||
Use: "run",
|
||||
Short: "Run the daemon process (called by LaunchDaemon)",
|
||||
Hidden: true, // Not intended for direct user invocation.
|
||||
RunE: runDaemon,
|
||||
}
|
||||
}
|
||||
|
||||
// newDaemonStartCmd creates the "daemon start" subcommand.
|
||||
func newDaemonStartCmd() *cobra.Command {
|
||||
return &cobra.Command{
|
||||
Use: "start",
|
||||
Short: "Start the daemon service",
|
||||
Long: `Start the greywall daemon service. Requires root privileges.`,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return daemonControl("start")
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// newDaemonStopCmd creates the "daemon stop" subcommand.
|
||||
func newDaemonStopCmd() *cobra.Command {
|
||||
return &cobra.Command{
|
||||
Use: "stop",
|
||||
Short: "Stop the daemon service",
|
||||
Long: `Stop the greywall daemon service. Requires root privileges.`,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return daemonControl("stop")
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// newDaemonRestartCmd creates the "daemon restart" subcommand.
|
||||
func newDaemonRestartCmd() *cobra.Command {
|
||||
return &cobra.Command{
|
||||
Use: "restart",
|
||||
Short: "Restart the daemon service",
|
||||
Long: `Restart the greywall daemon service. Requires root privileges.`,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return daemonControl("restart")
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// daemonControl sends a control action (start/stop/restart) to the daemon
|
||||
// service via kardianos/service.
|
||||
func daemonControl(action string) error {
|
||||
p := daemon.NewProgram(daemon.DefaultSocketPath, daemon.DefaultTun2socksPath(), debug)
|
||||
s, err := service.New(p, daemon.NewServiceConfig())
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create service: %w", err)
|
||||
}
|
||||
if err := service.Control(s, action); err != nil {
|
||||
return fmt.Errorf("failed to %s daemon: %w", action, err)
|
||||
}
|
||||
fmt.Printf("Daemon %sed successfully.\n", action)
|
||||
return nil
|
||||
}
|
||||
|
||||
// newDaemonStatusCmd creates the "daemon status" subcommand.
//
// Status is read-only and does not require root: it reports whether the
// daemon is installed, its service state, and the key install paths.
func newDaemonStatusCmd() *cobra.Command {
	return &cobra.Command{
		Use:   "status",
		Short: "Show the daemon status",
		Long:  `Check whether the greywall daemon is installed and running. Does not require root.`,
		RunE: func(cmd *cobra.Command, args []string) error {
			installed := daemon.IsInstalled()

			// Try kardianos/service status first for reliable state detection.
			serviceState := daemonServiceState()

			running := serviceState == "running"

			fmt.Printf("Greywall daemon status:\n")
			fmt.Printf(" Installed: %s\n", boolStatus(installed))
			fmt.Printf(" Running: %s\n", boolStatus(running))
			fmt.Printf(" Service: %s\n", serviceState)
			fmt.Printf(" Plist: %s\n", daemon.LaunchDaemonPlistPath)
			fmt.Printf(" Binary: %s\n", daemon.InstallBinaryPath)
			fmt.Printf(" User: %s\n", daemon.SandboxUserName)
			fmt.Printf(" Group: %s (pf routing)\n", daemon.SandboxGroupName)
			fmt.Printf(" Sudoers: %s\n", daemon.SudoersFilePath)
			fmt.Printf(" Socket: %s\n", daemon.DefaultSocketPath)

			// Actionable hints for the two common unhealthy states.
			if !installed {
				fmt.Println()
				fmt.Println("The daemon is not installed. Run: sudo greywall daemon install")
			} else if !running {
				fmt.Println()
				fmt.Println("The daemon is installed but not running.")
				fmt.Printf("Check logs: cat /var/log/greywall.log\n")
				fmt.Printf("Start it: sudo greywall daemon start\n")
			}

			return nil
		},
	}
}
|
||||
|
||||
// runDaemon is the main entry point for the daemon process. It uses
|
||||
// kardianos/service to manage the lifecycle, handling signals and
|
||||
// calling Start/Stop on the program.
|
||||
func runDaemon(cmd *cobra.Command, args []string) error {
|
||||
p := daemon.NewProgram(daemon.DefaultSocketPath, daemon.DefaultTun2socksPath(), debug)
|
||||
s, err := service.New(p, daemon.NewServiceConfig())
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create service: %w", err)
|
||||
}
|
||||
return s.Run()
|
||||
}
|
||||
|
||||
// daemonServiceState returns the daemon's service state as a string.
|
||||
// It tries kardianos/service status first, then falls back to socket check.
|
||||
func daemonServiceState() string {
|
||||
p := daemon.NewProgram(daemon.DefaultSocketPath, daemon.DefaultTun2socksPath(), debug)
|
||||
s, err := service.New(p, daemon.NewServiceConfig())
|
||||
if err != nil {
|
||||
if daemon.IsRunning() {
|
||||
return "running"
|
||||
}
|
||||
return "stopped"
|
||||
}
|
||||
|
||||
status, err := s.Status()
|
||||
if err != nil {
|
||||
// Fall back to socket check.
|
||||
if daemon.IsRunning() {
|
||||
return "running"
|
||||
}
|
||||
return "stopped"
|
||||
}
|
||||
|
||||
switch status {
|
||||
case service.StatusRunning:
|
||||
return "running"
|
||||
case service.StatusStopped:
|
||||
return "stopped"
|
||||
default:
|
||||
return "unknown"
|
||||
}
|
||||
}
|
||||
|
||||
// boolStatus renders a boolean as a human-friendly "yes"/"no" string for
// status output.
func boolStatus(b bool) string {
	if !b {
		return "no"
	}
	return "yes"
}
|
||||
632
cmd/greywall/main.go
Normal file
632
cmd/greywall/main.go
Normal file
@@ -0,0 +1,632 @@
|
||||
// Package main implements the greywall CLI.
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"os"
|
||||
"os/exec"
|
||||
"os/signal"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
|
||||
"gitea.app.monadical.io/monadical/greywall/internal/config"
|
||||
"gitea.app.monadical.io/monadical/greywall/internal/platform"
|
||||
"gitea.app.monadical.io/monadical/greywall/internal/sandbox"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// Build-time variables (set via -ldflags)
var (
	version   = "dev"     // release version; "dev" for local builds
	buildTime = "unknown" // timestamp stamped at build time
	gitCommit = "unknown" // git commit hash the binary was built from
)
|
||||
|
||||
// Global flag values, bound to the root command's flags in main().
var (
	debug         bool     // --debug: enable debug logging
	monitor       bool     // --monitor: monitor and log sandbox violations
	settingsPath  string   // --settings: path to settings file
	proxyURL      string   // --proxy: external SOCKS5 proxy URL override
	httpProxyURL  string   // --http-proxy: HTTP CONNECT proxy URL override
	dnsAddr       string   // --dns: DNS server address on the host
	cmdString     string   // -c: command string to run (like sh -c)
	exposePorts   []string // -p/--port: ports to expose for inbound connections
	exitCode      int      // process exit code passed to os.Exit in main
	showVersion   bool     // -v/--version: print version info and exit
	linuxFeatures bool     // --linux-features: print Linux security features and exit
	learning      bool     // --learning: trace filesystem access, generate a template
	templateName  string   // --template: learned template to load by name
)
|
||||
|
||||
// main wires up the greywall CLI: it short-circuits the internal
// --landlock-apply wrapper mode, builds the cobra root command with all
// global flags and subcommands, and runs it.
func main() {
	// Check for internal --landlock-apply mode (used inside sandbox)
	// This must be checked before cobra to avoid flag conflicts
	if len(os.Args) >= 2 && os.Args[1] == "--landlock-apply" {
		runLandlockWrapper()
		return
	}

	rootCmd := &cobra.Command{
		Use:   "greywall [flags] -- [command...]",
		Short: "Run commands in a sandbox with network and filesystem restrictions",
		Long: `greywall is a command-line tool that runs commands in a sandboxed environment
with network and filesystem restrictions.

By default, traffic is routed through the GreyHaven SOCKS5 proxy at localhost:42051
with DNS via localhost:42053. Use --proxy and --dns to override, or configure in
your settings file at ~/.config/greywall/greywall.json (or ~/Library/Application Support/greywall/greywall.json on macOS).

On Linux, greywall uses tun2socks for truly transparent proxying: all TCP/UDP traffic
from any binary is captured at the kernel level via a TUN device and forwarded
through the external SOCKS5 proxy. No application awareness needed.

On macOS, greywall uses environment variables (best-effort) to direct traffic
to the proxy.

Examples:
  greywall -- curl https://example.com # Blocked (no proxy)
  greywall --proxy socks5://localhost:1080 -- curl https://example.com # Via proxy
  greywall -- curl -s https://example.com # Use -- to separate flags
  greywall -c "echo hello && ls" # Run with shell expansion
  greywall --settings config.json npm install
  greywall -p 3000 -c "npm run dev" # Expose port 3000
  greywall --learning -- opencode # Learn filesystem needs

Configuration file format:
  {
    "network": {
      "proxyUrl": "socks5://localhost:1080"
    },
    "filesystem": {
      "denyRead": [],
      "allowWrite": ["."],
      "denyWrite": []
    },
    "command": {
      "deny": ["git push", "npm publish"]
    }
  }`,
		RunE:          runCommand,
		SilenceUsage:  true,
		SilenceErrors: true, // errors are printed once, below, not by cobra
		Args:          cobra.ArbitraryArgs,
	}

	// Bind global flag variables to the root command.
	rootCmd.Flags().BoolVarP(&debug, "debug", "d", false, "Enable debug logging")
	rootCmd.Flags().BoolVarP(&monitor, "monitor", "m", false, "Monitor and log sandbox violations")
	rootCmd.Flags().StringVarP(&settingsPath, "settings", "s", "", "Path to settings file (default: OS config directory)")
	rootCmd.Flags().StringVar(&proxyURL, "proxy", "", "External SOCKS5 proxy URL (default: socks5://localhost:42052)")
	rootCmd.Flags().StringVar(&httpProxyURL, "http-proxy", "", "HTTP CONNECT proxy URL (default: http://localhost:42051)")
	rootCmd.Flags().StringVar(&dnsAddr, "dns", "", "DNS server address on host (default: localhost:42053)")
	rootCmd.Flags().StringVarP(&cmdString, "c", "c", "", "Run command string directly (like sh -c)")
	rootCmd.Flags().StringArrayVarP(&exposePorts, "port", "p", nil, "Expose port for inbound connections (can be used multiple times)")
	rootCmd.Flags().BoolVarP(&showVersion, "version", "v", false, "Show version information")
	rootCmd.Flags().BoolVar(&linuxFeatures, "linux-features", false, "Show available Linux security features and exit")
	rootCmd.Flags().BoolVar(&learning, "learning", false, "Run in learning mode: trace filesystem access and generate a config template")
	rootCmd.Flags().StringVar(&templateName, "template", "", "Load a specific learned template by name (see: greywall templates list)")

	// Allow flags to appear interleaved with positional arguments.
	rootCmd.Flags().SetInterspersed(true)

	rootCmd.AddCommand(newCompletionCmd(rootCmd))
	rootCmd.AddCommand(newTemplatesCmd())
	rootCmd.AddCommand(newDaemonCmd())

	if err := rootCmd.Execute(); err != nil {
		fmt.Fprintf(os.Stderr, "Error: %v\n", err)
		exitCode = 1
	}
	// exitCode may also have been set by runCommand from the child's
	// exit status; os.Exit skips deferred functions, which is safe here
	// because runCommand's defers have already run.
	os.Exit(exitCode)
}
|
||||
|
||||
// runCommand is the root command handler. It resolves the command to run,
// loads and merges configuration (settings file, learned template, CLI
// overrides, GreyHaven defaults), initializes the sandbox, executes the
// command inside it with signal forwarding, and — in learning mode —
// generates a learned template afterwards. The child's exit status is
// propagated via the package-level exitCode rather than an error so that
// deferred cleanup still runs.
func runCommand(cmd *cobra.Command, args []string) error {
	if showVersion {
		fmt.Printf("greywall - lightweight, container-free sandbox for running untrusted commands\n")
		fmt.Printf(" Version: %s\n", version)
		fmt.Printf(" Built: %s\n", buildTime)
		fmt.Printf(" Commit: %s\n", gitCommit)
		sandbox.PrintDependencyStatus()
		return nil
	}

	if linuxFeatures {
		sandbox.PrintLinuxFeatures()
		return nil
	}

	// Resolve the command string: -c wins over positional args, which are
	// shell-quoted into a single command line.
	var command string
	switch {
	case cmdString != "":
		command = cmdString
	case len(args) > 0:
		command = sandbox.ShellQuote(args)
	default:
		return fmt.Errorf("no command specified. Use -c <command> or provide command arguments")
	}

	if debug {
		fmt.Fprintf(os.Stderr, "[greywall] Command: %s\n", command)
	}

	// Validate and collect exposed ports (1-65535).
	var ports []int
	for _, p := range exposePorts {
		port, err := strconv.Atoi(p)
		if err != nil || port < 1 || port > 65535 {
			return fmt.Errorf("invalid port: %s", p)
		}
		ports = append(ports, port)
	}

	if debug && len(ports) > 0 {
		fmt.Fprintf(os.Stderr, "[greywall] Exposing ports: %v\n", ports)
	}

	// Load config: settings file > default path > default config
	var cfg *config.Config
	var err error

	switch {
	case settingsPath != "":
		cfg, err = config.Load(settingsPath)
		if err != nil {
			return fmt.Errorf("failed to load config: %w", err)
		}
	default:
		configPath := config.DefaultConfigPath()
		cfg, err = config.Load(configPath)
		if err != nil {
			return fmt.Errorf("failed to load config: %w", err)
		}
		if cfg == nil {
			if debug {
				fmt.Fprintf(os.Stderr, "[greywall] No config found at %s, using default (block all network)\n", configPath)
			}
			cfg = config.Default()
		}
	}

	// Extract command name for learned template lookup
	cmdName := extractCommandName(args, cmdString)

	// Load learned template (when NOT in learning mode)
	if !learning {
		// Determine which template to load: --template flag takes priority
		var templatePath string
		var templateLabel string
		if templateName != "" {
			templatePath = sandbox.LearnedTemplatePath(templateName)
			templateLabel = templateName
		} else if cmdName != "" {
			templatePath = sandbox.LearnedTemplatePath(cmdName)
			templateLabel = cmdName
		}

		if templatePath != "" {
			learnedCfg, loadErr := config.Load(templatePath)
			switch {
			case loadErr != nil:
				// Best-effort: a broken template is only reported in debug.
				if debug {
					fmt.Fprintf(os.Stderr, "[greywall] Warning: failed to load learned template: %v\n", loadErr)
				}
			case learnedCfg != nil:
				cfg = config.Merge(cfg, learnedCfg)
				if debug {
					fmt.Fprintf(os.Stderr, "[greywall] Auto-loaded learned template for %q\n", templateLabel)
				}
			case templateName != "":
				// Explicit --template but file doesn't exist
				return fmt.Errorf("learned template %q not found at %s\nRun: greywall templates list", templateName, templatePath)
			case cmdName != "":
				// No template found for this command - suggest creating one
				fmt.Fprintf(os.Stderr, "[greywall] No learned template for %q. Run with --learning to create one.\n", cmdName)
			}
		}
	}

	// CLI flags override config
	if proxyURL != "" {
		cfg.Network.ProxyURL = proxyURL
	}
	if httpProxyURL != "" {
		cfg.Network.HTTPProxyURL = httpProxyURL
	}
	if dnsAddr != "" {
		cfg.Network.DnsAddr = dnsAddr
	}

	// GreyHaven defaults: when no proxy or DNS is configured (neither via CLI
	// nor config file), use the standard GreyHaven infrastructure ports.
	if cfg.Network.ProxyURL == "" {
		cfg.Network.ProxyURL = "socks5://localhost:42052"
		if debug {
			fmt.Fprintf(os.Stderr, "[greywall] Defaulting proxy to socks5://localhost:42052\n")
		}
	}
	if cfg.Network.HTTPProxyURL == "" {
		cfg.Network.HTTPProxyURL = "http://localhost:42051"
		if debug {
			fmt.Fprintf(os.Stderr, "[greywall] Defaulting HTTP proxy to http://localhost:42051\n")
		}
	}
	if cfg.Network.DnsAddr == "" {
		cfg.Network.DnsAddr = "localhost:42053"
		if debug {
			fmt.Fprintf(os.Stderr, "[greywall] Defaulting DNS to localhost:42053\n")
		}
	}

	// Auto-inject proxy credentials so the proxy can identify the sandboxed command.
	// - If a command name is available, use it as the username with "proxy" as password.
	// - If no command name, default to "proxy:proxy" (required by gost for auth).
	// This always overrides any existing credentials in the URL.
	if cfg.Network.ProxyURL != "" {
		if u, err := url.Parse(cfg.Network.ProxyURL); err == nil {
			proxyUser := "proxy"
			if cmdName != "" {
				proxyUser = cmdName
			}
			u.User = url.UserPassword(proxyUser, "proxy")
			cfg.Network.ProxyURL = u.String()
			if debug {
				fmt.Fprintf(os.Stderr, "[greywall] Auto-set proxy credentials to %q:proxy\n", proxyUser)
			}
		}
	}

	// Learning mode setup
	if learning {
		if err := sandbox.CheckLearningAvailable(); err != nil {
			return err
		}
		fmt.Fprintf(os.Stderr, "[greywall] Learning mode: tracing filesystem access for %q\n", cmdName)
		fmt.Fprintf(os.Stderr, "[greywall] WARNING: The sandbox filesystem is relaxed during learning. Do not use for untrusted code.\n")
	}

	// Build the sandbox manager; Cleanup is deferred so teardown happens
	// even when the child exits non-zero.
	manager := sandbox.NewManager(cfg, debug, monitor)
	manager.SetExposedPorts(ports)
	if learning {
		manager.SetLearning(true)
		manager.SetCommandName(cmdName)
	}
	defer manager.Cleanup()

	if err := manager.Initialize(); err != nil {
		return fmt.Errorf("failed to initialize sandbox: %w", err)
	}

	// Optional violation monitor tails sandbox logs for this session;
	// failure to start it is a warning, not fatal.
	var logMonitor *sandbox.LogMonitor
	if monitor {
		logMonitor = sandbox.NewLogMonitor(sandbox.GetSessionSuffix())
		if logMonitor != nil {
			if err := logMonitor.Start(); err != nil {
				fmt.Fprintf(os.Stderr, "[greywall] Warning: failed to start log monitor: %v\n", err)
			} else {
				defer logMonitor.Stop()
			}
		}
	}

	sandboxedCommand, err := manager.WrapCommand(command)
	if err != nil {
		return fmt.Errorf("failed to wrap command: %w", err)
	}

	if debug {
		fmt.Fprintf(os.Stderr, "[greywall] Sandboxed command: %s\n", sandboxedCommand)
		fmt.Fprintf(os.Stderr, "[greywall] Executing: sh -c %q\n", sandboxedCommand)
	}

	hardenedEnv := sandbox.GetHardenedEnv()
	if debug {
		if stripped := sandbox.GetStrippedEnvVars(os.Environ()); len(stripped) > 0 {
			fmt.Fprintf(os.Stderr, "[greywall] Stripped dangerous env vars: %v\n", stripped)
		}
	}

	execCmd := exec.Command("sh", "-c", sandboxedCommand) //nolint:gosec // sandboxedCommand is constructed from user input - intentional
	execCmd.Env = hardenedEnv
	execCmd.Stdin = os.Stdin
	execCmd.Stdout = os.Stdout
	execCmd.Stderr = os.Stderr

	sigChan := make(chan os.Signal, 1)
	signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)

	// Start the command (non-blocking) so we can get the PID
	if err := execCmd.Start(); err != nil {
		return fmt.Errorf("failed to start command: %w", err)
	}

	// Record root PID for macOS learning mode (eslogger uses this for process tree tracking)
	if learning && platform.Detect() == platform.MacOS && execCmd.Process != nil {
		manager.SetLearningRootPID(execCmd.Process.Pid)
	}

	// Start Linux monitors (eBPF tracing for filesystem violations)
	var linuxMonitors *sandbox.LinuxMonitors
	if monitor && execCmd.Process != nil {
		linuxMonitors, _ = sandbox.StartLinuxMonitor(execCmd.Process.Pid, sandbox.LinuxSandboxOptions{
			Monitor: true,
			Debug:   debug,
			UseEBPF: true,
		})
		if linuxMonitors != nil {
			defer linuxMonitors.Stop()
		}
	}

	// Forward signals to the child process.
	go func() {
		sigCount := 0
		for sig := range sigChan {
			sigCount++
			if execCmd.Process == nil {
				continue
			}
			// First signal: graceful termination; second signal: force kill
			if sigCount >= 2 {
				_ = execCmd.Process.Kill()
			} else {
				_ = execCmd.Process.Signal(sig)
			}
		}
	}()

	// Wait for command to finish
	commandFailed := false
	if err := execCmd.Wait(); err != nil {
		if exitErr, ok := err.(*exec.ExitError); ok {
			// Set exit code but don't os.Exit() here - let deferred cleanup run
			exitCode = exitErr.ExitCode()
			commandFailed = true
		} else {
			return fmt.Errorf("command failed: %w", err)
		}
	}

	// Generate learned template after command completes successfully.
	// Skip template generation if the command failed — the strace trace
	// is likely incomplete and would produce an unreliable template.
	if learning && manager.IsLearning() {
		if commandFailed {
			fmt.Fprintf(os.Stderr, "[greywall] Skipping template generation: command exited with code %d\n", exitCode)
		} else {
			fmt.Fprintf(os.Stderr, "[greywall] Analyzing filesystem access patterns...\n")
			templatePath, genErr := manager.GenerateLearnedTemplate(cmdName)
			if genErr != nil {
				fmt.Fprintf(os.Stderr, "[greywall] Warning: failed to generate template: %v\n", genErr)
			} else {
				fmt.Fprintf(os.Stderr, "[greywall] Template saved to: %s\n", templatePath)
				fmt.Fprintf(os.Stderr, "[greywall] Next run will auto-load this template.\n")
			}
		}
	}

	return nil
}
|
||||
|
||||
// extractCommandName derives a short, human-readable command name for
// learned-template lookup. Positional args take priority over the -c
// command string; only the first token is considered, and any directory
// prefix is stripped (e.g. /usr/bin/opencode -> opencode). Returns ""
// when no command token is available.
func extractCommandName(args []string, cmdStr string) string {
	first := ""
	if len(args) > 0 {
		first = args[0]
	} else if fields := strings.Fields(cmdStr); len(fields) > 0 {
		first = fields[0]
	}
	if first == "" {
		return ""
	}
	// Strip any path prefix so only the bare program name remains.
	return filepath.Base(first)
}
|
||||
|
||||
// newCompletionCmd creates the completion subcommand for shell completions.
// It takes the root command so the generated script covers the whole CLI.
func newCompletionCmd(rootCmd *cobra.Command) *cobra.Command {
	cmd := &cobra.Command{
		Use:   "completion [bash|zsh|fish|powershell]",
		Short: "Generate shell completion scripts",
		Long: `Generate shell completion scripts for greywall.

Examples:
  # Bash (load in current session)
  source <(greywall completion bash)

  # Zsh (load in current session)
  source <(greywall completion zsh)

  # Fish (load in current session)
  greywall completion fish | source

  # PowerShell (load in current session)
  greywall completion powershell | Out-String | Invoke-Expression

To persist completions, redirect output to the appropriate completions
directory for your shell (e.g., /etc/bash_completion.d/ for bash,
${fpath[1]}/_greywall for zsh, ~/.config/fish/completions/greywall.fish for fish).
`,
		DisableFlagsInUseLine: true,
		ValidArgs:             []string{"bash", "zsh", "fish", "powershell"},
		Args:                  cobra.ExactArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			// Dispatch to the cobra generator for the requested shell;
			// each one writes the completion script to stdout.
			switch args[0] {
			case "bash":
				return rootCmd.GenBashCompletionV2(os.Stdout, true)
			case "zsh":
				return rootCmd.GenZshCompletion(os.Stdout)
			case "fish":
				return rootCmd.GenFishCompletion(os.Stdout, true)
			case "powershell":
				return rootCmd.GenPowerShellCompletionWithDesc(os.Stdout)
			default:
				// Unreachable in practice: Args/ValidArgs restrict input.
				return fmt.Errorf("unsupported shell: %s", args[0])
			}
		},
	}
	return cmd
}
|
||||
|
||||
// newTemplatesCmd creates the templates subcommand for managing learned templates.
// It exposes two read-only subcommands: "list" and "show <name>".
func newTemplatesCmd() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "templates",
		Short: "Manage learned sandbox templates",
		Long: `List and inspect learned sandbox templates.

Templates are created by running greywall with --learning and are stored in:
` + sandbox.LearnedTemplateDir() + `

Examples:
  greywall templates list # List all learned templates
  greywall templates show opencode # Show the content of a template`,
	}

	// "templates list": enumerate every learned template on disk.
	listCmd := &cobra.Command{
		Use:   "list",
		Short: "List all learned templates",
		Args:  cobra.NoArgs,
		RunE: func(cmd *cobra.Command, args []string) error {
			templates, err := sandbox.ListLearnedTemplates()
			if err != nil {
				return fmt.Errorf("failed to list templates: %w", err)
			}
			if len(templates) == 0 {
				fmt.Println("No learned templates found.")
				fmt.Printf("Create one with: greywall --learning -- <command>\n")
				return nil
			}
			fmt.Printf("Learned templates (%s):\n\n", sandbox.LearnedTemplateDir())
			for _, t := range templates {
				fmt.Printf(" %s\n", t.Name)
			}
			fmt.Println()
			fmt.Println("Show a template: greywall templates show <name>")
			fmt.Println("Use a template: greywall --template <name> -- <command>")
			return nil
		},
	}

	// "templates show <name>": print a template's raw content to stdout.
	showCmd := &cobra.Command{
		Use:   "show <name>",
		Short: "Show the content of a learned template",
		Args:  cobra.ExactArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			name := args[0]
			templatePath := sandbox.LearnedTemplatePath(name)
			data, err := os.ReadFile(templatePath) //nolint:gosec // user-specified template path - intentional
			if err != nil {
				// Distinguish "no such template" from real read failures.
				if os.IsNotExist(err) {
					return fmt.Errorf("template %q not found\nRun: greywall templates list", name)
				}
				return fmt.Errorf("failed to read template: %w", err)
			}
			fmt.Printf("Template: %s\n", name)
			fmt.Printf("Path: %s\n\n", templatePath)
			fmt.Print(string(data))
			return nil
		},
	}

	cmd.AddCommand(listCmd, showCmd)
	return cmd
}
|
||||
|
||||
// runLandlockWrapper runs in "wrapper mode" inside the sandbox.
|
||||
// It applies Landlock restrictions and then execs the user command.
|
||||
// Usage: greywall --landlock-apply [--debug] -- <command...>
|
||||
// Config is passed via GREYWALL_CONFIG_JSON environment variable.
|
||||
func runLandlockWrapper() {
|
||||
// Parse arguments: --landlock-apply [--debug] -- <command...>
|
||||
args := os.Args[2:] // Skip "greywall" and "--landlock-apply"
|
||||
|
||||
var debugMode bool
|
||||
var cmdStart int
|
||||
|
||||
for i := 0; i < len(args); i++ {
|
||||
switch args[i] {
|
||||
case "--debug":
|
||||
debugMode = true
|
||||
case "--":
|
||||
cmdStart = i + 1
|
||||
goto parseCommand
|
||||
default:
|
||||
// Assume rest is the command
|
||||
cmdStart = i
|
||||
goto parseCommand
|
||||
}
|
||||
}
|
||||
|
||||
parseCommand:
|
||||
if cmdStart >= len(args) {
|
||||
fmt.Fprintf(os.Stderr, "[greywall:landlock-wrapper] Error: no command specified\n")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
command := args[cmdStart:]
|
||||
|
||||
if debugMode {
|
||||
fmt.Fprintf(os.Stderr, "[greywall:landlock-wrapper] Applying Landlock restrictions\n")
|
||||
}
|
||||
|
||||
// Only apply Landlock on Linux
|
||||
if platform.Detect() == platform.Linux {
|
||||
// Load config from environment variable (passed by parent greywall process)
|
||||
var cfg *config.Config
|
||||
if configJSON := os.Getenv("GREYWALL_CONFIG_JSON"); configJSON != "" {
|
||||
cfg = &config.Config{}
|
||||
if err := json.Unmarshal([]byte(configJSON), cfg); err != nil {
|
||||
if debugMode {
|
||||
fmt.Fprintf(os.Stderr, "[greywall:landlock-wrapper] Warning: failed to parse config: %v\n", err)
|
||||
}
|
||||
cfg = nil
|
||||
}
|
||||
}
|
||||
if cfg == nil {
|
||||
cfg = config.Default()
|
||||
}
|
||||
|
||||
// Get current working directory for relative path resolution
|
||||
cwd, _ := os.Getwd()
|
||||
|
||||
// Apply Landlock restrictions
|
||||
err := sandbox.ApplyLandlockFromConfig(cfg, cwd, nil, debugMode)
|
||||
if err != nil {
|
||||
if debugMode {
|
||||
fmt.Fprintf(os.Stderr, "[greywall:landlock-wrapper] Warning: Landlock not applied: %v\n", err)
|
||||
}
|
||||
// Continue without Landlock - bwrap still provides isolation
|
||||
} else if debugMode {
|
||||
fmt.Fprintf(os.Stderr, "[greywall:landlock-wrapper] Landlock restrictions applied\n")
|
||||
}
|
||||
}
|
||||
|
||||
// Find the executable
|
||||
execPath, err := exec.LookPath(command[0])
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "[greywall:landlock-wrapper] Error: command not found: %s\n", command[0]) //nolint:gosec // logging to stderr, not web output
|
||||
os.Exit(127)
|
||||
}
|
||||
|
||||
if debugMode {
|
||||
fmt.Fprintf(os.Stderr, "[greywall:landlock-wrapper] Exec: %s %v\n", execPath, command[1:]) //nolint:gosec // logging to stderr, not web output
|
||||
}
|
||||
|
||||
// Sanitize environment (strips LD_PRELOAD, etc.)
|
||||
hardenedEnv := sandbox.FilterDangerousEnv(os.Environ())
|
||||
|
||||
// Exec the command (replaces this process)
|
||||
err = syscall.Exec(execPath, command, hardenedEnv) //nolint:gosec
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "[greywall:landlock-wrapper] Exec failed: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
@@ -1,26 +1,26 @@
|
||||
# Fence Documentation
|
||||
# Greywall Documentation
|
||||
|
||||
Fence is a sandboxing tool that restricts network and filesystem access for arbitrary commands. It's most useful for running semi-trusted code (package installs, build scripts, CI jobs, unfamiliar repos) with controlled side effects.
|
||||
Greywall is a sandboxing tool that restricts network and filesystem access for arbitrary commands. It's most useful for running semi-trusted code (package installs, build scripts, CI jobs, unfamiliar repos) with controlled side effects.
|
||||
|
||||
## Getting Started
|
||||
|
||||
- [Quickstart](quickstart.md) - Install fence and run your first sandboxed command in 5 minutes
|
||||
- [Why Fence](why-fence.md) - What problem it solves (and what it doesn't)
|
||||
- [Quickstart](quickstart.md) - Install greywall and run your first sandboxed command in 5 minutes
|
||||
- [Why Greywall](why-greywall.md) - What problem it solves (and what it doesn't)
|
||||
|
||||
## Guides
|
||||
|
||||
- [Concepts](concepts.md) - Mental model: OS sandbox + local proxies + config
|
||||
- [Troubleshooting](troubleshooting.md) - Common failure modes and fixes
|
||||
- [Using Fence with AI agents](agents.md) - Defense-in-depth and policy standardization
|
||||
- [Using Greywall with AI agents](agents.md) - Defense-in-depth and policy standardization
|
||||
- [Recipes](recipes/README.md) - Common workflows (npm/pip/git/CI)
|
||||
- [Templates](./templates.md) - Copy/paste templates you can start from
|
||||
|
||||
## Reference
|
||||
|
||||
- [README](../README.md) - CLI usage
|
||||
- [Library Usage (Go)](library.md) - Using Fence as a Go package
|
||||
- [Configuration](./configuration.md) - How to configure Fence
|
||||
- [Architecture](../ARCHITECTURE.md) - How fence works under the hood
|
||||
- [Library Usage (Go)](library.md) - Using Greywall as a Go package
|
||||
- [Configuration](./configuration.md) - How to configure Greywall
|
||||
- [Architecture](../ARCHITECTURE.md) - How greywall works under the hood
|
||||
- [Security model](security-model.md) - Threat model, guarantees, and limitations
|
||||
- [Linux security features](linux-security-features.md) - Landlock, seccomp, eBPF details and fallback behavior
|
||||
- [Testing](testing.md) - How to run tests and write new ones
|
||||
@@ -36,20 +36,20 @@ See [`examples/`](../examples/README.md) for runnable demos.
|
||||
|
||||
```bash
|
||||
# Block all network (default)
|
||||
fence <command>
|
||||
greywall <command>
|
||||
|
||||
# Use custom config
|
||||
fence --settings ./fence.json <command>
|
||||
greywall --settings ./greywall.json <command>
|
||||
|
||||
# Debug mode (verbose output)
|
||||
fence -d <command>
|
||||
greywall -d <command>
|
||||
|
||||
# Monitor mode (show blocked requests)
|
||||
fence -m <command>
|
||||
greywall -m <command>
|
||||
|
||||
# Expose port for servers
|
||||
fence -p 3000 <command>
|
||||
greywall -p 3000 <command>
|
||||
|
||||
# Run shell command
|
||||
fence -c "echo hello && ls"
|
||||
greywall -c "echo hello && ls"
|
||||
```
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# Using Fence with AI Agents
|
||||
# Using Greywall with AI Agents
|
||||
|
||||
Many popular coding agents already include sandboxing. Fence can still be useful when you want a tool-agnostic policy layer that works the same way across:
|
||||
Many popular coding agents already include sandboxing. Greywall can still be useful when you want a tool-agnostic policy layer that works the same way across:
|
||||
|
||||
- local developer machines
|
||||
- CI jobs
|
||||
@@ -15,7 +15,7 @@ Treat an agent as "semi-trusted automation":
|
||||
- Allowlist only the network destinations you actually need
|
||||
- Use `-m` (monitor mode) to audit blocked attempts and tighten policy
|
||||
|
||||
Fence can also reduce the risk of running agents with fewer interactive permission prompts (e.g. "skip permissions"), as long as your Fence config tightly scopes writes and outbound destinations. It's defense-in-depth, not a substitute for the agent's own safeguards.
|
||||
Greywall can also reduce the risk of running agents with fewer interactive permission prompts (e.g. "skip permissions"), as long as your Greywall config tightly scopes writes and outbound destinations. It's defense-in-depth, not a substitute for the agent's own safeguards.
|
||||
|
||||
## Example: API-only agent
|
||||
|
||||
@@ -33,7 +33,7 @@ Fence can also reduce the risk of running agents with fewer interactive permissi
|
||||
Run:
|
||||
|
||||
```bash
|
||||
fence --settings ./fence.json <agent-command>
|
||||
greywall --settings ./greywall.json <agent-command>
|
||||
```
|
||||
|
||||
## Popular CLI coding agents
|
||||
@@ -43,7 +43,7 @@ We provide these templates for guardrailing CLI coding agents:
|
||||
- [`code`](/internal/templates/code.json) - Strict deny-by-default network filtering via proxy. Works with agents that respect `HTTP_PROXY`. Blocks cloud metadata APIs, protects secrets, restricts dangerous commands.
|
||||
- [`code-relaxed`](/internal/templates/code-relaxed.json) - Allows direct network connections for agents that ignore `HTTP_PROXY`. Same filesystem/command protections as `code`, but `deniedDomains` only enforced for proxy-respecting apps.
|
||||
|
||||
You can use it like `fence -t code -- claude`.
|
||||
You can use it like `greywall -t code -- claude`.
|
||||
|
||||
| Agent | Works with template | Notes |
|
||||
|-------|--------| ----- |
|
||||
@@ -60,7 +60,7 @@ Note: On Linux, if OpenCode or Gemini CLI is installed via Linuxbrew, Landlock c
|
||||
|
||||
## Protecting your environment
|
||||
|
||||
Fence includes additional "dangerous file protection" (writes blocked regardless of config) to reduce persistence and environment-tampering vectors like:
|
||||
Greywall includes additional "dangerous file protection" (writes blocked regardless of config) to reduce persistence and environment-tampering vectors like:
|
||||
|
||||
- `.git/hooks/*`
|
||||
- shell startup files (`.zshrc`, `.bashrc`, etc.)
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# Benchmarking
|
||||
|
||||
This document describes how to run, interpret, and compare sandbox performance benchmarks for Fence.
|
||||
This document describes how to run, interpret, and compare sandbox performance benchmarks for Greywall.
|
||||
|
||||
## Quick Start
|
||||
|
||||
@@ -29,9 +29,9 @@ go test -run=^$ -bench=. -benchmem ./internal/sandbox/...
|
||||
|
||||
### Layer 1: CLI Benchmarks (`scripts/benchmark.sh`)
|
||||
|
||||
**What it measures**: Real-world agent cost - full `fence` invocation including proxy startup, socat bridges (Linux), and sandbox-exec/bwrap setup.
|
||||
**What it measures**: Real-world agent cost - full `greywall` invocation including proxy startup, socat bridges (Linux), and sandbox-exec/bwrap setup.
|
||||
|
||||
This is the most realistic benchmark for understanding the cost of running agent commands through Fence.
|
||||
This is the most realistic benchmark for understanding the cost of running agent commands through Greywall.
|
||||
|
||||
```bash
|
||||
# Full benchmark suite
|
||||
@@ -51,7 +51,7 @@ This is the most realistic benchmark for understanding the cost of running agent
|
||||
|
||||
| Option | Description |
|
||||
|--------|-------------|
|
||||
| `-b, --binary PATH` | Path to fence binary (default: ./fence) |
|
||||
| `-b, --binary PATH` | Path to greywall binary (default: ./greywall) |
|
||||
| `-o, --output DIR` | Output directory (default: ./benchmarks) |
|
||||
| `-n, --runs N` | Minimum runs per benchmark (default: 30) |
|
||||
| `-q, --quick` | Quick mode: fewer runs, skip slow benchmarks |
|
||||
@@ -92,13 +92,13 @@ benchstat bench.txt
|
||||
|
||||
```bash
|
||||
# Quick syscall cost breakdown
|
||||
strace -f -c ./fence -- true
|
||||
strace -f -c ./greywall -- true
|
||||
|
||||
# Context switches, page faults
|
||||
perf stat -- ./fence -- true
|
||||
perf stat -- ./greywall -- true
|
||||
|
||||
# Full profiling (flamegraph-ready)
|
||||
perf record -F 99 -g -- ./fence -- git status
|
||||
perf record -F 99 -g -- ./greywall -- git status
|
||||
perf report
|
||||
```
|
||||
|
||||
@@ -106,10 +106,10 @@ perf report
|
||||
|
||||
```bash
|
||||
# Time Profiler via Instruments
|
||||
xcrun xctrace record --template 'Time Profiler' --launch -- ./fence -- true
|
||||
xcrun xctrace record --template 'Time Profiler' --launch -- ./greywall -- true
|
||||
|
||||
# Quick call-stack snapshot
|
||||
./fence -- sleep 5 &
|
||||
./greywall -- sleep 5 &
|
||||
sample $! 5 -file sample.txt
|
||||
```
|
||||
|
||||
@@ -150,7 +150,7 @@ The overhead factor decreases as the actual workload increases (because sandbox
|
||||
|
||||
1. Run benchmarks on each platform independently
|
||||
2. Compare overhead factors, not absolute times
|
||||
3. Use the same fence version and workloads
|
||||
3. Use the same greywall version and workloads
|
||||
|
||||
```bash
|
||||
# On macOS
|
||||
@@ -256,7 +256,7 @@ Linux initialization is ~3,700x slower because it must:
|
||||
|
||||
macOS only generates a Seatbelt profile string (very cheap).
|
||||
|
||||
### Cold Start Overhead (one `fence` invocation per command)
|
||||
### Cold Start Overhead (one `greywall` invocation per command)
|
||||
|
||||
| Workload | Linux | macOS |
|
||||
|----------|-------|-------|
|
||||
@@ -264,7 +264,7 @@ macOS only generates a Seatbelt profile string (very cheap).
|
||||
| Python | 124 ms | 33 ms |
|
||||
| Git status | 114 ms | 25 ms |
|
||||
|
||||
This is the realistic cost for scripts running `fence -c "command"` repeatedly.
|
||||
This is the realistic cost for scripts running `greywall -c "command"` repeatedly.
|
||||
|
||||
### Warm Path Overhead (pre-initialized manager)
|
||||
|
||||
@@ -289,9 +289,9 @@ Overhead decreases as the actual workload increases (sandbox setup is fixed cost
|
||||
|
||||
## Impact on Agent Usage
|
||||
|
||||
### Long-Running Agents (`fence claude`, `fence codex`)
|
||||
### Long-Running Agents (`greywall claude`, `greywall codex`)
|
||||
|
||||
For agents that run as a child process under fence:
|
||||
For agents that run as a child process under greywall:
|
||||
|
||||
| Phase | Cost |
|
||||
|-------|------|
|
||||
@@ -306,11 +306,11 @@ Child processes inherit the sandbox - no re-initialization, no WrapCommand overh
|
||||
| `git status` | 2.1 ms | 5.9 ms |
|
||||
| Python script | 11 ms | 15 ms |
|
||||
|
||||
**Bottom line**: For `fence <agent>` usage, sandbox overhead is a one-time startup cost. Tool calls inside the agent run at native speed.
|
||||
**Bottom line**: For `greywall <agent>` usage, sandbox overhead is a one-time startup cost. Tool calls inside the agent run at native speed.
|
||||
|
||||
### Per-Command Invocation (`fence -c "command"`)
|
||||
### Per-Command Invocation (`greywall -c "command"`)
|
||||
|
||||
For scripts or CI running fence per command:
|
||||
For scripts or CI running greywall per command:
|
||||
|
||||
| Session | Linux Cost | macOS Cost |
|
||||
|---------|------------|------------|
|
||||
|
||||
@@ -1,15 +1,15 @@
|
||||
# Concepts
|
||||
|
||||
Fence combines two ideas:
|
||||
Greywall combines two ideas:
|
||||
|
||||
1. **An OS sandbox** to enforce "no direct network" and restrict filesystem operations.
|
||||
2. **Local filtering proxies** (HTTP + SOCKS5) to selectively allow outbound traffic by domain.
|
||||
|
||||
## Network model
|
||||
|
||||
By default, fence blocks all outbound network access.
|
||||
By default, greywall blocks all outbound network access.
|
||||
|
||||
When you allow domains, fence:
|
||||
When you allow domains, greywall:
|
||||
|
||||
- Starts local HTTP and SOCKS5 proxies
|
||||
- Sets proxy environment variables (`HTTP_PROXY`, `HTTPS_PROXY`, `ALL_PROXY`)
|
||||
@@ -29,13 +29,13 @@ These are separate on purpose. A typical safe default for dev servers is:
|
||||
|
||||
## Filesystem model
|
||||
|
||||
Fence is designed around "read mostly, write narrowly":
|
||||
Greywall is designed around "read mostly, write narrowly":
|
||||
|
||||
- **Reads**: allowed by default (you can block specific paths via `denyRead`).
|
||||
- **Writes**: denied by default (you must opt-in with `allowWrite`).
|
||||
- **denyWrite**: overrides `allowWrite` (useful for protecting secrets and dangerous files).
|
||||
|
||||
Fence also protects some dangerous targets regardless of config (e.g. shell startup files and git hooks). See `ARCHITECTURE.md` for the full list.
|
||||
Greywall also protects some dangerous targets regardless of config (e.g. shell startup files and git hooks). See `ARCHITECTURE.md` for the full list.
|
||||
|
||||
## Debug vs Monitor mode
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# Configuration
|
||||
|
||||
Fence reads settings from `~/.config/fence/fence.json` by default (or `~/Library/Application Support/fence/fence.json` on macOS). Legacy `~/.fence.json` is also supported. Pass `--settings ./fence.json` to use a custom path. Config files support JSONC.
|
||||
Greywall reads settings from `~/.config/greywall/greywall.json` by default (or `~/Library/Application Support/greywall/greywall.json` on macOS). Legacy `~/.greywall.json` is also supported. Pass `--settings ./greywall.json` to use a custom path. Config files support JSONC.
|
||||
|
||||
Example config:
|
||||
|
||||
@@ -60,7 +60,7 @@ You can also extend other config files using absolute or relative paths:
|
||||
|
||||
```json
|
||||
{
|
||||
"extends": "/etc/fence/company-base.json",
|
||||
"extends": "/etc/greywall/company-base.json",
|
||||
"filesystem": {
|
||||
"denyRead": ["~/company-secrets/**"]
|
||||
}
|
||||
@@ -143,7 +143,7 @@ Example:
|
||||
|
||||
### Default Denied Commands
|
||||
|
||||
When `useDefaults` is `true` (the default), fence blocks these dangerous commands:
|
||||
When `useDefaults` is `true` (the default), greywall blocks these dangerous commands:
|
||||
|
||||
- System control: `shutdown`, `reboot`, `halt`, `poweroff`, `init 0/6`
|
||||
- Kernel manipulation: `insmod`, `rmmod`, `modprobe`, `kexec`
|
||||
@@ -155,7 +155,7 @@ To disable defaults: `"useDefaults": false`
|
||||
|
||||
### Command Detection
|
||||
|
||||
Fence detects blocked commands in:
|
||||
Greywall detects blocked commands in:
|
||||
|
||||
- Direct commands: `git push origin main`
|
||||
- Command chains: `ls && git push` or `ls; git push`
|
||||
@@ -260,26 +260,26 @@ SSH host patterns support wildcards anywhere:
|
||||
|
||||
## Importing from Claude Code
|
||||
|
||||
If you've been using Claude Code and have already built up permission rules, you can import them into fence:
|
||||
If you've been using Claude Code and have already built up permission rules, you can import them into greywall:
|
||||
|
||||
```bash
|
||||
# Preview import (prints JSON to stdout)
|
||||
fence import --claude
|
||||
greywall import --claude
|
||||
|
||||
# Save to the default config path
|
||||
fence import --claude --save
|
||||
greywall import --claude --save
|
||||
|
||||
# Import from a specific file
|
||||
fence import --claude -f ~/.claude/settings.json --save
|
||||
greywall import --claude -f ~/.claude/settings.json --save
|
||||
|
||||
# Save to a specific output file
|
||||
fence import --claude -o ./fence.json
|
||||
greywall import --claude -o ./greywall.json
|
||||
|
||||
# Import without extending any template (minimal config)
|
||||
fence import --claude --no-extend --save
|
||||
greywall import --claude --no-extend --save
|
||||
|
||||
# Import and extend a different template
|
||||
fence import --claude --extend local-dev-server --save
|
||||
greywall import --claude --extend local-dev-server --save
|
||||
```
|
||||
|
||||
### Default Template
|
||||
@@ -294,7 +294,7 @@ Use `--no-extend` if you want a minimal config without these defaults, or `--ext
|
||||
|
||||
### Permission Mapping
|
||||
|
||||
| Claude Code | Fence |
|
||||
| Claude Code | Greywall |
|
||||
|-------------|-------|
|
||||
| `Bash(xyz)` allow | `command.allow: ["xyz"]` |
|
||||
| `Bash(xyz:*)` deny | `command.deny: ["xyz"]` |
|
||||
@@ -302,9 +302,9 @@ Use `--no-extend` if you want a minimal config without these defaults, or `--ext
|
||||
| `Write(path)` allow | `filesystem.allowWrite: [path]` |
|
||||
| `Write(path)` deny | `filesystem.denyWrite: [path]` |
|
||||
| `Edit(path)` | Same as `Write(path)` |
|
||||
| `ask` rules | Converted to deny (fence doesn't support interactive prompts) |
|
||||
| `ask` rules | Converted to deny (greywall doesn't support interactive prompts) |
|
||||
|
||||
Global tool permissions (e.g., bare `Read`, `Write`, `Grep`) are skipped since fence uses path/command-based rules.
|
||||
Global tool permissions (e.g., bare `Read`, `Write`, `Grep`) are skipped since greywall uses path/command-based rules.
|
||||
|
||||
## See Also
|
||||
|
||||
|
||||
121 docs/experience.md (new file)
@@ -0,0 +1,121 @@
|
||||
# Greywall Development Notes
|
||||
|
||||
Lessons learned and issues encountered during development.
|
||||
|
||||
---
|
||||
|
||||
## strace log hidden by tmpfs mount ordering
|
||||
|
||||
**Problem:** Learning mode strace log was always empty ("No additional write paths discovered"). The log file was bind-mounted into `/tmp/greywall-strace-*.log` inside the sandbox, but `--tmpfs /tmp` was declared later in the bwrap args, creating a fresh tmpfs that hid the bind-mount.
|
||||
|
||||
**Fix:** Move the strace log bind-mount to AFTER `--tmpfs /tmp` in the bwrap argument list. Later mounts override earlier ones for the same path.
|
||||
|
||||
---
|
||||
|
||||
## strace -f hangs on long-lived child processes
|
||||
|
||||
**Problem:** `greywall --learning -- opencode` would hang after exiting opencode. `strace -f` follows forked children and waits for ALL of them to exit. Apps like opencode spawn LSP servers, file watchers, etc. that outlive the main process.
|
||||
|
||||
**Approach 1 - Attach via strace -p:** Run the command in the background, attach strace with `-p PID`. Failed because bwrap restricts `ptrace(PTRACE_SEIZE)` — ptrace only works parent-to-child, not for attaching to arbitrary processes.
|
||||
|
||||
**Approach 2 - Background monitor:** Run `strace -- command &` and spawn a monitor subshell that polls `/proc/STRACE_PID/task/STRACE_PID/children`. When strace's direct child (the main command) exits, the children file becomes empty — grandchildren are reparented to PID 1, not strace. Monitor then kills strace.
|
||||
|
||||
**Fix:** Approach 2 with two additional fixes:
|
||||
- Added `-I2` flag to strace. Default `-I3` (used when `-o FILE PROG`) blocks all fatal signals, so the monitor's `kill` was silently ignored.
|
||||
- Added `kill -TERM -1` after strace exits to clean up orphaned processes. Without this, orphans inherit stdout/stderr pipe FDs, and Go's `cmd.Wait()` blocks until they close.
|
||||
|
||||
---
|
||||
|
||||
## UDP DNS doesn't work through tun2socks
|
||||
|
||||
**Problem:** DNS resolution failed inside the sandbox. The socat DNS relay forwarded UDP DNS queries to 1.1.1.1:53 through tun2socks, but tun2socks (v2.5.2) doesn't reliably handle UDP DNS forwarding through SOCKS5.
|
||||
|
||||
**Approach 1 - UDP-to-TCP relay with socat:** Can't work because TCP DNS requires a 2-byte length prefix (RFC 1035 section 4.2.2) that socat can't add.
|
||||
|
||||
**Approach 2 - Embed a Go DNS relay binary:** Would work but adds build complexity for a simple problem.
|
||||
|
||||
**Fix:** Set resolv.conf to `nameserver 1.1.1.1` with `options use-vc` instead of pointing at a local relay. `use-vc` forces the resolver to use TCP, which tun2socks handles natively. Supported by glibc, Go 1.21+, and c-ares. Removed the broken socat UDP relay entirely.
|
||||
|
||||
---
|
||||
|
||||
## DNS relay protocol mismatch (original bug)
|
||||
|
||||
**Problem:** The original DNS relay used `socat UDP4-RECVFROM:53,fork TCP:1.1.1.1:53` — converting UDP DNS to TCP. This silently fails because TCP DNS requires a 2-byte big-endian length prefix per RFC 1035 section 4.2.2 that raw UDP DNS packets don't have. The DNS server receives a malformed TCP stream and drops it.
|
||||
|
||||
**Fix:** Superseded by the `options use-vc` approach above.
|
||||
|
||||
---
|
||||
|
||||
## strace captures directory traversals as file reads
|
||||
|
||||
**Problem:** Learning mode listed `/`, `/home`, `/home/user`, `/home/user/.cache` etc. as "read" paths. These are `openat(O_RDONLY|O_DIRECTORY)` calls used for `readdir()` traversal, not meaningful file reads.
|
||||
|
||||
**Fix:** Filter out `openat` calls containing `O_DIRECTORY` in `extractReadPath()`.
|
||||
|
||||
---
|
||||
|
||||
## SOCKS5 proxy credentials and protocol
|
||||
|
||||
**Problem:** DNS resolution through the SOCKS5 proxy failed with authentication errors. Two issues: wrong credentials (`x:x` vs `proxy:proxy`) and wrong protocol (`socks5://` vs `socks5h://`).
|
||||
|
||||
**Key distinction:** `socks5://` resolves DNS locally then sends the IP to the proxy. `socks5h://` sends the hostname to the proxy for remote DNS resolution. With tun2socks, the distinction matters less (tun2socks intercepts at IP level), but using `socks5h://` is still correct for the proxy bridge configuration.
|
||||
|
||||
---
|
||||
|
||||
## gost SOCKS5 requires authentication flow
|
||||
|
||||
**Problem:** gost's SOCKS5 server always selects authentication method 0x02 (username/password), even when no real credentials are needed. Clients that only offer method 0x00 (no auth) get rejected.
|
||||
|
||||
**Fix:** Always include credentials in the proxy URL (e.g., `proxy:proxy@`). In tun2socks proxy URL construction, include `userinfo` so tun2socks offers both auth methods during SOCKS5 negotiation.
|
||||
|
||||
---
|
||||
|
||||
## Network namespaces fail on Ubuntu 24.04 (`RTM_NEWADDR: Operation not permitted`)
|
||||
|
||||
**Problem:** On Ubuntu 24.04 (tested in a KVM guest with bridged virtio/virbr0), `--version` reports `bwrap(no-netns)` and transparent proxy is unavailable. `kernel.unprivileged_userns_clone=1` is set, bwrap and socat are installed, but `bwrap --unshare-net` fails with:
|
||||
```
|
||||
bwrap: loopback: Failed RTM_NEWADDR: Operation not permitted
|
||||
```
|
||||
|
||||
**Cause:** Ubuntu 24.04 introduced `kernel.apparmor_restrict_unprivileged_userns` (default: 1). This strips capabilities like `CAP_NET_ADMIN` from processes inside unprivileged user namespaces, even without a bwrap-specific AppArmor profile. Bubblewrap creates the network namespace successfully but cannot configure the loopback interface (adding 127.0.0.1 via netlink RTM_NEWADDR requires `CAP_NET_ADMIN`). Not a hypervisor issue — happens on bare metal Ubuntu 24.04 too.
|
||||
|
||||
**Diagnosis:**
|
||||
```bash
|
||||
sysctl kernel.apparmor_restrict_unprivileged_userns # likely returns 1
|
||||
bwrap --unshare-net --ro-bind / / -- /bin/true # reproduces the error
|
||||
```
|
||||
|
||||
**Fix:** Disable the restriction (requires root on the guest):
|
||||
```bash
|
||||
sudo sysctl -w kernel.apparmor_restrict_unprivileged_userns=0
|
||||
# Persist across reboots:
|
||||
echo 'kernel.apparmor_restrict_unprivileged_userns=0' | sudo tee /etc/sysctl.d/99-greywall-userns.conf
|
||||
```
|
||||
|
||||
**Alternative:** Accept the limitation — greywall still works for filesystem sandboxing, seccomp, and Landlock. Network access is blocked outright rather than redirected through a proxy.
|
||||
|
||||
---
|
||||
|
||||
## Linux: symlinked system dirs invisible after `--tmpfs /`
|
||||
|
||||
**Problem:** On merged-usr distros (Arch, Fedora, modern Ubuntu), `/bin`, `/sbin`, `/lib`, `/lib64` are symlinks (e.g., `/bin -> usr/bin`). When switching from `--ro-bind / /` to `--tmpfs /` for deny-by-default isolation, these symlinks don't exist in the empty root. The `canMountOver()` helper explicitly rejects symlinks, so `--ro-bind /bin /bin` was silently skipped. Result: `execvp /usr/bin/bash: No such file or directory` — bash exists at `/usr/bin/bash` but the dynamic linker at `/lib64/ld-linux-x86-64.so.2` can't be found because `/lib64` is missing.
|
||||
|
||||
**Diagnosis:** The error message is misleading. `execvp` reports "No such file or directory" both when the binary is missing and when the ELF interpreter (dynamic linker) is missing. The actual binary `/usr/bin/bash` existed via the `/usr` bind-mount, but the symlink `/lib64 -> usr/lib` was gone.
|
||||
|
||||
**Fix:** Check each system path with `isSymlink()` before mounting. Symlinks get `--symlink <target> <path>` (bwrap recreates the symlink inside the sandbox); real directories get `--ro-bind`. On Arch: `--symlink usr/bin /bin`, `--symlink usr/bin /sbin`, `--symlink usr/lib /lib`, `--symlink usr/lib /lib64`.
|
||||
|
||||
---
|
||||
|
||||
## Linux: Landlock denies reads on bind-mounted /dev/null
|
||||
|
||||
**Problem:** To mask `.env` files inside CWD, the initial approach used `--ro-bind /dev/null <cwd>/.env`. Inside the sandbox, `.env` appeared as a character device (bind mounts preserve file type). Landlock's `LANDLOCK_ACCESS_FS_READ_FILE` right only covers regular files, not character devices. Result: `cat .env` returned "Permission denied" instead of empty content.
|
||||
|
||||
**Fix:** Use an empty regular file (`/tmp/greywall/empty`, 0 bytes, mode 0444) as the mask source instead of `/dev/null`. Landlock sees a regular file and allows the read. The file is created once in a fixed location under the greywall temp dir.
|
||||
|
||||
---
|
||||
|
||||
## Linux: mandatory deny paths override sensitive file masks
|
||||
|
||||
**Problem:** In deny-by-default mode, `buildDenyByDefaultMounts()` correctly masked `.env` with `--ro-bind /tmp/greywall/empty <cwd>/.env`. But later in `WrapCommandLinuxWithOptions()`, the mandatory deny paths section called `getMandatoryDenyPaths()` which included `.env` files (added for write protection). It then applied `--ro-bind <cwd>/.env <cwd>/.env`, binding the real file over the empty mask. bwrap applies mounts in order, so the later ro-bind undid the masking.
|
||||
|
||||
**Fix:** Track paths already masked by `buildDenyByDefaultMounts()` in a set. Skip those paths in the mandatory deny section to preserve the empty-file overlay.
|
||||
@@ -1,11 +1,11 @@
|
||||
# Library Usage (Go)
|
||||
|
||||
Fence can be used as a Go library to sandbox commands programmatically.
|
||||
Greywall can be used as a Go library to sandbox commands programmatically.
|
||||
|
||||
## Installation
|
||||
|
||||
```bash
|
||||
go get github.com/Use-Tusk/fence
|
||||
go get gitea.app.monadical.io/monadical/greywall
|
||||
```
|
||||
|
||||
## Quick Start
|
||||
@@ -17,25 +17,25 @@ import (
|
||||
"fmt"
|
||||
"os/exec"
|
||||
|
||||
"github.com/Use-Tusk/fence/pkg/fence"
|
||||
"gitea.app.monadical.io/monadical/greywall/pkg/greywall"
|
||||
)
|
||||
|
||||
func main() {
|
||||
// Check platform support
|
||||
if !fence.IsSupported() {
|
||||
if !greywall.IsSupported() {
|
||||
fmt.Println("Sandboxing not supported on this platform")
|
||||
return
|
||||
}
|
||||
|
||||
// Create config
|
||||
cfg := &fence.Config{
|
||||
Network: fence.NetworkConfig{
|
||||
cfg := &greywall.Config{
|
||||
Network: greywall.NetworkConfig{
|
||||
AllowedDomains: []string{"api.example.com"},
|
||||
},
|
||||
}
|
||||
|
||||
// Create and initialize manager
|
||||
manager := fence.NewManager(cfg, false, false)
|
||||
manager := greywall.NewManager(cfg, false, false)
|
||||
defer manager.Cleanup()
|
||||
|
||||
if err := manager.Initialize(); err != nil {
|
||||
@@ -64,7 +64,7 @@ func main() {
|
||||
Returns `true` if the current platform supports sandboxing (macOS or Linux).
|
||||
|
||||
```go
|
||||
if !fence.IsSupported() {
|
||||
if !greywall.IsSupported() {
|
||||
log.Fatal("Platform not supported")
|
||||
}
|
||||
```
|
||||
@@ -74,7 +74,7 @@ if !fence.IsSupported() {
|
||||
Returns a default configuration with all network blocked.
|
||||
|
||||
```go
|
||||
cfg := fence.DefaultConfig()
|
||||
cfg := greywall.DefaultConfig()
|
||||
cfg.Network.AllowedDomains = []string{"example.com"}
|
||||
```
|
||||
|
||||
@@ -83,18 +83,18 @@ cfg.Network.AllowedDomains = []string{"example.com"}
|
||||
Loads configuration from a JSON file. Supports JSONC (comments allowed).
|
||||
|
||||
```go
|
||||
cfg, err := fence.LoadConfig(fence.DefaultConfigPath())
|
||||
cfg, err := greywall.LoadConfig(greywall.DefaultConfigPath())
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
if cfg == nil {
|
||||
cfg = fence.DefaultConfig() // File doesn't exist
|
||||
cfg = greywall.DefaultConfig() // File doesn't exist
|
||||
}
|
||||
```
|
||||
|
||||
#### `DefaultConfigPath() string`
|
||||
|
||||
Returns the default config file path (`~/.config/fence/fence.json` on Linux, `~/Library/Application Support/fence/fence.json` on macOS, with fallback to legacy `~/.fence.json`).
|
||||
Returns the default config file path (`~/.config/greywall/greywall.json` on Linux, `~/Library/Application Support/greywall/greywall.json` on macOS, with fallback to legacy `~/.greywall.json`).
|
||||
|
||||
#### `NewManager(cfg *Config, debug, monitor bool) *Manager`
|
||||
|
||||
@@ -113,7 +113,7 @@ Creates a new sandbox manager.
|
||||
Sets up sandbox infrastructure (starts HTTP and SOCKS proxies). Called automatically by `WrapCommand` if not already initialized.
|
||||
|
||||
```go
|
||||
manager := fence.NewManager(cfg, false, false)
|
||||
manager := greywall.NewManager(cfg, false, false)
|
||||
defer manager.Cleanup()
|
||||
|
||||
if err := manager.Initialize(); err != nil {
|
||||
@@ -222,8 +222,8 @@ type SSHConfig struct {
|
||||
### Allow specific domains
|
||||
|
||||
```go
|
||||
cfg := &fence.Config{
|
||||
Network: fence.NetworkConfig{
|
||||
cfg := &greywall.Config{
|
||||
Network: greywall.NetworkConfig{
|
||||
AllowedDomains: []string{
|
||||
"registry.npmjs.org",
|
||||
"*.github.com",
|
||||
@@ -236,8 +236,8 @@ cfg := &fence.Config{
|
||||
### Restrict filesystem access
|
||||
|
||||
```go
|
||||
cfg := &fence.Config{
|
||||
Filesystem: fence.FilesystemConfig{
|
||||
cfg := &greywall.Config{
|
||||
Filesystem: greywall.FilesystemConfig{
|
||||
AllowWrite: []string{".", "/tmp"},
|
||||
DenyRead: []string{"~/.ssh", "~/.aws"},
|
||||
},
|
||||
@@ -247,8 +247,8 @@ cfg := &fence.Config{
|
||||
### Block dangerous commands
|
||||
|
||||
```go
|
||||
cfg := &fence.Config{
|
||||
Command: fence.CommandConfig{
|
||||
cfg := &greywall.Config{
|
||||
Command: greywall.CommandConfig{
|
||||
Deny: []string{
|
||||
"rm -rf /",
|
||||
"git push",
|
||||
@@ -261,7 +261,7 @@ cfg := &fence.Config{
|
||||
### Expose dev server port
|
||||
|
||||
```go
|
||||
manager := fence.NewManager(cfg, false, false)
|
||||
manager := greywall.NewManager(cfg, false, false)
|
||||
manager.SetExposedPorts([]int{3000})
|
||||
defer manager.Cleanup()
|
||||
|
||||
@@ -271,12 +271,12 @@ wrapped, _ := manager.WrapCommand("npm run dev")
|
||||
### Load and extend config
|
||||
|
||||
```go
|
||||
cfg, err := fence.LoadConfig(fence.DefaultConfigPath())
|
||||
cfg, err := greywall.LoadConfig(greywall.DefaultConfigPath())
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
if cfg == nil {
|
||||
cfg = fence.DefaultConfig()
|
||||
cfg = greywall.DefaultConfig()
|
||||
}
|
||||
|
||||
// Add additional restrictions
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# Linux Security Features
|
||||
|
||||
Fence uses multiple layers of security on Linux, with graceful fallback when features are unavailable.
|
||||
Greywall uses multiple layers of security on Linux, with graceful fallback when features are unavailable.
|
||||
|
||||
## Security Layers
|
||||
|
||||
@@ -13,13 +13,13 @@ Fence uses multiple layers of security on Linux, with graceful fallback when fea
|
||||
|
||||
## Feature Detection
|
||||
|
||||
Fence automatically detects available features and uses the best available combination.
|
||||
Greywall automatically detects available features and uses the best available combination.
|
||||
|
||||
To see what features are detected:
|
||||
|
||||
```bash
|
||||
# Check what features are available on your system
|
||||
fence --linux-features
|
||||
greywall --linux-features
|
||||
|
||||
# Example output:
|
||||
# Linux Sandbox Features:
|
||||
@@ -41,7 +41,7 @@ fence --linux-features
|
||||
|
||||
Landlock is applied via an **embedded wrapper** approach:
|
||||
|
||||
1. bwrap spawns `fence --landlock-apply -- <user-command>`
|
||||
1. bwrap spawns `greywall --landlock-apply -- <user-command>`
|
||||
2. The wrapper applies Landlock kernel restrictions
|
||||
3. The wrapper `exec()`s the user command
|
||||
|
||||
@@ -75,25 +75,25 @@ This provides **defense-in-depth**: both bwrap mounts AND Landlock kernel restri
|
||||
- **Impact**: `--unshare-net` is skipped; network is not fully isolated
|
||||
- **Cause**: Running in Docker, GitHub Actions, or other environments without `CAP_NET_ADMIN`
|
||||
- **Fallback**: Proxy-based filtering still works; filesystem/PID/seccomp isolation still active
|
||||
- **Check**: Run `fence --linux-features` and look for "Network namespace (--unshare-net): false"
|
||||
- **Check**: Run `greywall --linux-features` and look for "Network namespace (--unshare-net): false"
|
||||
- **Workaround**: Run with `sudo`, or in Docker use `--cap-add=NET_ADMIN`
|
||||
|
||||
> [!NOTE]
|
||||
> This is the most common "reduced isolation" scenario. Fence automatically detects this at startup and adapts. See the troubleshooting guide for more details.
|
||||
> This is the most common "reduced isolation" scenario. Greywall automatically detects this at startup and adapts. See the troubleshooting guide for more details.
|
||||
|
||||
### When bwrap is not available
|
||||
|
||||
- **Impact**: Cannot run fence on Linux
|
||||
- **Impact**: Cannot run greywall on Linux
|
||||
- **Solution**: Install bubblewrap: `apt install bubblewrap` or `dnf install bubblewrap`
|
||||
|
||||
### When socat is not available
|
||||
|
||||
- **Impact**: Cannot run fence on Linux
|
||||
- **Impact**: Cannot run greywall on Linux
|
||||
- **Solution**: Install socat: `apt install socat` or `dnf install socat`
|
||||
|
||||
## Blocked Syscalls (seccomp)
|
||||
|
||||
Fence blocks dangerous syscalls that could be used for sandbox escape or privilege escalation:
|
||||
Greywall blocks dangerous syscalls that could be used for sandbox escape or privilege escalation:
|
||||
|
||||
| Syscall | Reason |
|
||||
|---------|--------|
|
||||
@@ -111,13 +111,13 @@ Fence blocks dangerous syscalls that could be used for sandbox escape or privile
|
||||
|
||||
## Violation Monitoring
|
||||
|
||||
On Linux, violation monitoring (`fence -m`) shows:
|
||||
On Linux, violation monitoring (`greywall -m`) shows:
|
||||
|
||||
| Source | What it shows | Requirements |
|
||||
|--------|---------------|--------------|
|
||||
| `[fence:http]` | Blocked HTTP/HTTPS requests | None |
|
||||
| `[fence:socks]` | Blocked SOCKS connections | None |
|
||||
| `[fence:ebpf]` | Blocked filesystem access + syscalls | CAP_BPF or root |
|
||||
| `[greywall:http]` | Blocked HTTP/HTTPS requests | None |
|
||||
| `[greywall:socks]` | Blocked SOCKS connections | None |
|
||||
| `[greywall:ebpf]` | Blocked filesystem access + syscalls | CAP_BPF or root |
|
||||
|
||||
**Notes**:
|
||||
|
||||
@@ -127,7 +127,7 @@ On Linux, violation monitoring (`fence -m`) shows:
|
||||
|
||||
## Comparison with macOS
|
||||
|
||||
| Feature | macOS (Seatbelt) | Linux (fence) |
|
||||
| Feature | macOS (Seatbelt) | Linux (greywall) |
|
||||
|---------|------------------|---------------|
|
||||
| Filesystem control | Native | bwrap + Landlock |
|
||||
| Glob patterns | Native regex | Expanded at startup |
|
||||
@@ -181,12 +181,12 @@ sudo apk add bubblewrap socat
|
||||
For full violation visibility without root:
|
||||
|
||||
```bash
|
||||
# Grant CAP_BPF to the fence binary
|
||||
sudo setcap cap_bpf+ep /usr/local/bin/fence
|
||||
# Grant CAP_BPF to the greywall binary
|
||||
sudo setcap cap_bpf+ep /usr/local/bin/greywall
|
||||
```
|
||||
|
||||
Or run fence with sudo when monitoring is needed:
|
||||
Or run greywall with sudo when monitoring is needed:
|
||||
|
||||
```bash
|
||||
sudo fence -m <command>
|
||||
sudo greywall -m <command>
|
||||
```
|
||||
|
||||
@@ -5,16 +5,16 @@
|
||||
### From Source (recommended for now)
|
||||
|
||||
```bash
|
||||
git clone https://github.com/Use-Tusk/fence
|
||||
cd fence
|
||||
go build -o fence ./cmd/fence
|
||||
sudo mv fence /usr/local/bin/
|
||||
git clone https://gitea.app.monadical.io/monadical/greywall
|
||||
cd greywall
|
||||
go build -o greywall ./cmd/greywall
|
||||
sudo mv greywall /usr/local/bin/
|
||||
```
|
||||
|
||||
### Using Go Install
|
||||
|
||||
```bash
|
||||
go install github.com/Use-Tusk/fence/cmd/fence@latest
|
||||
go install gitea.app.monadical.io/monadical/greywall/cmd/greywall@latest
|
||||
```
|
||||
|
||||
### Linux Dependencies
|
||||
@@ -32,30 +32,30 @@ sudo dnf install bubblewrap socat
|
||||
sudo pacman -S bubblewrap socat
|
||||
```
|
||||
|
||||
### Do I need sudo to run fence?
|
||||
### Do I need sudo to run greywall?
|
||||
|
||||
No, for most Linux systems. Fence works without root privileges because:
|
||||
No, for most Linux systems. Greywall works without root privileges because:
|
||||
|
||||
- Package-manager-installed `bubblewrap` is typically already setuid
|
||||
- Fence detects available capabilities and adapts automatically
|
||||
- Greywall detects available capabilities and adapts automatically
|
||||
|
||||
If some features aren't available (like network namespaces in Docker/CI), fence falls back gracefully - you'll still get filesystem isolation, command blocking, and proxy-based network filtering.
|
||||
If some features aren't available (like network namespaces in Docker/CI), greywall falls back gracefully - you'll still get filesystem isolation, command blocking, and proxy-based network filtering.
|
||||
|
||||
Run `fence --linux-features` to see what's available in your environment.
|
||||
Run `greywall --linux-features` to see what's available in your environment.
|
||||
|
||||
## Verify Installation
|
||||
|
||||
```bash
|
||||
fence --version
|
||||
greywall --version
|
||||
```
|
||||
|
||||
## Your First Sandboxed Command
|
||||
|
||||
By default, fence blocks all network access:
|
||||
By default, greywall blocks all network access:
|
||||
|
||||
```bash
|
||||
# This will fail - network is blocked
|
||||
fence curl https://example.com
|
||||
greywall curl https://example.com
|
||||
```
|
||||
|
||||
You should see something like:
|
||||
@@ -66,7 +66,7 @@ curl: (56) CONNECT tunnel failed, response 403
|
||||
|
||||
## Allow Specific Domains
|
||||
|
||||
Create a config file at `~/.config/fence/fence.json` (or `~/Library/Application Support/fence/fence.json` on macOS):
|
||||
Create a config file at `~/.config/greywall/greywall.json` (or `~/Library/Application Support/greywall/greywall.json` on macOS):
|
||||
|
||||
```json
|
||||
{
|
||||
@@ -79,7 +79,7 @@ Create a config file at `~/.config/fence/fence.json` (or `~/Library/Application
|
||||
Now try again:
|
||||
|
||||
```bash
|
||||
fence curl https://example.com
|
||||
greywall curl https://example.com
|
||||
```
|
||||
|
||||
This time it succeeds!
|
||||
@@ -89,7 +89,7 @@ This time it succeeds!
|
||||
Use `-d` to see what's happening under the hood:
|
||||
|
||||
```bash
|
||||
fence -d curl https://example.com
|
||||
greywall -d curl https://example.com
|
||||
```
|
||||
|
||||
This shows:
|
||||
@@ -103,7 +103,7 @@ This shows:
|
||||
Use `-m` to see only violations and blocked requests:
|
||||
|
||||
```bash
|
||||
fence -m npm install
|
||||
greywall -m npm install
|
||||
```
|
||||
|
||||
This is useful for:
|
||||
@@ -117,7 +117,7 @@ This is useful for:
|
||||
Use `-c` to run compound commands:
|
||||
|
||||
```bash
|
||||
fence -c "echo hello && ls -la"
|
||||
greywall -c "echo hello && ls -la"
|
||||
```
|
||||
|
||||
## Expose Ports for Servers
|
||||
@@ -125,14 +125,14 @@ fence -c "echo hello && ls -la"
|
||||
If you're running a server that needs to accept connections:
|
||||
|
||||
```bash
|
||||
fence -p 3000 -c "npm run dev"
|
||||
greywall -p 3000 -c "npm run dev"
|
||||
```
|
||||
|
||||
This allows external connections to port 3000 while keeping outbound network restricted.
|
||||
|
||||
## Next steps
|
||||
|
||||
- Read **[Why Fence](why-fence.md)** to understand when fence is a good fit (and when it isn't).
|
||||
- Read **[Why Greywall](why-greywall.md)** to understand when greywall is a good fit (and when it isn't).
|
||||
- Learn the mental model in **[Concepts](concepts.md)**.
|
||||
- Use **[Troubleshooting](troubleshooting.md)** if something is blocked unexpectedly.
|
||||
- Start from copy/paste configs in **[`docs/templates/`](templates/README.md)**.
|
||||
|
||||
@@ -18,7 +18,7 @@ Goal: make CI steps safer by default: minimal egress and controlled writes.
|
||||
Run:
|
||||
|
||||
```bash
|
||||
fence --settings ./fence.json -c "make test"
|
||||
greywall --settings ./greywall.json -c "make test"
|
||||
```
|
||||
|
||||
## Add only what you need
|
||||
@@ -26,7 +26,7 @@ fence --settings ./fence.json -c "make test"
|
||||
Use monitor mode to discover what a job tries to reach:
|
||||
|
||||
```bash
|
||||
fence -m --settings ./fence.json -c "make test"
|
||||
greywall -m --settings ./greywall.json -c "make test"
|
||||
```
|
||||
|
||||
Then allowlist only:
|
||||
|
||||
@@ -18,7 +18,7 @@ Goal: allow fetching code from a limited set of hosts.
|
||||
Run:
|
||||
|
||||
```bash
|
||||
fence --settings ./fence.json git clone https://github.com/OWNER/REPO.git
|
||||
greywall --settings ./greywall.json git clone https://github.com/OWNER/REPO.git
|
||||
```
|
||||
|
||||
## SSH clone
|
||||
@@ -28,5 +28,5 @@ SSH traffic may go through SOCKS5 (`ALL_PROXY`) depending on your git/ssh config
|
||||
If it fails, use monitor/debug mode to see what was blocked:
|
||||
|
||||
```bash
|
||||
fence -m --settings ./fence.json git clone git@github.com:OWNER/REPO.git
|
||||
greywall -m --settings ./greywall.json git clone git@github.com:OWNER/REPO.git
|
||||
```
|
||||
|
||||
@@ -18,7 +18,7 @@ Goal: allow npm to fetch packages, but block unexpected egress.
|
||||
Run:
|
||||
|
||||
```bash
|
||||
fence --settings ./fence.json npm install
|
||||
greywall --settings ./greywall.json npm install
|
||||
```
|
||||
|
||||
## Iterate with monitor mode
|
||||
@@ -26,7 +26,7 @@ fence --settings ./fence.json npm install
|
||||
If installs fail, run:
|
||||
|
||||
```bash
|
||||
fence -m --settings ./fence.json npm install
|
||||
greywall -m --settings ./greywall.json npm install
|
||||
```
|
||||
|
||||
Then add the minimum extra domains required for your workflow (private registries, GitHub tarballs, etc.).
|
||||
|
||||
@@ -18,19 +18,19 @@ Goal: allow Python dependency fetching while keeping egress minimal.
|
||||
Run:
|
||||
|
||||
```bash
|
||||
fence --settings ./fence.json pip install -r requirements.txt
|
||||
greywall --settings ./greywall.json pip install -r requirements.txt
|
||||
```
|
||||
|
||||
For Poetry:
|
||||
|
||||
```bash
|
||||
fence --settings ./fence.json poetry install
|
||||
greywall --settings ./greywall.json poetry install
|
||||
```
|
||||
|
||||
## Iterate with monitor mode
|
||||
|
||||
```bash
|
||||
fence -m --settings ./fence.json poetry install
|
||||
greywall -m --settings ./greywall.json poetry install
|
||||
```
|
||||
|
||||
If you use private indexes, add those domains explicitly.
|
||||
|
||||
@@ -1,19 +1,19 @@
|
||||
# Security Model
|
||||
|
||||
Fence is intended as defense-in-depth for running semi-trusted commands with reduced side effects (package installs, build scripts, CI jobs, unfamiliar repos).
|
||||
Greywall is intended as defense-in-depth for running semi-trusted commands with reduced side effects (package installs, build scripts, CI jobs, unfamiliar repos).
|
||||
|
||||
It is not designed to be a strong isolation boundary against actively malicious code that is attempting to escape.
|
||||
|
||||
## Threat model (what Fence helps with)
|
||||
## Threat model (what Greywall helps with)
|
||||
|
||||
Fence is useful when you want to reduce risk from:
|
||||
Greywall is useful when you want to reduce risk from:
|
||||
|
||||
- Supply-chain scripts that unexpectedly call out to the network
|
||||
- Tools that write broadly across your filesystem
|
||||
- Accidental leakage of secrets via "phone home" behavior
|
||||
- Unfamiliar repos that run surprising commands during install/build/test
|
||||
|
||||
## What Fence enforces
|
||||
## What Greywall enforces
|
||||
|
||||
### Network
|
||||
|
||||
@@ -25,7 +25,7 @@ Important: domain filtering does not inspect content. If you allow a domain, cod
|
||||
|
||||
#### How allowlisting works
|
||||
|
||||
Fence combines OS-level enforcement with proxy-based allowlisting:
|
||||
Greywall combines OS-level enforcement with proxy-based allowlisting:
|
||||
|
||||
- The OS sandbox / network namespace is expected to block direct outbound connections.
|
||||
- Domain allowlisting happens via local HTTP/SOCKS proxies and proxy environment variables (`HTTP_PROXY`, `HTTPS_PROXY`, `ALL_PROXY`).
|
||||
@@ -41,11 +41,11 @@ Localhost is separate from "external domains":
|
||||
- **Writes are denied by default**; you must opt in with `allowWrite`.
|
||||
- **denyWrite** can block specific files/patterns even if the parent directory is writable.
|
||||
- **denyRead** can block reads from sensitive paths.
|
||||
- Fence includes an internal list of always-protected targets (e.g. shell configs, git hooks) to reduce common persistence vectors.
|
||||
- Greywall includes an internal list of always-protected targets (e.g. shell configs, git hooks) to reduce common persistence vectors.
|
||||
|
||||
### Environment sanitization
|
||||
|
||||
Fence strips dangerous environment variables before passing them to sandboxed commands:
|
||||
Greywall strips dangerous environment variables before passing them to sandboxed commands:
|
||||
|
||||
- `LD_*` (Linux): `LD_PRELOAD`, `LD_LIBRARY_PATH`, etc.
|
||||
- `DYLD_*` (macOS): `DYLD_INSERT_LIBRARIES`, `DYLD_LIBRARY_PATH`, etc.
|
||||
@@ -57,11 +57,11 @@ This prevents a library injection attack where a sandboxed process writes a mali
|
||||
- `-m/--monitor` helps you discover what a command *tries* to access (blocked only).
|
||||
- `-d/--debug` shows more detail to understand why something was blocked.
|
||||
|
||||
## Limitations (what Fence does NOT try to solve)
|
||||
## Limitations (what Greywall does NOT try to solve)
|
||||
|
||||
- **Hostile code containment**: assume determined attackers may escape via kernel/OS vulnerabilities.
|
||||
- **Resource limits**: CPU, memory, disk, fork bombs, etc. are out of scope.
|
||||
- **Content-based controls**: Fence does not block data exfiltration to *allowed* destinations.
|
||||
- **Content-based controls**: Greywall does not block data exfiltration to *allowed* destinations.
|
||||
- **Proxy limitations / protocol edge cases**: some programs may not respect proxy environment variables, so they won't get domain allowlisting unless you configure them to use a proxy (e.g. Node.js `http`/`https` without a proxy-aware client).
|
||||
|
||||
### Practical examples of proxy limitations
|
||||
@@ -71,14 +71,14 @@ The proxy approach works well for many tools (curl, wget, git, npm, pip), but no
|
||||
- Node.js native `http`/`https` (use a proxy-aware client, e.g. `undici` + `ProxyAgent`)
|
||||
- Raw socket connections (custom TCP/UDP protocols)
|
||||
|
||||
Fence's OS-level sandbox is still expected to block direct outbound connections; bypassing the proxy should fail rather than silently succeeding.
|
||||
Greywall's OS-level sandbox is still expected to block direct outbound connections; bypassing the proxy should fail rather than silently succeeding.
|
||||
|
||||
### Domain-based filtering only
|
||||
|
||||
Fence does not inspect request content. If you allow a domain, a sandboxed process can still exfiltrate data to that domain.
|
||||
Greywall does not inspect request content. If you allow a domain, a sandboxed process can still exfiltrate data to that domain.
|
||||
|
||||
### Not a hostile-code containment boundary
|
||||
|
||||
Fence is defense-in-depth for running semi-trusted code, not a strong isolation boundary against malware designed to escape sandboxes.
|
||||
Greywall is defense-in-depth for running semi-trusted code, not a strong isolation boundary against malware designed to escape sandboxes.
|
||||
|
||||
For implementation details (how proxies/sandboxes/bridges work), see [`ARCHITECTURE.md`](../ARCHITECTURE.md).
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# Config Templates
|
||||
|
||||
Fence includes built-in config templates for common use cases. Templates are embedded in the binary, so you can use them directly without copying files.
|
||||
Greywall includes built-in config templates for common use cases. Templates are embedded in the binary, so you can use them directly without copying files.
|
||||
|
||||
## Using templates
|
||||
|
||||
@@ -8,13 +8,13 @@ Use the `-t` / `--template` flag to apply a template:
|
||||
|
||||
```bash
|
||||
# Use a built-in template
|
||||
fence -t npm-install npm install
|
||||
greywall -t npm-install npm install
|
||||
|
||||
# Wraps Claude Code
|
||||
fence -t code -- claude
|
||||
greywall -t code -- claude
|
||||
|
||||
# List available templates
|
||||
fence --list-templates
|
||||
greywall --list-templates
|
||||
```
|
||||
|
||||
You can also copy and customize templates from [`internal/templates/`](/internal/templates/).
|
||||
|
||||
@@ -67,9 +67,9 @@ go test -v -count=1 ./internal/sandbox/...
|
||||
|
||||
#### Sandboxed Build Environments (Nix, etc.)
|
||||
|
||||
If you're packaging fence for a distribution (e.g., Nix, Homebrew, Debian), note that some integration tests will be skipped when running `go test` during the build.
|
||||
If you're packaging greywall for a distribution (e.g., Nix, Homebrew, Debian), note that some integration tests will be skipped when running `go test` during the build.
|
||||
|
||||
Fence's Landlock integration on Linux uses a wrapper approach: the `fence` binary re-executes itself with `--landlock-apply` inside the sandbox. Test binaries (e.g., `sandbox.test`) don't have this handler, so Landlock-specific tests automatically skip when not running as the `fence` CLI.
|
||||
Greywall's Landlock integration on Linux uses a wrapper approach: the `greywall` binary re-executes itself with `--landlock-apply` inside the sandbox. Test binaries (e.g., `sandbox.test`) don't have this handler, so Landlock-specific tests automatically skip when not running as the `greywall` CLI.
|
||||
|
||||
Tests that skip include those calling `skipIfLandlockNotUsable()`:
|
||||
|
||||
@@ -77,25 +77,25 @@ Tests that skip include those calling `skipIfLandlockNotUsable()`:
|
||||
- `TestLinux_LandlockProtectsGitHooks`
|
||||
- `TestLinux_LandlockProtectsGitConfig`
|
||||
- `TestLinux_LandlockProtectsBashrc`
|
||||
- `TestLinux_LandlockAllowsTmpFence`
|
||||
- `TestLinux_LandlockAllowsTmpGreywall`
|
||||
- `TestLinux_PathTraversalBlocked`
|
||||
- `TestLinux_SeccompBlocksDangerousSyscalls`
|
||||
|
||||
| Test Type | What it tests | Landlock coverage |
|
||||
|-----------|---------------|-------------------|
|
||||
| `go test` (integration) | Go APIs, bwrap isolation, command blocking | Skipped (test binary can't use `--landlock-apply`) |
|
||||
| `smoke_test.sh` | Actual `fence` CLI end-to-end | ✅ Full coverage |
|
||||
| `smoke_test.sh` | Actual `greywall` CLI end-to-end | ✅ Full coverage |
|
||||
|
||||
For full test coverage including Landlock, run the smoke tests against the built binary (see "Smoke Tests" section below).
|
||||
|
||||
**Nested sandboxing limitations:**
|
||||
|
||||
- **macOS**: Nested Seatbelt sandboxing is not supported. If the build environment already uses `sandbox-exec` (like Nix's Darwin sandbox), fence's tests cannot create another sandbox. The kernel returns `forbidden-sandbox-reinit`. This is a macOS limitation.
|
||||
- **macOS**: Nested Seatbelt sandboxing is not supported. If the build environment already uses `sandbox-exec` (like Nix's Darwin sandbox), greywall's tests cannot create another sandbox. The kernel returns `forbidden-sandbox-reinit`. This is a macOS limitation.
|
||||
- **Linux**: Tests should work in most build sandboxes, but Landlock tests will skip as explained above. Runtime functionality is unaffected.
|
||||
|
||||
### Smoke Tests
|
||||
|
||||
Smoke tests verify the compiled `fence` binary works end-to-end. Unlike integration tests (which test internal Go APIs), smoke tests exercise the CLI interface.
|
||||
Smoke tests verify the compiled `greywall` binary works end-to-end. Unlike integration tests (which test internal Go APIs), smoke tests exercise the CLI interface.
|
||||
|
||||
**File:** [`scripts/smoke_test.sh`](/scripts/smoke_test.sh)
|
||||
|
||||
@@ -105,7 +105,7 @@ Smoke tests verify the compiled `fence` binary works end-to-end. Unlike integrat
|
||||
- Filesystem restrictions via settings file
|
||||
- Command blocking via settings file
|
||||
- Network blocking
|
||||
- Environment variable injection (FENCE_SANDBOX, HTTP_PROXY)
|
||||
- Environment variable injection (GREYWALL_SANDBOX, HTTP_PROXY)
|
||||
- Tool compatibility (python3, node, git, rg) - ensure that frequently used tools don't break in sandbox
|
||||
|
||||
**Run:**
|
||||
@@ -115,10 +115,10 @@ Smoke tests verify the compiled `fence` binary works end-to-end. Unlike integrat
|
||||
./scripts/smoke_test.sh
|
||||
|
||||
# Test specific binary
|
||||
./scripts/smoke_test.sh ./path/to/fence
|
||||
./scripts/smoke_test.sh ./path/to/greywall
|
||||
|
||||
# Enable network tests (requires internet)
|
||||
FENCE_TEST_NETWORK=1 ./scripts/smoke_test.sh
|
||||
GREYWALL_TEST_NETWORK=1 ./scripts/smoke_test.sh
|
||||
```
|
||||
|
||||
## Platform-Specific Behavior
|
||||
@@ -158,7 +158,7 @@ The `integration_test.go` file provides helpers for writing sandbox tests:
|
||||
|
||||
```go
|
||||
// Skip helpers
|
||||
skipIfAlreadySandboxed(t) // Skip if running inside Fence
|
||||
skipIfAlreadySandboxed(t) // Skip if running inside Greywall
|
||||
skipIfCommandNotFound(t, "python3") // Skip if command missing
|
||||
|
||||
// Run a command under the sandbox
|
||||
@@ -237,10 +237,10 @@ go test -v -run TestSpecificTest ./internal/sandbox/...
|
||||
|
||||
```bash
|
||||
# Replicate what the test does
|
||||
./fence -c "the-command-that-failed"
|
||||
./greywall -c "the-command-that-failed"
|
||||
|
||||
# With a settings file
|
||||
./fence -s /path/to/settings.json -c "command"
|
||||
./greywall -s /path/to/settings.json -c "command"
|
||||
```
|
||||
|
||||
### Check platform capabilities
|
||||
|
||||
@@ -2,9 +2,9 @@
|
||||
|
||||
## Nested Sandboxing Not Supported
|
||||
|
||||
Fence cannot run inside another sandbox that uses the same underlying technology.
|
||||
Greywall cannot run inside another sandbox that uses the same underlying technology.
|
||||
|
||||
**macOS (Seatbelt)**: If you try to run fence inside an existing `sandbox-exec` sandbox (e.g., Nix's Darwin build sandbox), you'll see:
|
||||
**macOS (Seatbelt)**: If you try to run greywall inside an existing `sandbox-exec` sandbox (e.g., Nix's Darwin build sandbox), you'll see:
|
||||
|
||||
```text
|
||||
Sandbox: sandbox-exec(...) deny(1) forbidden-sandbox-reinit
|
||||
@@ -12,11 +12,11 @@ Sandbox: sandbox-exec(...) deny(1) forbidden-sandbox-reinit
|
||||
|
||||
This is a macOS kernel limitation - nested Seatbelt sandboxes are not allowed. There is no workaround.
|
||||
|
||||
**Linux (Landlock)**: Landlock supports stacking (nested restrictions), but fence's test binaries cannot use the Landlock wrapper (see [Testing docs](testing.md#sandboxed-build-environments-nix-etc)).
|
||||
**Linux (Landlock)**: Landlock supports stacking (nested restrictions), but greywall's test binaries cannot use the Landlock wrapper (see [Testing docs](testing.md#sandboxed-build-environments-nix-etc)).
|
||||
|
||||
## "bwrap: loopback: Failed RTM_NEWADDR: Operation not permitted" (Linux)
|
||||
|
||||
This error occurs when fence tries to create a network namespace but the environment lacks the `CAP_NET_ADMIN` capability. This is common in:
|
||||
This error occurs when greywall tries to create a network namespace but the environment lacks the `CAP_NET_ADMIN` capability. This is common in:
|
||||
|
||||
- **Docker containers** (unless run with `--privileged` or `--cap-add=NET_ADMIN`)
|
||||
- **GitHub Actions** and other CI runners
|
||||
@@ -25,7 +25,7 @@ This error occurs when fence tries to create a network namespace but the environ
|
||||
|
||||
**What happens now:**
|
||||
|
||||
Fence automatically detects this limitation and falls back to running **without network namespace isolation**. The sandbox still provides:
|
||||
Greywall automatically detects this limitation and falls back to running **without network namespace isolation**. The sandbox still provides:
|
||||
|
||||
- Filesystem restrictions (read-only root, allowWrite paths)
|
||||
- PID namespace isolation
|
||||
@@ -41,7 +41,7 @@ Fence automatically detects this limitation and falls back to running **without
|
||||
**To check if your environment supports network namespaces:**
|
||||
|
||||
```bash
|
||||
fence --linux-features
|
||||
greywall --linux-features
|
||||
```
|
||||
|
||||
Look for "Network namespace (--unshare-net): true/false"
|
||||
@@ -51,7 +51,7 @@ Look for "Network namespace (--unshare-net): true/false"
|
||||
1. **Run with elevated privileges:**
|
||||
|
||||
```bash
|
||||
sudo fence <command>
|
||||
sudo greywall <command>
|
||||
```
|
||||
|
||||
2. **In Docker, add capability:**
|
||||
@@ -96,20 +96,20 @@ On most systems with package-manager-installed bwrap, this error shouldn't occur
|
||||
This usually means:
|
||||
|
||||
- the process tried to reach a domain that is **not allowed**, and
|
||||
- the request went through fence's HTTP proxy, which returned `403`.
|
||||
- the request went through greywall's HTTP proxy, which returned `403`.
|
||||
|
||||
Fix:
|
||||
|
||||
- Run with monitor mode to see what was blocked:
|
||||
- `fence -m <command>`
|
||||
- `greywall -m <command>`
|
||||
- Add the required destination(s) to `network.allowedDomains`.
|
||||
|
||||
## "It works outside fence but not inside"
|
||||
## "It works outside greywall but not inside"
|
||||
|
||||
Start with:
|
||||
|
||||
- `fence -m <command>` to see what's being denied
|
||||
- `fence -d <command>` to see full proxy and sandbox detail
|
||||
- `greywall -m <command>` to see what's being denied
|
||||
- `greywall -d <command>` to see full proxy and sandbox detail
|
||||
|
||||
Common causes:
|
||||
|
||||
@@ -134,7 +134,7 @@ const response = await fetch(url, {
|
||||
});
|
||||
```
|
||||
|
||||
Fence's OS-level sandbox should still block direct connections; the above makes your requests go through the filtering proxy so allowlisting works as intended.
|
||||
Greywall's OS-level sandbox should still block direct connections; the above makes your requests go through the filtering proxy so allowlisting works as intended.
|
||||
|
||||
## Local services (Redis/Postgres/etc.) fail inside the sandbox
|
||||
|
||||
@@ -156,7 +156,7 @@ If you're running a server inside the sandbox that must accept connections:
|
||||
Writes are denied by default.
|
||||
|
||||
- Add the minimum required writable directories to `filesystem.allowWrite`.
|
||||
- Protect sensitive targets with `filesystem.denyWrite` (and note fence protects some targets regardless).
|
||||
- Protect sensitive targets with `filesystem.denyWrite` (and note greywall protects some targets regardless).
|
||||
|
||||
Example:
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# Why Fence?
|
||||
# Why Greywall?
|
||||
|
||||
Fence exists to reduce the blast radius of running commands you don't fully trust (or don't fully understand yet).
|
||||
Greywall exists to reduce the blast radius of running commands you don't fully trust (or don't fully understand yet).
|
||||
|
||||
Common situations:
|
||||
|
||||
@@ -9,11 +9,11 @@ Common situations:
|
||||
- Running CI jobs where you want default-deny egress and tightly scoped writes
|
||||
- Auditing what a command *tries* to do before you let it do it
|
||||
|
||||
Fence is intentionally simple: it focuses on network allowlisting (by domain) and filesystem write restrictions (by path), wrapped in a pragmatic OS sandbox (macOS `sandbox-exec`, Linux `bubblewrap`).
|
||||
Greywall is intentionally simple: it focuses on network allowlisting (by domain) and filesystem write restrictions (by path), wrapped in a pragmatic OS sandbox (macOS `sandbox-exec`, Linux `bubblewrap`).
|
||||
|
||||
## What problem does it solve?
|
||||
|
||||
Fence helps you answer: "What can this command touch?"
|
||||
Greywall helps you answer: "What can this command touch?"
|
||||
|
||||
- **Network**: block all outbound by default; then allow only the domains you choose.
|
||||
- **Filesystem**: default-deny writes; then allow writes only where you choose (and deny sensitive writes regardless).
|
||||
@@ -21,9 +21,9 @@ Fence helps you answer: "What can this command touch?"
|
||||
|
||||
This is especially useful for supply-chain risk and "unknown repo" workflows where you want a safer default than "run it and hope".
|
||||
|
||||
## When Fence is useful even if tools already sandbox
|
||||
## When Greywall is useful even if tools already sandbox
|
||||
|
||||
Some coding agents and platforms ship sandboxing (Seatbelt/Landlock/etc.). Fence still provides value when you want:
|
||||
Some coding agents and platforms ship sandboxing (Seatbelt/Landlock/etc.). Greywall still provides value when you want:
|
||||
|
||||
- **Tool-agnostic policy**: apply the same rules to any command, not only inside one agent.
|
||||
- **Standardization**: commit/review a config once, use it across developers and CI.
|
||||
@@ -32,7 +32,7 @@ Some coding agents and platforms ship sandboxing (Seatbelt/Landlock/etc.). Fence
|
||||
|
||||
## Non-goals
|
||||
|
||||
Fence is **not** a hardened containment boundary for actively malicious code.
|
||||
Greywall is **not** a hardened containment boundary for actively malicious code.
|
||||
|
||||
- It does **not** attempt to prevent resource exhaustion (CPU/RAM/disk), timing attacks, or kernel-level escapes.
|
||||
- Domain allowlisting is not content inspection: if you allow a domain, code can exfiltrate via that domain.
|
||||
@@ -1,6 +1,6 @@
|
||||
# Dev Server + Redis Demo
|
||||
|
||||
This demo shows how fence controls network access: allowing specific external domains while blocking (or allowing) localhost connections.
|
||||
This demo shows how greywall controls network access: allowing specific external domains while blocking (or allowing) localhost connections.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
@@ -21,7 +21,7 @@ npm install
|
||||
This shows that requests to Redis (local service) works, but external requests are blocked.
|
||||
|
||||
```bash
|
||||
fence -p 3000 --settings fence-external-blocked.json npm start
|
||||
greywall -p 3000 --settings greywall-external-blocked.json npm start
|
||||
```
|
||||
|
||||
Test it:
|
||||
@@ -39,7 +39,7 @@ curl http://localhost:3000/api/external
|
||||
This shows the opposite: whitelisted external domains work, but Redis (localhost) is blocked.
|
||||
|
||||
```bash
|
||||
fence -p 3000 --settings fence-external-only.json npm start
|
||||
greywall -p 3000 --settings greywall-external-only.json npm start
|
||||
```
|
||||
|
||||
You will immediately notice that Redis connection is blocked on app startup:
|
||||
@@ -62,8 +62,8 @@ curl http://localhost:3000/api/users
|
||||
|
||||
| Config | Redis (localhost) | External (httpbin.org) |
|
||||
|--------|-------------------|------------------------|
|
||||
| `fence-external-blocked.json` | ✓ Allowed | ✗ Blocked |
|
||||
| `fence-external-only.json` | ✗ Blocked | ✓ Allowed |
|
||||
| `greywall-external-blocked.json` | ✓ Allowed | ✗ Blocked |
|
||||
| `greywall-external-only.json` | ✗ Blocked | ✓ Allowed |
|
||||
|
||||
## Key Settings
|
||||
|
||||
@@ -75,7 +75,7 @@ curl http://localhost:3000/api/users
|
||||
|
||||
## Note: Node.js Proxy Support
|
||||
|
||||
Node.js's native `http`/`https` modules don't respect proxy environment variables. This demo uses [`undici`](https://github.com/nodejs/undici) with `ProxyAgent` to route requests through fence's proxy:
|
||||
Node.js's native `http`/`https` modules don't respect proxy environment variables. This demo uses [`undici`](https://github.com/nodejs/undici) with `ProxyAgent` to route requests through greywall's proxy:
|
||||
|
||||
```javascript
|
||||
import { ProxyAgent, fetch } from "undici";
|
||||
@@ -86,4 +86,4 @@ const response = await fetch(url, {
|
||||
});
|
||||
```
|
||||
|
||||
Without this, external HTTP requests would fail with connection errors (the sandbox blocks them) rather than going through fence's proxy.
|
||||
Without this, external HTTP requests would fail with connection errors (the sandbox blocks them) rather than going through greywall's proxy.
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
* Demo Express app that:
|
||||
* 1. Serves an API on port 3000
|
||||
* 2. Connects to Redis on localhost:6379
|
||||
* 3. Attempts to call external APIs (blocked by fence)
|
||||
* 3. Attempts to call external APIs (blocked by greywall)
|
||||
*
|
||||
* This demonstrates allowLocalOutbound - the app can reach
|
||||
* local services (Redis) but not the external internet.
|
||||
@@ -60,7 +60,7 @@ async function fetchExternal(url) {
|
||||
signal: AbortSignal.timeout(5000),
|
||||
};
|
||||
|
||||
// Use proxy if available (set by fence)
|
||||
// Use proxy if available (set by greywall)
|
||||
if (proxyUrl) {
|
||||
options.dispatcher = new ProxyAgent(proxyUrl);
|
||||
}
|
||||
@@ -84,7 +84,7 @@ app.get("/", (req, res) => {
|
||||
"/api/users": "List all users from Redis",
|
||||
"/api/users/:id": "Get user by ID from Redis",
|
||||
"/api/health": "Health check",
|
||||
"/api/external": "Try to call external API (blocked by fence)",
|
||||
"/api/external": "Try to call external API (blocked by greywall)",
|
||||
},
|
||||
});
|
||||
});
|
||||
@@ -160,20 +160,20 @@ app.get("/api/external", async (req, res) => {
|
||||
|
||||
try {
|
||||
const result = await fetchExternal("https://httpbin.org/get");
|
||||
// Check if we're using a proxy (indicates fence is running)
|
||||
// Check if we're using a proxy (indicates greywall is running)
|
||||
const usingProxy = !!(process.env.HTTPS_PROXY || process.env.HTTP_PROXY);
|
||||
res.json({
|
||||
status: "success",
|
||||
message: usingProxy
|
||||
? "✓ Request allowed (httpbin.org is whitelisted)"
|
||||
: "⚠️ No proxy detected - not running in fence",
|
||||
: "⚠️ No proxy detected - not running in greywall",
|
||||
proxy: usingProxy ? process.env.HTTPS_PROXY : null,
|
||||
data: result,
|
||||
});
|
||||
} catch (error) {
|
||||
res.json({
|
||||
status: "blocked",
|
||||
message: "✓ External call blocked by fence",
|
||||
message: "✓ External call blocked by greywall",
|
||||
error: error.message,
|
||||
});
|
||||
}
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
{
|
||||
"name": "dev-server-demo",
|
||||
"version": "1.0.0",
|
||||
"description": "Demo: Dev server with Redis in fence sandbox",
|
||||
"description": "Demo: Dev server with Redis in greywall sandbox",
|
||||
"type": "module",
|
||||
"main": "app.js",
|
||||
"scripts": {
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
# Filesystem Sandbox Demo
|
||||
|
||||
This demo shows how fence controls filesystem access with `allowWrite`, `denyWrite`, and `denyRead`.
|
||||
This demo shows how greywall controls filesystem access with `allowWrite`, `denyWrite`, and `denyRead`.
|
||||
|
||||
## What it demonstrates
|
||||
|
||||
| Operation | Without Fence | With Fence |
|
||||
| Operation | Without Greywall | With Greywall |
|
||||
|-----------|---------------|------------|
|
||||
| Write to `./output/` | ✓ | ✓ (in allowWrite) |
|
||||
| Write to `./` | ✓ | ✗ (not in allowWrite) |
|
||||
@@ -16,19 +16,19 @@ This demo shows how fence controls filesystem access with `allowWrite`, `denyWri
|
||||
|
||||
## Run the demo
|
||||
|
||||
### Without fence (all writes succeed)
|
||||
### Without greywall (all writes succeed)
|
||||
|
||||
```bash
|
||||
python demo.py
|
||||
```
|
||||
|
||||
### With fence (unauthorized operations blocked)
|
||||
### With greywall (unauthorized operations blocked)
|
||||
|
||||
```bash
|
||||
fence --settings fence.json python demo.py
|
||||
greywall --settings greywall.json python demo.py
|
||||
```
|
||||
|
||||
## Fence config
|
||||
## Greywall config
|
||||
|
||||
```json
|
||||
{
|
||||
@@ -58,7 +58,7 @@ fence --settings fence.json python demo.py
|
||||
|
||||
## Protected paths
|
||||
|
||||
Fence also automatically protects certain paths regardless of config:
|
||||
Greywall also automatically protects certain paths regardless of config:
|
||||
|
||||
- Shell configs: `.bashrc`, `.zshrc`, `.profile`
|
||||
- Git hooks: `.git/hooks/*`
|
||||
|
||||
@@ -2,13 +2,13 @@
|
||||
"""
|
||||
Filesystem Sandbox Demo
|
||||
|
||||
This script demonstrates fence's filesystem controls:
|
||||
This script demonstrates greywall's filesystem controls:
|
||||
- allowWrite: Only specific directories are writable
|
||||
- denyWrite: Block writes to sensitive files
|
||||
- denyRead: Block reads from sensitive paths
|
||||
|
||||
Run WITHOUT fence to see all operations succeed.
|
||||
Run WITH fence to see unauthorized operations blocked.
|
||||
Run WITHOUT greywall to see all operations succeed.
|
||||
Run WITH greywall to see unauthorized operations blocked.
|
||||
"""
|
||||
|
||||
import os
|
||||
@@ -78,7 +78,7 @@ def main():
|
||||
╔═══════════════════════════════════════════════════════════╗
|
||||
║ Filesystem Sandbox Demo ║
|
||||
╠═══════════════════════════════════════════════════════════╣
|
||||
║ Tests fence's filesystem controls: ║
|
||||
║ Tests greywall's filesystem controls: ║
|
||||
║ - allowWrite: Only ./output/ is writable ║
|
||||
║ - denyWrite: .env and *.key files are protected ║
|
||||
║ - denyRead: /etc/shadow is blocked ║
|
||||
@@ -96,7 +96,7 @@ def main():
|
||||
"Write to ./output/ (allowed)",
|
||||
)
|
||||
|
||||
# Test 2: Write to project root (should fail with fence)
|
||||
# Test 2: Write to project root (should fail with greywall)
|
||||
try_write(
|
||||
"unauthorized.txt",
|
||||
"This should not be writable.\n",
|
||||
@@ -133,12 +133,12 @@ def main():
|
||||
print(f"({skipped} test(s) skipped - file not found)")
|
||||
|
||||
if blocked > 0:
|
||||
print(f"✅ Fence blocked {blocked} unauthorized operation(s)")
|
||||
print(f"✅ Greywall blocked {blocked} unauthorized operation(s)")
|
||||
print(f"{succeeded} allowed operation(s) succeeded")
|
||||
print("\nFilesystem sandbox is working!\n")
|
||||
else:
|
||||
print("⚠️ All operations succeeded - you are likely not running in fence")
|
||||
print("Run with: fence --settings fence.json python demo.py\n")
|
||||
print("⚠️ All operations succeeded - you are likely not running in greywall")
|
||||
print("Run with: greywall --settings greywall.json python demo.py\n")
|
||||
|
||||
cleanup()
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# Fence Examples
|
||||
# Greywall Examples
|
||||
|
||||
Runnable examples demonstrating `fence` capabilities.
|
||||
Runnable examples demonstrating `greywall` capabilities.
|
||||
|
||||
If you're looking for copy/paste configs and "cookbook" workflows, also see:
|
||||
|
||||
@@ -11,5 +11,5 @@ If you're looking for copy/paste configs and "cookbook" workflows, also see:
|
||||
|
||||
| Example | What it demonstrates | How to run |
|
||||
|--------|-----------------------|------------|
|
||||
| **[01-dev-server](01-dev-server/README.md)** | Running a dev server in the sandbox, controlling external domains vs localhost outbound (Redis), and exposing an inbound port (`-p`) | `cd examples/01-dev-server && fence -p 3000 --settings fence-external-blocked.json npm start` |
|
||||
| **[02-filesystem](02-filesystem/README.md)** | Filesystem controls: `allowWrite`, `denyWrite`, `denyRead` | `cd examples/02-filesystem && fence --settings fence.json python demo.py` |
|
||||
| **[01-dev-server](01-dev-server/README.md)** | Running a dev server in the sandbox, controlling external domains vs localhost outbound (Redis), and exposing an inbound port (`-p`) | `cd examples/01-dev-server && greywall -p 3000 --settings greywall-external-blocked.json npm start` |
|
||||
| **[02-filesystem](02-filesystem/README.md)** | Filesystem controls: `allowWrite`, `denyWrite`, `denyRead` | `cd examples/02-filesystem && greywall --settings greywall.json python demo.py` |
|
||||
|
||||
8
go.mod
8
go.mod
@@ -1,20 +1,16 @@
|
||||
module github.com/Use-Tusk/fence
|
||||
module gitea.app.monadical.io/monadical/greywall
|
||||
|
||||
go 1.25
|
||||
|
||||
require (
|
||||
github.com/bmatcuk/doublestar/v4 v4.9.1
|
||||
github.com/kardianos/service v1.2.4
|
||||
github.com/spf13/cobra v1.8.1
|
||||
github.com/stretchr/testify v1.8.4
|
||||
github.com/things-go/go-socks5 v0.0.5
|
||||
github.com/tidwall/jsonc v0.3.2
|
||||
golang.org/x/sys v0.39.0
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/spf13/pflag v1.0.5 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
)
|
||||
|
||||
14
go.sum
14
go.sum
@@ -1,28 +1,18 @@
|
||||
github.com/bmatcuk/doublestar/v4 v4.9.1 h1:X8jg9rRZmJd4yRy7ZeNDRnM+T3ZfHv15JiBJ/avrEXE=
|
||||
github.com/bmatcuk/doublestar/v4 v4.9.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/kardianos/service v1.2.4 h1:XNlGtZOYNx2u91urOdg/Kfmc+gfmuIo1Dd3rEi2OgBk=
|
||||
github.com/kardianos/service v1.2.4/go.mod h1:E4V9ufUuY82F7Ztlu1eN9VXWIQxg8NoLQlmFe0MtrXc=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
|
||||
github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
|
||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
|
||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/things-go/go-socks5 v0.0.5 h1:qvKaGcBkfDrUL33SchHN93srAmYGzb4CxSM2DPYufe8=
|
||||
github.com/things-go/go-socks5 v0.0.5/go.mod h1:mtzInf8v5xmsBpHZVbIw2YQYhc4K0jRwzfsH64Uh0IQ=
|
||||
github.com/tidwall/jsonc v0.3.2 h1:ZTKrmejRlAJYdn0kcaFqRAKlxxFIC21pYq8vLa4p2Wc=
|
||||
github.com/tidwall/jsonc v0.3.2/go.mod h1:dw+3CIxqHi+t8eFSpzzMlcVYxKp08UP5CD8/uSFCyJE=
|
||||
golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo=
|
||||
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
|
||||
golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk=
|
||||
golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
|
||||
28
install.sh
28
install.sh
@@ -1,17 +1,17 @@
|
||||
#!/bin/sh
|
||||
set -e
|
||||
|
||||
# Fence Installer (Linux/macOS only)
|
||||
# Greywall Installer (Linux/macOS only)
|
||||
# For Windows, we recommend using WSL.
|
||||
# Usage (latest):
|
||||
# curl -fsSL https://raw.githubusercontent.com/Use-Tusk/fence/main/install.sh | sh
|
||||
# curl -fsSL https://gitea.app.monadical.io/monadical/greywall/raw/branch/main/install.sh | sh
|
||||
# Usage (specific version):
|
||||
# curl -fsSL https://raw.githubusercontent.com/Use-Tusk/fence/main/install.sh | sh -s -- v0.1.0
|
||||
# curl -fsSL https://gitea.app.monadical.io/monadical/greywall/raw/branch/main/install.sh | sh -s -- v0.1.0
|
||||
# Or via env var:
|
||||
# curl -fsSL https://raw.githubusercontent.com/Use-Tusk/fence/main/install.sh | FENCE_VERSION=0.1.0 sh
|
||||
# curl -fsSL https://gitea.app.monadical.io/monadical/greywall/raw/branch/main/install.sh | GREYWALL_VERSION=0.1.0 sh
|
||||
|
||||
REPO="Use-Tusk/fence"
|
||||
BINARY_NAME="fence"
|
||||
REPO="monadical/greywall"
|
||||
BINARY_NAME="greywall"
|
||||
|
||||
OS=$(uname -s | tr '[:upper:]' '[:lower:]')
|
||||
ARCH=$(uname -m)
|
||||
@@ -39,8 +39,8 @@ case "$ARCH" in
|
||||
;;
|
||||
esac
|
||||
|
||||
# Determine version to install: use first arg or FENCE_VERSION env var; otherwise fallback to latest
|
||||
REQUESTED_VERSION="${1:-${FENCE_VERSION:-}}"
|
||||
# Determine version to install: use first arg or GREYWALL_VERSION env var; otherwise fallback to latest
|
||||
REQUESTED_VERSION="${1:-${GREYWALL_VERSION:-}}"
|
||||
if [ -n "$REQUESTED_VERSION" ]; then
|
||||
case "$REQUESTED_VERSION" in
|
||||
v*) VERSION_TAG="$REQUESTED_VERSION" ;;
|
||||
@@ -48,11 +48,11 @@ if [ -n "$REQUESTED_VERSION" ]; then
|
||||
esac
|
||||
else
|
||||
# Try manifest first (fast, no rate limits)
|
||||
VERSION_TAG=$(curl -sL "https://use-tusk.github.io/fence/latest.txt" 2>/dev/null || echo "")
|
||||
VERSION_TAG=$(curl -sL "https://gitea.app.monadical.io/monadical/greywall/latest.txt" 2>/dev/null || echo "")
|
||||
|
||||
# Fallback to GitHub API if manifest fails
|
||||
if [ -z "$VERSION_TAG" ]; then
|
||||
VERSION_TAG=$(curl -s "https://api.github.com/repos/$REPO/releases/latest" | grep '"tag_name":' | sed -E 's/.*"([^"]+)".*/\1/')
|
||||
VERSION_TAG=$(curl -s "https://gitea.app.monadical.io/api/v1/repos/$REPO/releases/latest" | grep '"tag_name":' | sed -E 's/.*"([^"]+)".*/\1/')
|
||||
fi
|
||||
fi
|
||||
|
||||
@@ -75,12 +75,12 @@ TMP_DIR=$(mktemp -d)
|
||||
cd "$TMP_DIR"
|
||||
|
||||
echo "Downloading from $DOWNLOAD_URL..."
|
||||
if ! curl -fsSL -o fence.tar.gz "$DOWNLOAD_URL"; then
|
||||
if ! curl -fsSL -o greywall.tar.gz "$DOWNLOAD_URL"; then
|
||||
echo "Error: Failed to download release"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
tar -xzf fence.tar.gz
|
||||
tar -xzf greywall.tar.gz
|
||||
|
||||
# Install to /usr/local/bin or ~/.local/bin
|
||||
INSTALL_DIR="/usr/local/bin"
|
||||
@@ -97,9 +97,9 @@ chmod +x "$INSTALL_DIR/$BINARY_NAME"
|
||||
cd - > /dev/null
|
||||
rm -rf "$TMP_DIR"
|
||||
|
||||
echo "Fence $VERSION_TAG installed successfully!"
|
||||
echo "Greywall $VERSION_TAG installed successfully!"
|
||||
echo ""
|
||||
echo "Run 'fence --help' to get started."
|
||||
echo "Run 'greywall --help' to get started."
|
||||
|
||||
# Check if install dir is in PATH
|
||||
case ":$PATH:" in
|
||||
|
||||
@@ -1,10 +1,11 @@
|
||||
// Package config defines the configuration types and loading for fence.
|
||||
// Package config defines the configuration types and loading for greywall.
|
||||
package config
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"slices"
|
||||
@@ -13,7 +14,7 @@ import (
|
||||
"github.com/tidwall/jsonc"
|
||||
)
|
||||
|
||||
// Config is the main configuration for fence.
|
||||
// Config is the main configuration for greywall.
|
||||
type Config struct {
|
||||
Extends string `json:"extends,omitempty"`
|
||||
Network NetworkConfig `json:"network"`
|
||||
@@ -25,19 +26,18 @@ type Config struct {
|
||||
|
||||
// NetworkConfig defines network restrictions.
|
||||
type NetworkConfig struct {
|
||||
AllowedDomains []string `json:"allowedDomains"`
|
||||
DeniedDomains []string `json:"deniedDomains"`
|
||||
ProxyURL string `json:"proxyUrl,omitempty"` // External SOCKS5 proxy (e.g. socks5://host:1080)
|
||||
HTTPProxyURL string `json:"httpProxyUrl,omitempty"` // HTTP CONNECT proxy (e.g. http://host:42051)
|
||||
DnsAddr string `json:"dnsAddr,omitempty"` // DNS server address on host (e.g. localhost:3153)
|
||||
AllowUnixSockets []string `json:"allowUnixSockets,omitempty"`
|
||||
AllowAllUnixSockets bool `json:"allowAllUnixSockets,omitempty"`
|
||||
AllowLocalBinding bool `json:"allowLocalBinding,omitempty"`
|
||||
AllowLocalOutbound *bool `json:"allowLocalOutbound,omitempty"` // If nil, defaults to AllowLocalBinding value
|
||||
HTTPProxyPort int `json:"httpProxyPort,omitempty"`
|
||||
SOCKSProxyPort int `json:"socksProxyPort,omitempty"`
|
||||
}
|
||||
|
||||
// FilesystemConfig defines filesystem restrictions.
|
||||
type FilesystemConfig struct {
|
||||
DefaultDenyRead bool `json:"defaultDenyRead,omitempty"` // If true, deny reads by default except system paths and AllowRead
|
||||
DefaultDenyRead *bool `json:"defaultDenyRead,omitempty"` // If nil or true, deny reads by default except system paths, CWD, and AllowRead
|
||||
AllowRead []string `json:"allowRead"` // Paths to allow reading (used when DefaultDenyRead is true)
|
||||
DenyRead []string `json:"denyRead"`
|
||||
AllowWrite []string `json:"allowWrite"`
|
||||
@@ -45,6 +45,12 @@ type FilesystemConfig struct {
|
||||
AllowGitConfig bool `json:"allowGitConfig,omitempty"`
|
||||
}
|
||||
|
||||
// IsDefaultDenyRead returns whether deny-by-default read mode is enabled.
|
||||
// Defaults to true when not explicitly set (nil).
|
||||
func (f *FilesystemConfig) IsDefaultDenyRead() bool {
|
||||
return f.DefaultDenyRead == nil || *f.DefaultDenyRead
|
||||
}
|
||||
|
||||
// CommandConfig defines command restrictions.
|
||||
type CommandConfig struct {
|
||||
Deny []string `json:"deny"`
|
||||
@@ -106,13 +112,10 @@ var DefaultDeniedCommands = []string{
|
||||
"nsenter",
|
||||
}
|
||||
|
||||
// Default returns the default configuration with all network blocked.
|
||||
// Default returns the default configuration with all network blocked (no proxy = no network).
|
||||
func Default() *Config {
|
||||
return &Config{
|
||||
Network: NetworkConfig{
|
||||
AllowedDomains: []string{},
|
||||
DeniedDomains: []string{},
|
||||
},
|
||||
Network: NetworkConfig{},
|
||||
Filesystem: FilesystemConfig{
|
||||
DenyRead: []string{},
|
||||
AllowWrite: []string{},
|
||||
@@ -134,12 +137,12 @@ func Default() *Config {
|
||||
|
||||
// DefaultConfigPath returns the default config file path.
|
||||
// Uses the OS-preferred config directory (XDG on Linux, ~/Library/Application Support on macOS).
|
||||
// Falls back to ~/.fence.json if the new location doesn't exist but the legacy one does.
|
||||
// Falls back to ~/.greywall.json if the new location doesn't exist but the legacy one does.
|
||||
func DefaultConfigPath() string {
|
||||
// Try OS-preferred config directory first
|
||||
configDir, err := os.UserConfigDir()
|
||||
if err == nil {
|
||||
newPath := filepath.Join(configDir, "fence", "fence.json")
|
||||
newPath := filepath.Join(configDir, "greywall", "greywall.json")
|
||||
if _, err := os.Stat(newPath); err == nil {
|
||||
return newPath
|
||||
}
|
||||
@@ -153,18 +156,18 @@ func DefaultConfigPath() string {
|
||||
// Fall back to legacy path if it exists
|
||||
home, err := os.UserHomeDir()
|
||||
if err != nil {
|
||||
return "fence.json"
|
||||
return "greywall.json"
|
||||
}
|
||||
legacyPath := filepath.Join(home, ".fence.json")
|
||||
legacyPath := filepath.Join(home, ".greywall.json")
|
||||
if _, err := os.Stat(legacyPath); err == nil {
|
||||
return legacyPath
|
||||
}
|
||||
|
||||
// Neither exists, prefer new XDG-compliant path
|
||||
if configDir != "" {
|
||||
return filepath.Join(configDir, "fence", "fence.json")
|
||||
return filepath.Join(configDir, "greywall", "greywall.json")
|
||||
}
|
||||
return filepath.Join(home, ".config", "fence", "fence.json")
|
||||
return filepath.Join(home, ".config", "greywall", "greywall.json")
|
||||
}
|
||||
|
||||
// Load loads configuration from a file path.
|
||||
@@ -196,14 +199,19 @@ func Load(path string) (*Config, error) {
|
||||
|
||||
// Validate validates the configuration.
|
||||
func (c *Config) Validate() error {
|
||||
for _, domain := range c.Network.AllowedDomains {
|
||||
if err := validateDomainPattern(domain); err != nil {
|
||||
return fmt.Errorf("invalid allowed domain %q: %w", domain, err)
|
||||
if c.Network.ProxyURL != "" {
|
||||
if err := validateProxyURL(c.Network.ProxyURL); err != nil {
|
||||
return fmt.Errorf("invalid network.proxyUrl %q: %w", c.Network.ProxyURL, err)
|
||||
}
|
||||
}
|
||||
for _, domain := range c.Network.DeniedDomains {
|
||||
if err := validateDomainPattern(domain); err != nil {
|
||||
return fmt.Errorf("invalid denied domain %q: %w", domain, err)
|
||||
if c.Network.HTTPProxyURL != "" {
|
||||
if err := validateHTTPProxyURL(c.Network.HTTPProxyURL); err != nil {
|
||||
return fmt.Errorf("invalid network.httpProxyUrl %q: %w", c.Network.HTTPProxyURL, err)
|
||||
}
|
||||
}
|
||||
if c.Network.DnsAddr != "" {
|
||||
if err := validateHostPort(c.Network.DnsAddr); err != nil {
|
||||
return fmt.Errorf("invalid network.dnsAddr %q: %w", c.Network.DnsAddr, err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -253,46 +261,49 @@ func (c *CommandConfig) UseDefaultDeniedCommands() bool {
|
||||
return c.UseDefaults == nil || *c.UseDefaults
|
||||
}
|
||||
|
||||
func validateDomainPattern(pattern string) error {
|
||||
if pattern == "localhost" {
|
||||
return nil
|
||||
// validateProxyURL validates a SOCKS5 proxy URL.
|
||||
func validateProxyURL(proxyURL string) error {
|
||||
u, err := url.Parse(proxyURL)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid URL: %w", err)
|
||||
}
|
||||
|
||||
if strings.Contains(pattern, "://") || strings.Contains(pattern, "/") || strings.Contains(pattern, ":") {
|
||||
return errors.New("domain pattern cannot contain protocol, path, or port")
|
||||
if u.Scheme != "socks5" && u.Scheme != "socks5h" {
|
||||
return errors.New("proxy URL must use socks5:// or socks5h:// scheme")
|
||||
}
|
||||
|
||||
// Handle wildcard patterns
|
||||
if strings.HasPrefix(pattern, "*.") {
|
||||
domain := pattern[2:]
|
||||
// Must have at least one more dot after the wildcard
|
||||
if !strings.Contains(domain, ".") {
|
||||
return errors.New("wildcard pattern too broad (e.g., *.com not allowed)")
|
||||
}
|
||||
if strings.HasPrefix(domain, ".") || strings.HasSuffix(domain, ".") {
|
||||
return errors.New("invalid domain format")
|
||||
}
|
||||
// Check each part has content
|
||||
parts := strings.Split(domain, ".")
|
||||
if len(parts) < 2 {
|
||||
return errors.New("wildcard pattern too broad")
|
||||
}
|
||||
if slices.Contains(parts, "") {
|
||||
return errors.New("invalid domain format")
|
||||
}
|
||||
return nil
|
||||
if u.Hostname() == "" {
|
||||
return errors.New("proxy URL must include a hostname")
|
||||
}
|
||||
|
||||
// Reject other uses of wildcards
|
||||
if strings.Contains(pattern, "*") {
|
||||
return errors.New("only *.domain.com wildcard patterns are allowed")
|
||||
if u.Port() == "" {
|
||||
return errors.New("proxy URL must include a port")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Regular domains must have at least one dot
|
||||
if !strings.Contains(pattern, ".") || strings.HasPrefix(pattern, ".") || strings.HasSuffix(pattern, ".") {
|
||||
return errors.New("invalid domain format")
|
||||
// validateHTTPProxyURL validates an HTTP CONNECT proxy URL.
|
||||
func validateHTTPProxyURL(proxyURL string) error {
|
||||
u, err := url.Parse(proxyURL)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid URL: %w", err)
|
||||
}
|
||||
if u.Scheme != "http" && u.Scheme != "https" {
|
||||
return errors.New("HTTP proxy URL must use http:// or https:// scheme")
|
||||
}
|
||||
if u.Hostname() == "" {
|
||||
return errors.New("HTTP proxy URL must include a hostname")
|
||||
}
|
||||
if u.Port() == "" {
|
||||
return errors.New("HTTP proxy URL must include a port")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// validateHostPort validates a host:port address.
|
||||
func validateHostPort(addr string) error {
|
||||
// Must contain a colon separating host and port
|
||||
host, port, found := strings.Cut(addr, ":")
|
||||
if !found || host == "" || port == "" {
|
||||
return errors.New("must be in host:port format (e.g. localhost:3153)")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -332,26 +343,6 @@ func isIPv6Pattern(pattern string) bool {
|
||||
return colonCount >= 2
|
||||
}
|
||||
|
||||
// MatchesDomain checks if a hostname matches a domain pattern.
|
||||
func MatchesDomain(hostname, pattern string) bool {
|
||||
hostname = strings.ToLower(hostname)
|
||||
pattern = strings.ToLower(pattern)
|
||||
|
||||
// "*" matches all domains
|
||||
if pattern == "*" {
|
||||
return true
|
||||
}
|
||||
|
||||
// Wildcard pattern like *.example.com
|
||||
if strings.HasPrefix(pattern, "*.") {
|
||||
baseDomain := pattern[2:]
|
||||
return strings.HasSuffix(hostname, "."+baseDomain)
|
||||
}
|
||||
|
||||
// Exact match
|
||||
return hostname == pattern
|
||||
}
|
||||
|
||||
// MatchesHost checks if a hostname matches an SSH host pattern.
|
||||
// SSH host patterns support wildcards anywhere in the pattern.
|
||||
func MatchesHost(hostname, pattern string) bool {
|
||||
@@ -440,9 +431,12 @@ func Merge(base, override *Config) *Config {
|
||||
AllowPty: base.AllowPty || override.AllowPty,
|
||||
|
||||
Network: NetworkConfig{
|
||||
// ProxyURL/HTTPProxyURL/DnsAddr: override wins if non-empty
|
||||
ProxyURL: mergeString(base.Network.ProxyURL, override.Network.ProxyURL),
|
||||
HTTPProxyURL: mergeString(base.Network.HTTPProxyURL, override.Network.HTTPProxyURL),
|
||||
DnsAddr: mergeString(base.Network.DnsAddr, override.Network.DnsAddr),
|
||||
|
||||
// Append slices (base first, then override additions)
|
||||
AllowedDomains: mergeStrings(base.Network.AllowedDomains, override.Network.AllowedDomains),
|
||||
DeniedDomains: mergeStrings(base.Network.DeniedDomains, override.Network.DeniedDomains),
|
||||
AllowUnixSockets: mergeStrings(base.Network.AllowUnixSockets, override.Network.AllowUnixSockets),
|
||||
|
||||
// Boolean fields: override wins if set, otherwise base
|
||||
@@ -451,15 +445,11 @@ func Merge(base, override *Config) *Config {
|
||||
|
||||
// Pointer fields: override wins if set, otherwise base
|
||||
AllowLocalOutbound: mergeOptionalBool(base.Network.AllowLocalOutbound, override.Network.AllowLocalOutbound),
|
||||
|
||||
// Port fields: override wins if non-zero
|
||||
HTTPProxyPort: mergeInt(base.Network.HTTPProxyPort, override.Network.HTTPProxyPort),
|
||||
SOCKSProxyPort: mergeInt(base.Network.SOCKSProxyPort, override.Network.SOCKSProxyPort),
|
||||
},
|
||||
|
||||
Filesystem: FilesystemConfig{
|
||||
// Boolean fields: true if either enables it
|
||||
DefaultDenyRead: base.Filesystem.DefaultDenyRead || override.Filesystem.DefaultDenyRead,
|
||||
// Pointer field: override wins if set, otherwise base (nil = deny-by-default)
|
||||
DefaultDenyRead: mergeOptionalBool(base.Filesystem.DefaultDenyRead, override.Filesystem.DefaultDenyRead),
|
||||
|
||||
// Append slices
|
||||
AllowRead: mergeStrings(base.Filesystem.AllowRead, override.Filesystem.AllowRead),
|
||||
@@ -531,9 +521,9 @@ func mergeOptionalBool(base, override *bool) *bool {
|
||||
return base
|
||||
}
|
||||
|
||||
// mergeInt returns override if non-zero, otherwise base.
|
||||
func mergeInt(base, override int) int {
|
||||
if override != 0 {
|
||||
// mergeString returns override if non-empty, otherwise base.
|
||||
func mergeString(base, override string) string {
|
||||
if override != "" {
|
||||
return override
|
||||
}
|
||||
return base
|
||||
|
||||
@@ -6,72 +6,6 @@ import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestValidateDomainPattern(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
pattern string
|
||||
wantErr bool
|
||||
}{
|
||||
// Valid patterns
|
||||
{"valid domain", "example.com", false},
|
||||
{"valid subdomain", "api.example.com", false},
|
||||
{"valid wildcard", "*.example.com", false},
|
||||
{"valid wildcard subdomain", "*.api.example.com", false},
|
||||
{"localhost", "localhost", false},
|
||||
|
||||
// Invalid patterns
|
||||
{"protocol included", "https://example.com", true},
|
||||
{"path included", "example.com/path", true},
|
||||
{"port included", "example.com:443", true},
|
||||
{"wildcard too broad", "*.com", true},
|
||||
{"invalid wildcard position", "example.*.com", true},
|
||||
{"trailing wildcard", "example.com.*", true},
|
||||
{"leading dot", ".example.com", true},
|
||||
{"trailing dot", "example.com.", true},
|
||||
{"no TLD", "example", true},
|
||||
{"empty wildcard domain part", "*.", true},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
err := validateDomainPattern(tt.pattern)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("validateDomainPattern(%q) error = %v, wantErr %v", tt.pattern, err, tt.wantErr)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestMatchesDomain(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
hostname string
|
||||
pattern string
|
||||
want bool
|
||||
}{
|
||||
// Exact matches
|
||||
{"exact match", "example.com", "example.com", true},
|
||||
{"exact match case insensitive", "Example.COM", "example.com", true},
|
||||
{"exact no match", "other.com", "example.com", false},
|
||||
|
||||
// Wildcard matches
|
||||
{"wildcard match subdomain", "api.example.com", "*.example.com", true},
|
||||
{"wildcard match deep subdomain", "deep.api.example.com", "*.example.com", true},
|
||||
{"wildcard no match base domain", "example.com", "*.example.com", false},
|
||||
{"wildcard no match different domain", "api.other.com", "*.example.com", false},
|
||||
{"wildcard case insensitive", "API.Example.COM", "*.example.com", true},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := MatchesDomain(tt.hostname, tt.pattern)
|
||||
if got != tt.want {
|
||||
t.Errorf("MatchesDomain(%q, %q) = %v, want %v", tt.hostname, tt.pattern, got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestConfigValidate(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
@@ -84,29 +18,55 @@ func TestConfigValidate(t *testing.T) {
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "valid config with domains",
|
||||
name: "valid config with proxy",
|
||||
config: Config{
|
||||
Network: NetworkConfig{
|
||||
AllowedDomains: []string{"example.com", "*.github.com"},
|
||||
DeniedDomains: []string{"blocked.com"},
|
||||
ProxyURL: "socks5://localhost:1080",
|
||||
},
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "invalid allowed domain",
|
||||
name: "valid socks5h proxy",
|
||||
config: Config{
|
||||
Network: NetworkConfig{
|
||||
AllowedDomains: []string{"https://example.com"},
|
||||
ProxyURL: "socks5h://proxy.example.com:1080",
|
||||
},
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "invalid proxy - wrong scheme",
|
||||
config: Config{
|
||||
Network: NetworkConfig{
|
||||
ProxyURL: "http://localhost:1080",
|
||||
},
|
||||
},
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "invalid denied domain",
|
||||
name: "invalid proxy - no port",
|
||||
config: Config{
|
||||
Network: NetworkConfig{
|
||||
DeniedDomains: []string{"*.com"},
|
||||
ProxyURL: "socks5://localhost",
|
||||
},
|
||||
},
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "invalid proxy - no host",
|
||||
config: Config{
|
||||
Network: NetworkConfig{
|
||||
ProxyURL: "socks5://:1080",
|
||||
},
|
||||
},
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "invalid proxy - not a URL",
|
||||
config: Config{
|
||||
Network: NetworkConfig{
|
||||
ProxyURL: "not-a-url",
|
||||
},
|
||||
},
|
||||
wantErr: true,
|
||||
@@ -164,11 +124,8 @@ func TestDefault(t *testing.T) {
|
||||
if cfg == nil {
|
||||
t.Fatal("Default() returned nil")
|
||||
}
|
||||
if cfg.Network.AllowedDomains == nil {
|
||||
t.Error("AllowedDomains should not be nil")
|
||||
}
|
||||
if cfg.Network.DeniedDomains == nil {
|
||||
t.Error("DeniedDomains should not be nil")
|
||||
if cfg.Network.ProxyURL != "" {
|
||||
t.Error("ProxyURL should be empty by default")
|
||||
}
|
||||
if cfg.Filesystem.DenyRead == nil {
|
||||
t.Error("DenyRead should not be nil")
|
||||
@@ -222,21 +179,18 @@ func TestLoad(t *testing.T) {
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "valid config",
|
||||
name: "valid config with proxy",
|
||||
setup: func(dir string) string {
|
||||
path := filepath.Join(dir, "valid.json")
|
||||
content := `{"network":{"allowedDomains":["example.com"]}}`
|
||||
content := `{"network":{"proxyUrl":"socks5://localhost:1080"}}`
|
||||
_ = os.WriteFile(path, []byte(content), 0o600)
|
||||
return path
|
||||
},
|
||||
wantNil: false,
|
||||
wantErr: false,
|
||||
checkConfig: func(t *testing.T, cfg *Config) {
|
||||
if len(cfg.Network.AllowedDomains) != 1 {
|
||||
t.Errorf("expected 1 allowed domain, got %d", len(cfg.Network.AllowedDomains))
|
||||
}
|
||||
if cfg.Network.AllowedDomains[0] != "example.com" {
|
||||
t.Errorf("expected example.com, got %s", cfg.Network.AllowedDomains[0])
|
||||
if cfg.Network.ProxyURL != "socks5://localhost:1080" {
|
||||
t.Errorf("expected socks5://localhost:1080, got %s", cfg.Network.ProxyURL)
|
||||
}
|
||||
},
|
||||
},
|
||||
@@ -251,10 +205,10 @@ func TestLoad(t *testing.T) {
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "invalid domain in config",
|
||||
name: "invalid proxy URL in config",
|
||||
setup: func(dir string) string {
|
||||
path := filepath.Join(dir, "invalid_domain.json")
|
||||
content := `{"network":{"allowedDomains":["*.com"]}}`
|
||||
path := filepath.Join(dir, "invalid_proxy.json")
|
||||
content := `{"network":{"proxyUrl":"http://localhost:1080"}}`
|
||||
_ = os.WriteFile(path, []byte(content), 0o600)
|
||||
return path
|
||||
},
|
||||
@@ -295,10 +249,10 @@ func TestDefaultConfigPath(t *testing.T) {
|
||||
if path == "" {
|
||||
t.Error("DefaultConfigPath() returned empty string")
|
||||
}
|
||||
// Should end with fence.json (either new XDG path or legacy .fence.json)
|
||||
// Should end with greywall.json (either new XDG path or legacy .greywall.json)
|
||||
base := filepath.Base(path)
|
||||
if base != "fence.json" && base != ".fence.json" {
|
||||
t.Errorf("DefaultConfigPath() = %q, expected to end with fence.json or .fence.json", path)
|
||||
if base != "greywall.json" && base != ".greywall.json" {
|
||||
t.Errorf("DefaultConfigPath() = %q, expected to end with greywall.json or .greywall.json", path)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -307,15 +261,15 @@ func TestMerge(t *testing.T) {
|
||||
override := &Config{
|
||||
AllowPty: true,
|
||||
Network: NetworkConfig{
|
||||
AllowedDomains: []string{"example.com"},
|
||||
ProxyURL: "socks5://localhost:1080",
|
||||
},
|
||||
}
|
||||
result := Merge(nil, override)
|
||||
if !result.AllowPty {
|
||||
t.Error("expected AllowPty to be true")
|
||||
}
|
||||
if len(result.Network.AllowedDomains) != 1 || result.Network.AllowedDomains[0] != "example.com" {
|
||||
t.Error("expected AllowedDomains to be [example.com]")
|
||||
if result.Network.ProxyURL != "socks5://localhost:1080" {
|
||||
t.Error("expected ProxyURL to be socks5://localhost:1080")
|
||||
}
|
||||
if result.Extends != "" {
|
||||
t.Error("expected Extends to be cleared")
|
||||
@@ -326,15 +280,15 @@ func TestMerge(t *testing.T) {
|
||||
base := &Config{
|
||||
AllowPty: true,
|
||||
Network: NetworkConfig{
|
||||
AllowedDomains: []string{"example.com"},
|
||||
ProxyURL: "socks5://localhost:1080",
|
||||
},
|
||||
}
|
||||
result := Merge(base, nil)
|
||||
if !result.AllowPty {
|
||||
t.Error("expected AllowPty to be true")
|
||||
}
|
||||
if len(result.Network.AllowedDomains) != 1 {
|
||||
t.Error("expected AllowedDomains to be [example.com]")
|
||||
if result.Network.ProxyURL != "socks5://localhost:1080" {
|
||||
t.Error("expected ProxyURL to be preserved")
|
||||
}
|
||||
})
|
||||
|
||||
@@ -345,47 +299,37 @@ func TestMerge(t *testing.T) {
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("merge allowed domains", func(t *testing.T) {
|
||||
t.Run("proxy URL override wins", func(t *testing.T) {
|
||||
base := &Config{
|
||||
Network: NetworkConfig{
|
||||
AllowedDomains: []string{"github.com", "api.github.com"},
|
||||
ProxyURL: "socks5://base:1080",
|
||||
},
|
||||
}
|
||||
override := &Config{
|
||||
Extends: "base-template",
|
||||
Network: NetworkConfig{
|
||||
AllowedDomains: []string{"private-registry.company.com"},
|
||||
ProxyURL: "socks5://override:1080",
|
||||
},
|
||||
}
|
||||
result := Merge(base, override)
|
||||
|
||||
// Should have all three domains
|
||||
if len(result.Network.AllowedDomains) != 3 {
|
||||
t.Errorf("expected 3 allowed domains, got %d: %v", len(result.Network.AllowedDomains), result.Network.AllowedDomains)
|
||||
}
|
||||
|
||||
// Extends should be cleared
|
||||
if result.Extends != "" {
|
||||
t.Errorf("expected Extends to be cleared, got %q", result.Extends)
|
||||
if result.Network.ProxyURL != "socks5://override:1080" {
|
||||
t.Errorf("expected override ProxyURL, got %s", result.Network.ProxyURL)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("deduplicate merged domains", func(t *testing.T) {
|
||||
t.Run("proxy URL base preserved when override empty", func(t *testing.T) {
|
||||
base := &Config{
|
||||
Network: NetworkConfig{
|
||||
AllowedDomains: []string{"github.com", "example.com"},
|
||||
ProxyURL: "socks5://base:1080",
|
||||
},
|
||||
}
|
||||
override := &Config{
|
||||
Network: NetworkConfig{
|
||||
AllowedDomains: []string{"github.com", "new.com"},
|
||||
},
|
||||
Network: NetworkConfig{},
|
||||
}
|
||||
result := Merge(base, override)
|
||||
|
||||
// Should deduplicate
|
||||
if len(result.Network.AllowedDomains) != 3 {
|
||||
t.Errorf("expected 3 domains (deduped), got %d: %v", len(result.Network.AllowedDomains), result.Network.AllowedDomains)
|
||||
if result.Network.ProxyURL != "socks5://base:1080" {
|
||||
t.Errorf("expected base ProxyURL, got %s", result.Network.ProxyURL)
|
||||
}
|
||||
})
|
||||
|
||||
@@ -466,7 +410,7 @@ func TestMerge(t *testing.T) {
|
||||
t.Run("merge defaultDenyRead and allowRead", func(t *testing.T) {
|
||||
base := &Config{
|
||||
Filesystem: FilesystemConfig{
|
||||
DefaultDenyRead: true,
|
||||
DefaultDenyRead: boolPtr(true),
|
||||
AllowRead: []string{"/home/user/project"},
|
||||
},
|
||||
}
|
||||
@@ -477,56 +421,38 @@ func TestMerge(t *testing.T) {
|
||||
}
|
||||
result := Merge(base, override)
|
||||
|
||||
if !result.Filesystem.DefaultDenyRead {
|
||||
t.Error("expected DefaultDenyRead to be true (from base)")
|
||||
if !result.Filesystem.IsDefaultDenyRead() {
|
||||
t.Error("expected IsDefaultDenyRead() to be true (from base)")
|
||||
}
|
||||
if len(result.Filesystem.AllowRead) != 2 {
|
||||
t.Errorf("expected 2 allowRead paths, got %d: %v", len(result.Filesystem.AllowRead), result.Filesystem.AllowRead)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("merge defaultDenyRead from override", func(t *testing.T) {
|
||||
t.Run("defaultDenyRead nil defaults to true", func(t *testing.T) {
|
||||
base := &Config{
|
||||
Filesystem: FilesystemConfig{
|
||||
DefaultDenyRead: false,
|
||||
},
|
||||
Filesystem: FilesystemConfig{},
|
||||
}
|
||||
override := &Config{
|
||||
Filesystem: FilesystemConfig{
|
||||
DefaultDenyRead: true,
|
||||
AllowRead: []string{"/home/user/project"},
|
||||
},
|
||||
}
|
||||
result := Merge(base, override)
|
||||
|
||||
if !result.Filesystem.DefaultDenyRead {
|
||||
t.Error("expected DefaultDenyRead to be true (from override)")
|
||||
}
|
||||
if len(result.Filesystem.AllowRead) != 1 {
|
||||
t.Errorf("expected 1 allowRead path, got %d", len(result.Filesystem.AllowRead))
|
||||
result := Merge(base, nil)
|
||||
if !result.Filesystem.IsDefaultDenyRead() {
|
||||
t.Error("expected IsDefaultDenyRead() to be true when nil (deny-by-default)")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("override ports", func(t *testing.T) {
|
||||
t.Run("defaultDenyRead explicit false overrides", func(t *testing.T) {
|
||||
base := &Config{
|
||||
Network: NetworkConfig{
|
||||
HTTPProxyPort: 8080,
|
||||
SOCKSProxyPort: 1080,
|
||||
Filesystem: FilesystemConfig{
|
||||
DefaultDenyRead: boolPtr(true),
|
||||
},
|
||||
}
|
||||
override := &Config{
|
||||
Network: NetworkConfig{
|
||||
HTTPProxyPort: 9090, // override
|
||||
// SOCKSProxyPort not set, should keep base
|
||||
Filesystem: FilesystemConfig{
|
||||
DefaultDenyRead: boolPtr(false),
|
||||
},
|
||||
}
|
||||
result := Merge(base, override)
|
||||
|
||||
if result.Network.HTTPProxyPort != 9090 {
|
||||
t.Errorf("expected HTTPProxyPort 9090, got %d", result.Network.HTTPProxyPort)
|
||||
}
|
||||
if result.Network.SOCKSProxyPort != 1080 {
|
||||
t.Errorf("expected SOCKSProxyPort 1080, got %d", result.Network.SOCKSProxyPort)
|
||||
if result.Filesystem.IsDefaultDenyRead() {
|
||||
t.Error("expected IsDefaultDenyRead() to be false (override explicit false)")
|
||||
}
|
||||
})
|
||||
}
|
||||
@@ -741,3 +667,30 @@ func TestMergeSSHConfig(t *testing.T) {
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestValidateProxyURL(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
url string
|
||||
wantErr bool
|
||||
}{
|
||||
{"valid socks5", "socks5://localhost:1080", false},
|
||||
{"valid socks5h", "socks5h://proxy.example.com:1080", false},
|
||||
{"valid socks5 with ip", "socks5://192.168.1.1:1080", false},
|
||||
{"http scheme", "http://localhost:1080", true},
|
||||
{"https scheme", "https://localhost:1080", true},
|
||||
{"no port", "socks5://localhost", true},
|
||||
{"no host", "socks5://:1080", true},
|
||||
{"not a URL", "not-a-url", true},
|
||||
{"empty", "", true},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
err := validateProxyURL(tt.url)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("validateProxyURL(%q) error = %v, wantErr %v", tt.url, err, tt.wantErr)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
181
internal/daemon/client.go
Normal file
181
internal/daemon/client.go
Normal file
@@ -0,0 +1,181 @@
|
||||
package daemon
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
	// clientDialTimeout is the maximum time to wait when connecting to the daemon.
	clientDialTimeout = 5 * time.Second

	// clientReadTimeout is the maximum time to wait for a response from the daemon.
	clientReadTimeout = 30 * time.Second
)

// Client communicates with the greywall daemon over a Unix socket using
// newline-delimited JSON. A fresh connection is dialed per request (see
// sendRequest); the struct itself holds no connection state.
type Client struct {
	socketPath string // filesystem path of the daemon's Unix socket
	debug      bool   // when true, logDebug traces requests/responses to stderr
}
|
||||
|
||||
// NewClient creates a new daemon client that connects to the given Unix socket path.
|
||||
func NewClient(socketPath string, debug bool) *Client {
|
||||
return &Client{
|
||||
socketPath: socketPath,
|
||||
debug: debug,
|
||||
}
|
||||
}
|
||||
|
||||
// CreateSession asks the daemon to create a new sandbox session with the given
|
||||
// proxy URL and optional DNS address. Returns the session info on success.
|
||||
func (c *Client) CreateSession(proxyURL, dnsAddr string) (*Response, error) {
|
||||
req := Request{
|
||||
Action: "create_session",
|
||||
ProxyURL: proxyURL,
|
||||
DNSAddr: dnsAddr,
|
||||
}
|
||||
|
||||
resp, err := c.sendRequest(req)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("create session request failed: %w", err)
|
||||
}
|
||||
|
||||
if !resp.OK {
|
||||
return resp, fmt.Errorf("create session failed: %s", resp.Error)
|
||||
}
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// DestroySession asks the daemon to tear down the session with the given ID.
|
||||
func (c *Client) DestroySession(sessionID string) error {
|
||||
req := Request{
|
||||
Action: "destroy_session",
|
||||
SessionID: sessionID,
|
||||
}
|
||||
|
||||
resp, err := c.sendRequest(req)
|
||||
if err != nil {
|
||||
return fmt.Errorf("destroy session request failed: %w", err)
|
||||
}
|
||||
|
||||
if !resp.OK {
|
||||
return fmt.Errorf("destroy session failed: %s", resp.Error)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// StartLearning asks the daemon to start an fs_usage trace for learning mode.
|
||||
func (c *Client) StartLearning() (*Response, error) {
|
||||
req := Request{
|
||||
Action: "start_learning",
|
||||
}
|
||||
|
||||
resp, err := c.sendRequest(req)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("start learning request failed: %w", err)
|
||||
}
|
||||
|
||||
if !resp.OK {
|
||||
return resp, fmt.Errorf("start learning failed: %s", resp.Error)
|
||||
}
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// StopLearning asks the daemon to stop the fs_usage trace for the given learning session.
|
||||
func (c *Client) StopLearning(learningID string) error {
|
||||
req := Request{
|
||||
Action: "stop_learning",
|
||||
LearningID: learningID,
|
||||
}
|
||||
|
||||
resp, err := c.sendRequest(req)
|
||||
if err != nil {
|
||||
return fmt.Errorf("stop learning request failed: %w", err)
|
||||
}
|
||||
|
||||
if !resp.OK {
|
||||
return fmt.Errorf("stop learning failed: %s", resp.Error)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Status queries the daemon for its current status.
|
||||
func (c *Client) Status() (*Response, error) {
|
||||
req := Request{
|
||||
Action: "status",
|
||||
}
|
||||
|
||||
resp, err := c.sendRequest(req)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("status request failed: %w", err)
|
||||
}
|
||||
|
||||
if !resp.OK {
|
||||
return resp, fmt.Errorf("status request failed: %s", resp.Error)
|
||||
}
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// IsRunning checks whether the daemon is reachable by attempting to connect
|
||||
// to the Unix socket. Returns true if the connection succeeds.
|
||||
func (c *Client) IsRunning() bool {
|
||||
conn, err := net.DialTimeout("unix", c.socketPath, clientDialTimeout)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
_ = conn.Close()
|
||||
return true
|
||||
}
|
||||
|
||||
// sendRequest connects to the daemon Unix socket, sends a JSON-encoded request,
|
||||
// and reads back a JSON-encoded response.
|
||||
func (c *Client) sendRequest(req Request) (*Response, error) {
|
||||
c.logDebug("Connecting to daemon at %s", c.socketPath)
|
||||
|
||||
conn, err := net.DialTimeout("unix", c.socketPath, clientDialTimeout)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to connect to daemon at %s: %w", c.socketPath, err)
|
||||
}
|
||||
defer conn.Close() //nolint:errcheck // best-effort close on request completion
|
||||
|
||||
// Set a read deadline for the response.
|
||||
if err := conn.SetReadDeadline(time.Now().Add(clientReadTimeout)); err != nil {
|
||||
return nil, fmt.Errorf("failed to set read deadline: %w", err)
|
||||
}
|
||||
|
||||
// Send the request as newline-delimited JSON.
|
||||
encoder := json.NewEncoder(conn)
|
||||
if err := encoder.Encode(req); err != nil {
|
||||
return nil, fmt.Errorf("failed to send request: %w", err)
|
||||
}
|
||||
|
||||
c.logDebug("Sent request: action=%s", req.Action)
|
||||
|
||||
// Read the response.
|
||||
decoder := json.NewDecoder(conn)
|
||||
var resp Response
|
||||
if err := decoder.Decode(&resp); err != nil {
|
||||
return nil, fmt.Errorf("failed to read response: %w", err)
|
||||
}
|
||||
|
||||
c.logDebug("Received response: ok=%v", resp.OK)
|
||||
|
||||
return &resp, nil
|
||||
}
|
||||
|
||||
// logDebug writes a debug message to stderr with the [greywall:daemon] prefix.
|
||||
func (c *Client) logDebug(format string, args ...interface{}) {
|
||||
if c.debug {
|
||||
fmt.Fprintf(os.Stderr, "[greywall:daemon] "+format+"\n", args...)
|
||||
}
|
||||
}
|
||||
176
internal/daemon/dns.go
Normal file
176
internal/daemon/dns.go
Normal file
@@ -0,0 +1,176 @@
|
||||
//go:build darwin || linux
|
||||
|
||||
package daemon
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
	// maxDNSPacketSize is the maximum UDP packet size we accept.
	// DNS can theoretically be up to 65535 bytes, but practically much smaller.
	maxDNSPacketSize = 4096

	// upstreamTimeout is the time we wait for a response from the upstream DNS server.
	upstreamTimeout = 5 * time.Second
)

// DNSRelay is a UDP DNS relay that forwards DNS queries from sandboxed processes
// to a configured upstream DNS server. It operates as a simple packet relay without
// parsing DNS protocol contents.
type DNSRelay struct {
	udpConn    *net.UDPConn // socket we receive client queries on (and reply through)
	targetAddr string       // upstream DNS server address (host:port)
	listenAddr string       // address we're listening on
	wg         sync.WaitGroup // tracks readLoop plus every in-flight handleQuery
	done       chan struct{}  // closed by Stop to mark shutdown-induced socket errors as expected
	debug      bool
}
|
||||
|
||||
// NewDNSRelay creates a new DNS relay that listens on listenAddr and forwards
|
||||
// queries to dnsAddr. The listenAddr will typically be "127.0.0.2:53" (loopback alias).
|
||||
// The dnsAddr must be in "host:port" format (e.g. "1.1.1.1:53").
|
||||
func NewDNSRelay(listenAddr, dnsAddr string, debug bool) (*DNSRelay, error) {
|
||||
// Validate the upstream DNS address is parseable as host:port.
|
||||
targetHost, targetPort, err := net.SplitHostPort(dnsAddr)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid DNS address %q: %w", dnsAddr, err)
|
||||
}
|
||||
if targetHost == "" {
|
||||
return nil, fmt.Errorf("invalid DNS address %q: empty host", dnsAddr)
|
||||
}
|
||||
if targetPort == "" {
|
||||
return nil, fmt.Errorf("invalid DNS address %q: empty port", dnsAddr)
|
||||
}
|
||||
|
||||
// Resolve and bind the listen address.
|
||||
udpAddr, err := net.ResolveUDPAddr("udp", listenAddr)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to resolve listen address %q: %w", listenAddr, err)
|
||||
}
|
||||
|
||||
conn, err := net.ListenUDP("udp", udpAddr)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to bind UDP socket on %q: %w", listenAddr, err)
|
||||
}
|
||||
|
||||
return &DNSRelay{
|
||||
udpConn: conn,
|
||||
targetAddr: dnsAddr,
|
||||
listenAddr: conn.LocalAddr().String(),
|
||||
done: make(chan struct{}),
|
||||
debug: debug,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ListenAddr returns the actual address the relay is listening on.
// This is useful when port 0 was used to get an ephemeral port.
// The value is set once in NewDNSRelay and never written afterwards.
func (d *DNSRelay) ListenAddr() string {
	return d.listenAddr
}
|
||||
|
||||
// Start begins the DNS relay loop. It reads incoming UDP packets from the
// listening socket and spawns a goroutine per query to forward it to the
// upstream DNS server and relay the response back.
//
// Start does not block: the read loop runs in its own goroutine, accounted
// for in d.wg so that Stop can wait for it to finish.
func (d *DNSRelay) Start() error {
	Logf("DNS relay listening on %s, forwarding to %s", d.listenAddr, d.targetAddr)

	d.wg.Add(1)
	go d.readLoop()

	return nil
}
|
||||
|
||||
// Stop shuts down the DNS relay. It signals the read loop to stop, closes the
// listening socket, and waits for all in-flight queries to complete.
func (d *DNSRelay) Stop() {
	// Order matters: closing done first lets readLoop classify the read error
	// caused by the subsequent socket close as an expected shutdown signal.
	close(d.done)
	_ = d.udpConn.Close()
	// Wait for readLoop and every spawned handleQuery goroutine to return.
	d.wg.Wait()

	Logf("DNS relay stopped")
}
|
||||
|
||||
// readLoop is the main loop that reads incoming DNS queries from the listening socket.
|
||||
func (d *DNSRelay) readLoop() {
|
||||
defer d.wg.Done()
|
||||
|
||||
buf := make([]byte, maxDNSPacketSize)
|
||||
for {
|
||||
n, clientAddr, err := d.udpConn.ReadFromUDP(buf)
|
||||
if err != nil {
|
||||
select {
|
||||
case <-d.done:
|
||||
// Shutting down, expected error from closed socket.
|
||||
return
|
||||
default:
|
||||
Logf("DNS relay: read error: %v", err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if n == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
// Copy the packet data so the buffer can be reused immediately.
|
||||
query := make([]byte, n)
|
||||
copy(query, buf[:n])
|
||||
|
||||
d.wg.Add(1)
|
||||
go d.handleQuery(query, clientAddr)
|
||||
}
|
||||
}
|
||||
|
||||
// handleQuery forwards a single DNS query to the upstream server and relays
|
||||
// the response back to the original client. It creates a dedicated UDP connection
|
||||
// to the upstream server to avoid multiplexing complexity.
|
||||
func (d *DNSRelay) handleQuery(query []byte, clientAddr *net.UDPAddr) {
|
||||
defer d.wg.Done()
|
||||
|
||||
Logf("DNS relay: query from %s (%d bytes)", clientAddr, len(query))
|
||||
|
||||
// Create a dedicated UDP connection to the upstream DNS server.
|
||||
// Use "udp4" to force IPv4, since the upstream may only listen on 127.0.0.1.
|
||||
upstreamConn, err := net.Dial("udp4", d.targetAddr)
|
||||
if err != nil {
|
||||
Logf("DNS relay: failed to connect to upstream %s: %v", d.targetAddr, err)
|
||||
return
|
||||
}
|
||||
defer upstreamConn.Close() //nolint:errcheck // best-effort cleanup of per-query UDP connection
|
||||
|
||||
// Send the query to the upstream server.
|
||||
if _, err := upstreamConn.Write(query); err != nil {
|
||||
Logf("DNS relay: failed to send query to upstream: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Wait for the response with a timeout.
|
||||
if err := upstreamConn.SetReadDeadline(time.Now().Add(upstreamTimeout)); err != nil {
|
||||
Logf("DNS relay: failed to set read deadline: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
resp := make([]byte, maxDNSPacketSize)
|
||||
n, err := upstreamConn.Read(resp)
|
||||
if err != nil {
|
||||
Logf("DNS relay: upstream response error from %s: %v", d.targetAddr, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Send the response back to the original client.
|
||||
if _, err := d.udpConn.WriteToUDP(resp[:n], clientAddr); err != nil {
|
||||
// The listening socket may have been closed during shutdown.
|
||||
select {
|
||||
case <-d.done:
|
||||
return
|
||||
default:
|
||||
Logf("DNS relay: failed to send response to %s: %v", clientAddr, err)
|
||||
}
|
||||
}
|
||||
|
||||
Logf("DNS relay: response to %s (%d bytes)", clientAddr, n)
|
||||
}
|
||||
296
internal/daemon/dns_test.go
Normal file
296
internal/daemon/dns_test.go
Normal file
@@ -0,0 +1,296 @@
|
||||
//go:build darwin || linux
|
||||
|
||||
package daemon
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"net"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// startMockDNSServer starts a UDP server that echoes back whatever it receives,
// prefixed with "RESP:" to distinguish responses from queries.
// Returns the server's address and a cleanup function.
func startMockDNSServer(t *testing.T) (string, func()) {
	t.Helper()

	addr, err := net.ResolveUDPAddr("udp", "127.0.0.1:0")
	if err != nil {
		t.Fatalf("Failed to resolve address: %v", err)
	}

	conn, err := net.ListenUDP("udp", addr)
	if err != nil {
		t.Fatalf("Failed to start mock DNS server: %v", err)
	}

	// done is closed by cleanup; the serving goroutine uses it to tell an
	// expected shutdown-induced read error apart from a transient failure.
	done := make(chan struct{})
	go func() {
		buf := make([]byte, maxDNSPacketSize)
		for {
			n, remoteAddr, err := conn.ReadFromUDP(buf)
			if err != nil {
				select {
				case <-done:
					return
				default:
					continue
				}
			}
			// Echo back with "RESP:" prefix.
			resp := append([]byte("RESP:"), buf[:n]...)
			_, _ = conn.WriteToUDP(resp, remoteAddr) // best-effort in test
		}
	}()

	cleanup := func() {
		close(done)
		_ = conn.Close()
	}

	return conn.LocalAddr().String(), cleanup
}
|
||||
|
||||
// startSilentDNSServer starts a UDP server that accepts connections but never
|
||||
// responds, simulating an upstream timeout.
|
||||
func startSilentDNSServer(t *testing.T) (string, func()) {
|
||||
t.Helper()
|
||||
|
||||
addr, err := net.ResolveUDPAddr("udp", "127.0.0.1:0")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to resolve address: %v", err)
|
||||
}
|
||||
|
||||
conn, err := net.ListenUDP("udp", addr)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to start silent DNS server: %v", err)
|
||||
}
|
||||
|
||||
cleanup := func() {
|
||||
_ = conn.Close()
|
||||
}
|
||||
|
||||
return conn.LocalAddr().String(), cleanup
|
||||
}
|
||||
|
||||
// TestDNSRelay_ForwardPacket covers the happy path: a packet sent to the relay
// is forwarded to the upstream server and the upstream's reply is returned.
func TestDNSRelay_ForwardPacket(t *testing.T) {
	// Start a mock upstream DNS server.
	upstreamAddr, cleanupUpstream := startMockDNSServer(t)
	defer cleanupUpstream()

	// Create and start the relay.
	relay, err := NewDNSRelay("127.0.0.1:0", upstreamAddr, true)
	if err != nil {
		t.Fatalf("Failed to create DNS relay: %v", err)
	}

	if err := relay.Start(); err != nil {
		t.Fatalf("Failed to start DNS relay: %v", err)
	}
	defer relay.Stop()

	// Send a query through the relay.
	clientConn, err := net.Dial("udp", relay.ListenAddr())
	if err != nil {
		t.Fatalf("Failed to connect to relay: %v", err)
	}
	defer clientConn.Close() //nolint:errcheck // test cleanup

	query := []byte("test-dns-query")
	if _, err := clientConn.Write(query); err != nil {
		t.Fatalf("Failed to send query: %v", err)
	}

	// Read the response.
	if err := clientConn.SetReadDeadline(time.Now().Add(5 * time.Second)); err != nil {
		t.Fatalf("Failed to set read deadline: %v", err)
	}

	buf := make([]byte, maxDNSPacketSize)
	n, err := clientConn.Read(buf)
	if err != nil {
		t.Fatalf("Failed to read response: %v", err)
	}

	// The mock upstream echoes the query back prefixed with "RESP:".
	expected := append([]byte("RESP:"), query...)
	if !bytes.Equal(buf[:n], expected) {
		t.Errorf("Unexpected response: got %q, want %q", buf[:n], expected)
	}
}
|
||||
|
||||
// TestDNSRelay_UpstreamTimeout verifies that when the upstream server never
// answers, the relay sends nothing back and the client's read times out.
func TestDNSRelay_UpstreamTimeout(t *testing.T) {
	// Start a silent upstream server that never responds.
	upstreamAddr, cleanupUpstream := startSilentDNSServer(t)
	defer cleanupUpstream()

	// Create and start the relay.
	relay, err := NewDNSRelay("127.0.0.1:0", upstreamAddr, false)
	if err != nil {
		t.Fatalf("Failed to create DNS relay: %v", err)
	}

	if err := relay.Start(); err != nil {
		t.Fatalf("Failed to start DNS relay: %v", err)
	}
	defer relay.Stop()

	// Send a query through the relay.
	clientConn, err := net.Dial("udp", relay.ListenAddr())
	if err != nil {
		t.Fatalf("Failed to connect to relay: %v", err)
	}
	defer clientConn.Close() //nolint:errcheck // test cleanup

	query := []byte("test-dns-timeout")
	if _, err := clientConn.Write(query); err != nil {
		t.Fatalf("Failed to send query: %v", err)
	}

	// The relay should not send back a response because upstream timed out.
	// Set a short deadline on the client side; we expect no data.
	// (6s deliberately exceeds the relay's 5s upstreamTimeout.)
	if err := clientConn.SetReadDeadline(time.Now().Add(6 * time.Second)); err != nil {
		t.Fatalf("Failed to set read deadline: %v", err)
	}

	buf := make([]byte, maxDNSPacketSize)
	_, err = clientConn.Read(buf)
	if err == nil {
		t.Fatal("Expected timeout error reading from relay, but got a response")
	}

	// Verify it was a timeout error.
	netErr, ok := err.(net.Error)
	if !ok || !netErr.Timeout() {
		t.Fatalf("Expected timeout error, got: %v", err)
	}
}
|
||||
|
||||
// TestDNSRelay_ConcurrentQueries fires many queries at the relay in parallel
// and checks that each client gets back its own echoed payload, exercising the
// per-query goroutine path in handleQuery.
func TestDNSRelay_ConcurrentQueries(t *testing.T) {
	// Start a mock upstream DNS server.
	upstreamAddr, cleanupUpstream := startMockDNSServer(t)
	defer cleanupUpstream()

	// Create and start the relay.
	relay, err := NewDNSRelay("127.0.0.1:0", upstreamAddr, false)
	if err != nil {
		t.Fatalf("Failed to create DNS relay: %v", err)
	}

	if err := relay.Start(); err != nil {
		t.Fatalf("Failed to start DNS relay: %v", err)
	}
	defer relay.Stop()

	const numQueries = 20
	var wg sync.WaitGroup
	// Buffered so goroutines never block on reporting; drained after wg.Wait.
	errors := make(chan error, numQueries)

	// NOTE: range-over-int requires Go 1.22+.
	for i := range numQueries {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()

			clientConn, err := net.Dial("udp", relay.ListenAddr())
			if err != nil {
				errors <- err
				return
			}
			defer clientConn.Close() //nolint:errcheck // test cleanup

			// Unique per-goroutine payload so cross-delivered responses are detected.
			query := []byte("concurrent-query-" + string(rune('A'+id))) //nolint:gosec // test uses small range 0-19, no overflow
			if _, err := clientConn.Write(query); err != nil {
				errors <- err
				return
			}

			if err := clientConn.SetReadDeadline(time.Now().Add(5 * time.Second)); err != nil {
				errors <- err
				return
			}

			buf := make([]byte, maxDNSPacketSize)
			n, err := clientConn.Read(buf)
			if err != nil {
				errors <- err
				return
			}

			expected := append([]byte("RESP:"), query...)
			if !bytes.Equal(buf[:n], expected) {
				errors <- &unexpectedResponseError{got: buf[:n], want: expected}
			}
		}(i)
	}

	wg.Wait()
	close(errors)

	for err := range errors {
		t.Errorf("Concurrent query error: %v", err)
	}
}
|
||||
|
||||
// TestDNSRelay_ListenAddr checks that binding to port 0 yields a concrete
// ephemeral port in ListenAddr. Note the relay is never Start()ed here;
// Stop() without Start appears safe since the WaitGroup has no pending work.
func TestDNSRelay_ListenAddr(t *testing.T) {
	// Use port 0 to get an ephemeral port.
	relay, err := NewDNSRelay("127.0.0.1:0", "1.1.1.1:53", false)
	if err != nil {
		t.Fatalf("Failed to create DNS relay: %v", err)
	}
	defer relay.Stop()

	addr := relay.ListenAddr()
	if addr == "" {
		t.Fatal("ListenAddr returned empty string")
	}

	host, port, err := net.SplitHostPort(addr)
	if err != nil {
		t.Fatalf("ListenAddr returned invalid address %q: %v", addr, err)
	}
	if host != "127.0.0.1" {
		t.Errorf("Expected host 127.0.0.1, got %q", host)
	}
	if port == "0" {
		t.Error("Expected assigned port, got 0")
	}
}
|
||||
|
||||
func TestNewDNSRelay_InvalidDNSAddr(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
dnsAddr string
|
||||
}{
|
||||
{"missing port", "1.1.1.1"},
|
||||
{"empty string", ""},
|
||||
{"empty host", ":53"},
|
||||
{"empty port", "1.1.1.1:"},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
_, err := NewDNSRelay("127.0.0.1:0", tt.dnsAddr, false)
|
||||
if err == nil {
|
||||
t.Errorf("Expected error for DNS address %q, got nil", tt.dnsAddr)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewDNSRelay_InvalidListenAddr(t *testing.T) {
|
||||
_, err := NewDNSRelay("invalid-addr", "1.1.1.1:53", false)
|
||||
if err == nil {
|
||||
t.Error("Expected error for invalid listen address, got nil")
|
||||
}
|
||||
}
|
||||
|
||||
// unexpectedResponseError is used to report mismatched responses in
// concurrent tests. It carries both the observed and the expected payload so
// the failure message shows the full mismatch.
type unexpectedResponseError struct {
	got  []byte // payload actually read from the relay
	want []byte // payload the test expected
}

// Error renders both payloads as text in a single diagnostic string.
func (e *unexpectedResponseError) Error() string {
	msg := "unexpected response: got " + string(e.got)
	msg += ", want " + string(e.want)
	return msg
}
|
||||
548
internal/daemon/launchd.go
Normal file
548
internal/daemon/launchd.go
Normal file
@@ -0,0 +1,548 @@
|
||||
//go:build darwin
|
||||
|
||||
package daemon
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Fixed filesystem and directory-service identities used by the macOS
// LaunchDaemon install/uninstall flow. These values are baked into the
// generated plist and sudoers rule, so changing one requires a reinstall.
const (
	// LaunchDaemonLabel is the launchd job label (reverse-DNS convention).
	LaunchDaemonLabel = "co.greyhaven.greywall"
	// LaunchDaemonPlistPath is where the job definition is written.
	LaunchDaemonPlistPath = "/Library/LaunchDaemons/co.greyhaven.greywall.plist"
	// InstallBinaryPath is the installed copy of the greywall binary.
	InstallBinaryPath = "/usr/local/bin/greywall"
	// InstallLibDir holds helper binaries (tun2socks) the daemon invokes.
	InstallLibDir = "/usr/local/lib/greywall"
	SandboxUserName = "_greywall"
	SandboxUserUID  = "399" // System user range on macOS; preferred default only — a free ID is scanned for if taken
	SandboxGroupName = "_greywall" // Group used for pf routing (same name as user)
	// SudoersFilePath grants group members passwordless sandbox-exec.
	SudoersFilePath = "/etc/sudoers.d/greywall"
	// DefaultSocketPath is the daemon's Unix control socket.
	DefaultSocketPath = "/var/run/greywall.sock"
)
|
||||
|
||||
// Install performs the full LaunchDaemon installation flow:
//  1. Verify running as root
//  2. Create system user _greywall
//  3. Create /usr/local/lib/greywall/ directory and copy tun2socks
//  4. Copy the current binary to /usr/local/bin/greywall
//  5. Generate and write the LaunchDaemon plist
//  6. Set proper permissions, load the daemon, and verify it starts
//
// currentBinaryPath is the executable copied to InstallBinaryPath;
// tun2socksPath is the helper copied into InstallLibDir. Every write targets
// a fixed system path, hence the root check. Steps abort on first error —
// there is no rollback of earlier steps.
func Install(currentBinaryPath, tun2socksPath string, debug bool) error {
	if os.Getuid() != 0 {
		return fmt.Errorf("daemon install must be run as root (use sudo)")
	}

	// Step 1: Create system user and group.
	if err := createSandboxUser(debug); err != nil {
		return fmt.Errorf("failed to create sandbox user: %w", err)
	}

	// Step 1b: Install sudoers rule for group-based sandbox-exec.
	if err := installSudoersRule(debug); err != nil {
		return fmt.Errorf("failed to install sudoers rule: %w", err)
	}

	// Step 1c: Add invoking user to _greywall group.
	// Non-fatal by design: the helper prints manual instructions on failure.
	addInvokingUserToGroup(debug)

	// Step 2: Create lib directory and copy tun2socks.
	logDebug(debug, "Creating directory %s", InstallLibDir)
	if err := os.MkdirAll(InstallLibDir, 0o755); err != nil { //nolint:gosec // system lib directory needs 0755 for daemon access
		return fmt.Errorf("failed to create %s: %w", InstallLibDir, err)
	}
	// The destination name is architecture-specific; DefaultTun2socksPath
	// must produce the same name so the daemon can find it later.
	tun2socksDst := filepath.Join(InstallLibDir, "tun2socks-darwin-"+runtime.GOARCH)
	logDebug(debug, "Copying tun2socks to %s", tun2socksDst)
	if err := copyFile(tun2socksPath, tun2socksDst, 0o755); err != nil {
		return fmt.Errorf("failed to install tun2socks: %w", err)
	}

	// Step 3: Copy binary to install path.
	if err := os.MkdirAll(filepath.Dir(InstallBinaryPath), 0o755); err != nil { //nolint:gosec // /usr/local/bin needs 0755
		return fmt.Errorf("failed to create %s: %w", filepath.Dir(InstallBinaryPath), err)
	}
	logDebug(debug, "Copying binary from %s to %s", currentBinaryPath, InstallBinaryPath)
	if err := copyFile(currentBinaryPath, InstallBinaryPath, 0o755); err != nil {
		return fmt.Errorf("failed to install binary: %w", err)
	}

	// Step 4: Generate and write plist.
	plist := generatePlist()
	logDebug(debug, "Writing plist to %s", LaunchDaemonPlistPath)
	if err := os.WriteFile(LaunchDaemonPlistPath, []byte(plist), 0o644); err != nil { //nolint:gosec // LaunchDaemon plist requires 0644 per macOS convention
		return fmt.Errorf("failed to write plist: %w", err)
	}

	// Step 5: Set ownership to root:wheel.
	// launchd refuses plists not owned by root:wheel.
	logDebug(debug, "Setting ownership on %s to root:wheel", LaunchDaemonPlistPath)
	if err := runCmd(debug, "chown", "root:wheel", LaunchDaemonPlistPath); err != nil {
		return fmt.Errorf("failed to set plist ownership: %w", err)
	}

	// Step 6: Load the daemon.
	if err := runCmd(debug, "launchctl", "load", LaunchDaemonPlistPath); err != nil {
		return fmt.Errorf("failed to load daemon: %w", err)
	}

	// Step 7: Verify the daemon actually started.
	// Poll up to 10 x 500ms = 5s; launchctl load returns before the job is up.
	running := false
	for range 10 {
		time.Sleep(500 * time.Millisecond)
		if IsRunning() {
			running = true
			break
		}
	}

	// Summary is printed even when the daemon failed to start, so the user
	// has the paths needed to debug.
	Logf("Daemon installed successfully.")
	Logf("  Plist:      %s", LaunchDaemonPlistPath)
	Logf("  Binary:     %s", InstallBinaryPath)
	Logf("  Tun2socks:  %s", tun2socksDst)
	actualUID := readDsclAttr(SandboxUserName, "UniqueID", true)
	actualGID := readDsclAttr(SandboxGroupName, "PrimaryGroupID", false)
	Logf("  User:       %s (UID %s)", SandboxUserName, actualUID)
	Logf("  Group:      %s (GID %s, pf routing)", SandboxGroupName, actualGID)
	Logf("  Sudoers:    %s", SudoersFilePath)
	Logf("  Log:        /var/log/greywall.log")

	if !running {
		Logf("  Status:     NOT RUNNING (check /var/log/greywall.log)")
		return fmt.Errorf("daemon was loaded but failed to start; check /var/log/greywall.log")
	}

	Logf("  Status:     running")
	return nil
}
|
||||
|
||||
// Uninstall performs the full LaunchDaemon uninstallation flow. It attempts
// every cleanup step even if individual steps fail, collecting errors along
// the way; partial cleanup is reported as warnings, not a fatal error.
func Uninstall(debug bool) error {
	if os.Getuid() != 0 {
		return fmt.Errorf("daemon uninstall must be run as root (use sudo)")
	}

	var errs []string

	// Step 1: Unload daemon (best effort).
	logDebug(debug, "Unloading LaunchDaemon")
	if err := runCmd(debug, "launchctl", "unload", LaunchDaemonPlistPath); err != nil {
		errs = append(errs, fmt.Sprintf("unload daemon: %v", err))
	}

	// Step 2: Remove plist file.
	logDebug(debug, "Removing plist %s", LaunchDaemonPlistPath)
	if err := os.Remove(LaunchDaemonPlistPath); err != nil && !os.IsNotExist(err) {
		errs = append(errs, fmt.Sprintf("remove plist: %v", err))
	}

	// Step 3: Remove lib directory.
	logDebug(debug, "Removing directory %s", InstallLibDir)
	if err := os.RemoveAll(InstallLibDir); err != nil {
		errs = append(errs, fmt.Sprintf("remove lib dir: %v", err))
	}

	// Step 4: Remove installed binary, but only if it differs from the
	// currently running executable — deleting the file we are executing
	// from would be surprising mid-run.
	currentExe, exeErr := os.Executable()
	if exeErr != nil {
		currentExe = ""
	}
	// EvalSymlinks errors are deliberately ignored: a failed resolution
	// yields "". NOTE(review): if BOTH resolutions fail, "" == "" skips
	// removal; in practice that only happens when the install path is
	// already gone — confirm this is the intended behavior.
	resolvedCurrent, _ := filepath.EvalSymlinks(currentExe)
	resolvedInstall, _ := filepath.EvalSymlinks(InstallBinaryPath)
	if resolvedCurrent != resolvedInstall {
		logDebug(debug, "Removing binary %s", InstallBinaryPath)
		if err := os.Remove(InstallBinaryPath); err != nil && !os.IsNotExist(err) {
			errs = append(errs, fmt.Sprintf("remove binary: %v", err))
		}
	} else {
		logDebug(debug, "Skipping binary removal (currently running from %s)", InstallBinaryPath)
	}

	// Step 5: Remove system user and group.
	if err := removeSandboxUser(debug); err != nil {
		errs = append(errs, fmt.Sprintf("remove sandbox user: %v", err))
	}

	// Step 6: Remove socket file if it exists.
	logDebug(debug, "Removing socket %s", DefaultSocketPath)
	if err := os.Remove(DefaultSocketPath); err != nil && !os.IsNotExist(err) {
		errs = append(errs, fmt.Sprintf("remove socket: %v", err))
	}

	// Step 6b: Remove sudoers file.
	logDebug(debug, "Removing sudoers file %s", SudoersFilePath)
	if err := os.Remove(SudoersFilePath); err != nil && !os.IsNotExist(err) {
		errs = append(errs, fmt.Sprintf("remove sudoers file: %v", err))
	}

	// Step 7: Remove pf anchor lines from /etc/pf.conf.
	// removeAnchorFromPFConf is defined elsewhere in this package.
	if err := removeAnchorFromPFConf(debug); err != nil {
		errs = append(errs, fmt.Sprintf("remove pf anchor: %v", err))
	}

	if len(errs) > 0 {
		Logf("Uninstall completed with warnings:")
		for _, e := range errs {
			Logf("  - %s", e)
		}
		return nil // partial cleanup is not a fatal error
	}

	Logf("Daemon uninstalled successfully.")
	return nil
}
|
||||
|
||||
// generatePlist returns the LaunchDaemon plist XML content.
|
||||
func generatePlist() string {
|
||||
return `<?xml version="1.0" encoding="UTF-8"?>
|
||||
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN"
|
||||
"http://www.apple.com/DTDs/PropertyList-1.0.dtd">
|
||||
<plist version="1.0">
|
||||
<dict>
|
||||
<key>Label</key>
|
||||
<string>` + LaunchDaemonLabel + `</string>
|
||||
<key>ProgramArguments</key>
|
||||
<array>
|
||||
<string>` + InstallBinaryPath + `</string>
|
||||
<string>daemon</string>
|
||||
<string>run</string>
|
||||
</array>
|
||||
<key>RunAtLoad</key><true/>
|
||||
<key>KeepAlive</key><true/>
|
||||
<key>StandardOutPath</key>
|
||||
<string>/var/log/greywall.log</string>
|
||||
<key>StandardErrorPath</key>
|
||||
<string>/var/log/greywall.log</string>
|
||||
</dict>
|
||||
</plist>
|
||||
`
|
||||
}
|
||||
|
||||
// IsInstalled returns true if the LaunchDaemon plist file exists.
|
||||
func IsInstalled() bool {
|
||||
_, err := os.Stat(LaunchDaemonPlistPath)
|
||||
return err == nil
|
||||
}
|
||||
|
||||
// IsRunning returns true if the daemon is currently running. It first tries
|
||||
// connecting to the Unix socket (works without root), then falls back to
|
||||
// launchctl print which can inspect the system domain without root.
|
||||
func IsRunning() bool {
|
||||
// Primary check: try to connect to the daemon socket. This proves the
|
||||
// daemon is actually running and accepting connections.
|
||||
conn, err := net.DialTimeout("unix", DefaultSocketPath, 2*time.Second)
|
||||
if err == nil {
|
||||
_ = conn.Close()
|
||||
return true
|
||||
}
|
||||
|
||||
// Fallback: launchctl print system/<label> works without root on modern
|
||||
// macOS (unlike launchctl list which only shows the caller's domain).
|
||||
//nolint:gosec // LaunchDaemonLabel is a constant
|
||||
out, err := exec.Command("launchctl", "print", "system/"+LaunchDaemonLabel).CombinedOutput()
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return strings.Contains(string(out), "state = running")
|
||||
}
|
||||
|
||||
// createSandboxUser creates the _greywall system user and group on macOS
// using dscl (Directory Service command line utility).
//
// If the user/group already exist with valid IDs, they are reused. Otherwise
// a free UID/GID is found dynamically (the hardcoded SandboxUserUID is only
// a preferred default — macOS system groups like com.apple.access_ssh may
// already claim it).
func createSandboxUser(debug bool) error {
	userPath := "/Users/" + SandboxUserName
	groupPath := "/Groups/" + SandboxUserName

	// Check if user already exists with a valid UniqueID.
	existingUID := readDsclAttr(SandboxUserName, "UniqueID", true)
	existingGID := readDsclAttr(SandboxGroupName, "PrimaryGroupID", false)

	if existingUID != "" && existingGID != "" {
		logDebug(debug, "System user %s (UID %s) and group (GID %s) already exist",
			SandboxUserName, existingUID, existingGID)
		return nil
	}

	// Find a free ID. Try the preferred default first, then scan.
	// NOTE(review): if exactly one of user/group pre-exists, the freshly
	// scanned ID may differ from the surviving record's ID — confirm the
	// UID/GID pair is meant to match in that case.
	id := SandboxUserUID
	if !isIDFree(id, debug) {
		var err error
		id, err = findFreeSystemID(debug)
		if err != nil {
			return fmt.Errorf("failed to find free UID/GID: %w", err)
		}
		logDebug(debug, "Preferred ID %s is taken, using %s instead", SandboxUserUID, id)
	}

	// Create the group record FIRST (so the GID exists before the user references it).
	logDebug(debug, "Ensuring system group %s (GID %s)", SandboxGroupName, id)
	if existingGID == "" {
		groupCmds := [][]string{
			{"dscl", ".", "-create", groupPath},
			{"dscl", ".", "-create", groupPath, "PrimaryGroupID", id},
			{"dscl", ".", "-create", groupPath, "RealName", "Greywall Sandbox"},
		}
		for _, args := range groupCmds {
			if err := runDsclCreate(debug, args); err != nil {
				return err
			}
		}
		// Verify the GID was actually set (runDsclCreate may have skipped it
		// on an eDSRecordAlreadyExists error).
		actualGID := readDsclAttr(SandboxGroupName, "PrimaryGroupID", false)
		if actualGID == "" {
			return fmt.Errorf("failed to set PrimaryGroupID on group %s (GID %s may be taken)", SandboxGroupName, id)
		}
	}

	// Create the user record: no login shell, no home directory, primary
	// group pointing at the record created above.
	logDebug(debug, "Ensuring system user %s (UID %s)", SandboxUserName, id)
	if existingUID == "" {
		userCmds := [][]string{
			{"dscl", ".", "-create", userPath},
			{"dscl", ".", "-create", userPath, "UniqueID", id},
			{"dscl", ".", "-create", userPath, "PrimaryGroupID", id},
			{"dscl", ".", "-create", userPath, "UserShell", "/usr/bin/false"},
			{"dscl", ".", "-create", userPath, "RealName", "Greywall Sandbox"},
			{"dscl", ".", "-create", userPath, "NFSHomeDirectory", "/var/empty"},
		}
		for _, args := range userCmds {
			if err := runDsclCreate(debug, args); err != nil {
				return err
			}
		}
	}

	logDebug(debug, "System user and group %s ready (ID %s)", SandboxUserName, id)
	return nil
}
|
||||
|
||||
// readDsclAttr reads a single attribute from a user or group record.
// Returns empty string if the record or attribute does not exist.
func readDsclAttr(name, attr string, isUser bool) string {
	recordType := "/Groups/"
	if isUser {
		recordType = "/Users/"
	}
	//nolint:gosec // name and attr are controlled constants
	out, err := exec.Command("dscl", ".", "-read", recordType+name, attr).Output()
	if err != nil {
		return ""
	}
	// Output format: "AttrName: value" — split off everything after the
	// first ": " and return it trimmed.
	_, value, found := strings.Cut(strings.TrimSpace(string(out)), ": ")
	if !found {
		return ""
	}
	return strings.TrimSpace(value)
}
|
||||
|
||||
// isIDFree checks whether a given numeric ID is available as both a UID and GID.
|
||||
func isIDFree(id string, debug bool) bool {
|
||||
// Check if any user has this UniqueID.
|
||||
//nolint:gosec // id is a controlled numeric string
|
||||
out, err := exec.Command("dscl", ".", "-search", "/Users", "UniqueID", id).Output()
|
||||
if err == nil && strings.TrimSpace(string(out)) != "" {
|
||||
logDebug(debug, "ID %s is taken by a user", id)
|
||||
return false
|
||||
}
|
||||
// Check if any group has this PrimaryGroupID.
|
||||
//nolint:gosec // id is a controlled numeric string
|
||||
out, err = exec.Command("dscl", ".", "-search", "/Groups", "PrimaryGroupID", id).Output()
|
||||
if err == nil && strings.TrimSpace(string(out)) != "" {
|
||||
logDebug(debug, "ID %s is taken by a group", id)
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// findFreeSystemID scans the macOS system ID range (350–499) for a UID/GID
|
||||
// pair that is not in use by any existing user or group.
|
||||
func findFreeSystemID(debug bool) (string, error) {
|
||||
for i := 350; i < 500; i++ {
|
||||
id := strconv.Itoa(i)
|
||||
if isIDFree(id, debug) {
|
||||
return id, nil
|
||||
}
|
||||
}
|
||||
return "", fmt.Errorf("no free system UID/GID found in range 350-499")
|
||||
}
|
||||
|
||||
// runDsclCreate runs a dscl -create command, silently ignoring
|
||||
// eDSRecordAlreadyExists errors (idempotent for repeated installs).
|
||||
func runDsclCreate(debug bool, args []string) error {
|
||||
err := runCmd(debug, args[0], args[1:]...)
|
||||
if err != nil && strings.Contains(err.Error(), "eDSRecordAlreadyExists") {
|
||||
logDebug(debug, "Already exists, skipping: %s", strings.Join(args, " "))
|
||||
return nil
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("dscl command failed (%s): %w", strings.Join(args, " "), err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// removeSandboxUser removes the _greywall system user and group.
|
||||
func removeSandboxUser(debug bool) error {
|
||||
var errs []string
|
||||
|
||||
userPath := "/Users/" + SandboxUserName
|
||||
groupPath := "/Groups/" + SandboxUserName
|
||||
|
||||
if userExists(SandboxUserName) {
|
||||
logDebug(debug, "Removing system user %s", SandboxUserName)
|
||||
if err := runCmd(debug, "dscl", ".", "-delete", userPath); err != nil {
|
||||
errs = append(errs, fmt.Sprintf("delete user: %v", err))
|
||||
}
|
||||
}
|
||||
|
||||
// Check if group exists before trying to remove.
|
||||
logDebug(debug, "Removing system group %s", SandboxUserName)
|
||||
if err := runCmd(debug, "dscl", ".", "-delete", groupPath); err != nil {
|
||||
// Group may not exist; only record error if it's not a "not found" case.
|
||||
errStr := err.Error()
|
||||
if !strings.Contains(errStr, "not found") && !strings.Contains(errStr, "does not exist") {
|
||||
errs = append(errs, fmt.Sprintf("delete group: %v", err))
|
||||
}
|
||||
}
|
||||
|
||||
if len(errs) > 0 {
|
||||
return fmt.Errorf("sandbox user removal issues: %s", strings.Join(errs, "; "))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// userExists checks if a user exists on macOS by querying the directory
// service; a successful dscl read means the record is present.
func userExists(username string) bool {
	//nolint:gosec // username is a controlled constant
	cmd := exec.Command("dscl", ".", "-read", "/Users/"+username)
	return cmd.Run() == nil
}
|
||||
|
||||
// installSudoersRule writes a sudoers rule that allows members of the
// _greywall group to run sandbox-exec as any user with group _greywall,
// without a password. The rule is validated with visudo -cf before install,
// so a malformed file can never reach /etc/sudoers.d (which would lock out
// sudo entirely).
func installSudoersRule(debug bool) error {
	// "%%" escapes the literal '%' that marks a group name in sudoers.
	rule := fmt.Sprintf("%%%s ALL = (ALL:%s) NOPASSWD: /usr/bin/sandbox-exec *\n",
		SandboxGroupName, SandboxGroupName)

	logDebug(debug, "Writing sudoers rule to %s", SudoersFilePath)

	// Ensure /etc/sudoers.d exists.
	if err := os.MkdirAll(filepath.Dir(SudoersFilePath), 0o755); err != nil { //nolint:gosec // /etc/sudoers.d must be 0755
		return fmt.Errorf("failed to create sudoers directory: %w", err)
	}

	// Write to a temp file first, then validate with visudo.
	tmpFile := SudoersFilePath + ".tmp"
	if err := os.WriteFile(tmpFile, []byte(rule), 0o440); err != nil { //nolint:gosec // sudoers files require 0440 per sudo(8)
		return fmt.Errorf("failed to write sudoers temp file: %w", err)
	}

	// Validate syntax before installing; remove the temp file on failure.
	//nolint:gosec // tmpFile is a controlled path
	if err := runCmd(debug, "visudo", "-cf", tmpFile); err != nil {
		_ = os.Remove(tmpFile)
		return fmt.Errorf("sudoers validation failed: %w", err)
	}

	// Move validated file into place (atomic on the same filesystem).
	if err := os.Rename(tmpFile, SudoersFilePath); err != nil {
		_ = os.Remove(tmpFile)
		return fmt.Errorf("failed to install sudoers file: %w", err)
	}

	// Ensure correct ownership (root:wheel) and permissions (0440) —
	// sudo ignores rule files that are group/world writable or wrongly owned.
	if err := runCmd(debug, "chown", "root:wheel", SudoersFilePath); err != nil {
		return fmt.Errorf("failed to set sudoers ownership: %w", err)
	}
	if err := os.Chmod(SudoersFilePath, 0o440); err != nil { //nolint:gosec // sudoers files require 0440 per sudo(8)
		return fmt.Errorf("failed to set sudoers permissions: %w", err)
	}

	logDebug(debug, "Sudoers rule installed: %s", SudoersFilePath)
	return nil
}
|
||||
|
||||
// addInvokingUserToGroup adds the real invoking user (detected via SUDO_USER)
|
||||
// to the _greywall group so they can use sudo -g _greywall. This is non-fatal;
|
||||
// if it fails, a manual instruction is printed.
|
||||
//
|
||||
// We use dscl -append (not dseditgroup) because dseditgroup can reset group
|
||||
// attributes like PrimaryGroupID on freshly created groups.
|
||||
func addInvokingUserToGroup(debug bool) {
|
||||
realUser := os.Getenv("SUDO_USER")
|
||||
if realUser == "" || realUser == "root" {
|
||||
Logf("Note: Could not detect invoking user (SUDO_USER not set).")
|
||||
Logf(" You may need to manually add your user to the %s group:", SandboxGroupName)
|
||||
Logf(" sudo dscl . -append /Groups/%s GroupMembership YOUR_USERNAME", SandboxGroupName)
|
||||
return
|
||||
}
|
||||
|
||||
groupPath := "/Groups/" + SandboxGroupName
|
||||
logDebug(debug, "Adding user %s to group %s", realUser, SandboxGroupName)
|
||||
//nolint:gosec // realUser comes from SUDO_USER env var set by sudo
|
||||
err := runCmd(debug, "dscl", ".", "-append", groupPath, "GroupMembership", realUser)
|
||||
if err != nil {
|
||||
Logf("Warning: failed to add %s to group %s: %v", realUser, SandboxGroupName, err)
|
||||
Logf(" You may need to run manually:")
|
||||
Logf(" sudo dscl . -append %s GroupMembership %s", groupPath, realUser)
|
||||
} else {
|
||||
Logf(" User %s added to group %s", realUser, SandboxGroupName)
|
||||
}
|
||||
}
|
||||
|
||||
// copyFile copies a file from src to dst with the given permissions.
|
||||
func copyFile(src, dst string, perm os.FileMode) error {
|
||||
srcFile, err := os.Open(src) //nolint:gosec // src is from os.Executable or user flag
|
||||
if err != nil {
|
||||
return fmt.Errorf("open source %s: %w", src, err)
|
||||
}
|
||||
defer srcFile.Close() //nolint:errcheck // read-only file; close error is not actionable
|
||||
|
||||
dstFile, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) //nolint:gosec // dst is a controlled install path constant
|
||||
if err != nil {
|
||||
return fmt.Errorf("create destination %s: %w", dst, err)
|
||||
}
|
||||
defer dstFile.Close() //nolint:errcheck // best-effort close; errors from Chmod/Copy are checked
|
||||
|
||||
if _, err := io.Copy(dstFile, srcFile); err != nil {
|
||||
return fmt.Errorf("copy data: %w", err)
|
||||
}
|
||||
|
||||
if err := dstFile.Chmod(perm); err != nil {
|
||||
return fmt.Errorf("set permissions on %s: %w", dst, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// runCmd executes a command and returns an error if it fails. When debug is
|
||||
// true, the command is logged before execution.
|
||||
func runCmd(debug bool, name string, args ...string) error {
|
||||
logDebug(debug, "exec: %s %s", name, strings.Join(args, " "))
|
||||
//nolint:gosec // arguments are constructed from internal constants
|
||||
cmd := exec.Command(name, args...)
|
||||
if output, err := cmd.CombinedOutput(); err != nil {
|
||||
return fmt.Errorf("%s failed: %w (output: %s)", name, err, strings.TrimSpace(string(output)))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// logDebug writes a timestamped debug message to stderr.
|
||||
func logDebug(debug bool, format string, args ...interface{}) {
|
||||
if debug {
|
||||
Logf(format, args...)
|
||||
}
|
||||
}
|
||||
37
internal/daemon/launchd_stub.go
Normal file
37
internal/daemon/launchd_stub.go
Normal file
@@ -0,0 +1,37 @@
|
||||
//go:build !darwin
|
||||
|
||||
package daemon
|
||||
|
||||
import "fmt"
|
||||
|
||||
// Stub mirror of the darwin constants so cross-platform code that references
// these names still compiles. Values must stay in sync with launchd.go.
const (
	LaunchDaemonLabel     = "co.greyhaven.greywall"
	LaunchDaemonPlistPath = "/Library/LaunchDaemons/co.greyhaven.greywall.plist"
	InstallBinaryPath     = "/usr/local/bin/greywall"
	InstallLibDir         = "/usr/local/lib/greywall"
	SandboxUserName       = "_greywall"
	SandboxUserUID        = "399"
	SandboxGroupName      = "_greywall"
	SudoersFilePath       = "/etc/sudoers.d/greywall"
	DefaultSocketPath     = "/var/run/greywall.sock"
)
|
||||
|
||||
// Install is only supported on macOS; on every other platform it refuses
// with a descriptive error.
func Install(currentBinaryPath, tun2socksPath string, debug bool) error {
	const msg = "daemon install is only supported on macOS"
	return fmt.Errorf(msg)
}
|
||||
|
||||
// Uninstall is only supported on macOS; on every other platform it refuses
// with a descriptive error.
func Uninstall(debug bool) error {
	const msg = "daemon uninstall is only supported on macOS"
	return fmt.Errorf(msg)
}
|
||||
|
||||
// IsInstalled always returns false on non-macOS platforms: there is no
// LaunchDaemon equivalent here, so the daemon can never be installed.
func IsInstalled() bool {
	return false
}
|
||||
|
||||
// IsRunning always returns false on non-macOS platforms: without an
// installable daemon there is nothing that could be running.
func IsRunning() bool {
	return false
}
|
||||
129
internal/daemon/launchd_test.go
Normal file
129
internal/daemon/launchd_test.go
Normal file
@@ -0,0 +1,129 @@
|
||||
//go:build darwin
|
||||
|
||||
package daemon
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestGeneratePlist(t *testing.T) {
|
||||
plist := generatePlist()
|
||||
|
||||
// Verify it is valid-looking XML with the expected plist header.
|
||||
if !strings.HasPrefix(plist, `<?xml version="1.0" encoding="UTF-8"?>`) {
|
||||
t.Error("plist should start with XML declaration")
|
||||
}
|
||||
|
||||
if !strings.Contains(plist, `<!DOCTYPE plist PUBLIC`) {
|
||||
t.Error("plist should contain DOCTYPE declaration")
|
||||
}
|
||||
|
||||
if !strings.Contains(plist, `<plist version="1.0">`) {
|
||||
t.Error("plist should contain plist version tag")
|
||||
}
|
||||
|
||||
// Verify the label matches the constant.
|
||||
expectedLabel := "<string>" + LaunchDaemonLabel + "</string>"
|
||||
if !strings.Contains(plist, expectedLabel) {
|
||||
t.Errorf("plist should contain label %q", LaunchDaemonLabel)
|
||||
}
|
||||
|
||||
// Verify program arguments.
|
||||
if !strings.Contains(plist, "<string>"+InstallBinaryPath+"</string>") {
|
||||
t.Errorf("plist should reference binary path %q", InstallBinaryPath)
|
||||
}
|
||||
|
||||
if !strings.Contains(plist, "<string>daemon</string>") {
|
||||
t.Error("plist should contain 'daemon' argument")
|
||||
}
|
||||
|
||||
if !strings.Contains(plist, "<string>run</string>") {
|
||||
t.Error("plist should contain 'run' argument")
|
||||
}
|
||||
|
||||
// Verify RunAtLoad and KeepAlive.
|
||||
if !strings.Contains(plist, "<key>RunAtLoad</key><true/>") {
|
||||
t.Error("plist should have RunAtLoad set to true")
|
||||
}
|
||||
|
||||
if !strings.Contains(plist, "<key>KeepAlive</key><true/>") {
|
||||
t.Error("plist should have KeepAlive set to true")
|
||||
}
|
||||
|
||||
// Verify log paths.
|
||||
if !strings.Contains(plist, "/var/log/greywall.log") {
|
||||
t.Error("plist should reference /var/log/greywall.log for stdout/stderr")
|
||||
}
|
||||
}
|
||||
|
||||
func TestGeneratePlistProgramArguments(t *testing.T) {
|
||||
plist := generatePlist()
|
||||
|
||||
// Verify the ProgramArguments array contains exactly 3 entries in order.
|
||||
// The array should be: /usr/local/bin/greywall, daemon, run
|
||||
argStart := strings.Index(plist, "<key>ProgramArguments</key>")
|
||||
if argStart == -1 {
|
||||
t.Fatal("plist should contain ProgramArguments key")
|
||||
}
|
||||
|
||||
// Extract the array section.
|
||||
arrayStart := strings.Index(plist[argStart:], "<array>")
|
||||
arrayEnd := strings.Index(plist[argStart:], "</array>")
|
||||
if arrayStart == -1 || arrayEnd == -1 {
|
||||
t.Fatal("ProgramArguments should contain an array")
|
||||
}
|
||||
|
||||
arrayContent := plist[argStart+arrayStart : argStart+arrayEnd]
|
||||
|
||||
expectedArgs := []string{InstallBinaryPath, "daemon", "run"}
|
||||
for _, arg := range expectedArgs {
|
||||
if !strings.Contains(arrayContent, "<string>"+arg+"</string>") {
|
||||
t.Errorf("ProgramArguments array should contain %q", arg)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsInstalledReturnsFalse(t *testing.T) {
|
||||
// On a test machine without the daemon installed, this should return false.
|
||||
// We cannot guarantee the daemon is not installed, but on most dev machines
|
||||
// it will not be. This test verifies the function runs without panicking.
|
||||
result := IsInstalled()
|
||||
|
||||
// We only validate the function returns a bool without error.
|
||||
// On CI/dev machines the plist should not exist.
|
||||
_ = result
|
||||
}
|
||||
|
||||
func TestIsRunningReturnsFalse(t *testing.T) {
|
||||
// On a test machine without the daemon running, this should return false.
|
||||
// Similar to TestIsInstalledReturnsFalse, we verify it runs cleanly.
|
||||
result := IsRunning()
|
||||
_ = result
|
||||
}
|
||||
|
||||
func TestConstants(t *testing.T) {
|
||||
// Verify constants have expected values.
|
||||
tests := []struct {
|
||||
name string
|
||||
got string
|
||||
expected string
|
||||
}{
|
||||
{"LaunchDaemonLabel", LaunchDaemonLabel, "co.greyhaven.greywall"},
|
||||
{"LaunchDaemonPlistPath", LaunchDaemonPlistPath, "/Library/LaunchDaemons/co.greyhaven.greywall.plist"},
|
||||
{"InstallBinaryPath", InstallBinaryPath, "/usr/local/bin/greywall"},
|
||||
{"InstallLibDir", InstallLibDir, "/usr/local/lib/greywall"},
|
||||
{"SandboxUserName", SandboxUserName, "_greywall"},
|
||||
{"SandboxUserUID", SandboxUserUID, "399"},
|
||||
{"SandboxGroupName", SandboxGroupName, "_greywall"},
|
||||
{"SudoersFilePath", SudoersFilePath, "/etc/sudoers.d/greywall"},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if tt.got != tt.expected {
|
||||
t.Errorf("%s = %q, want %q", tt.name, tt.got, tt.expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
13
internal/daemon/log.go
Normal file
13
internal/daemon/log.go
Normal file
@@ -0,0 +1,13 @@
|
||||
package daemon
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Logf writes a timestamped message to stderr with the [greywall:daemon] prefix.
func Logf(format string, args ...interface{}) {
	prefix := time.Now().Format("2006-01-02 15:04:05") + " [greywall:daemon] "
	fmt.Fprintf(os.Stderr, prefix+format+"\n", args...) //nolint:gosec // logging to stderr, not user-facing HTML
}
|
||||
81
internal/daemon/program.go
Normal file
81
internal/daemon/program.go
Normal file
@@ -0,0 +1,81 @@
|
||||
package daemon
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
|
||||
"github.com/kardianos/service"
|
||||
)
|
||||
|
||||
// program implements the kardianos/service.Interface for greywall daemon
// lifecycle management. It delegates actual work to the Server type.
type program struct {
	// server is created lazily in Start and is nil until then; Stop
	// tolerates a nil server.
	server *Server
	// socketPath is the Unix control socket the server listens on.
	socketPath string
	// tun2socksPath is the helper binary the server launches.
	tun2socksPath string
	// debug enables verbose logging.
	debug bool
}
|
||||
|
||||
// NewProgram creates a new program instance for use with kardianos/service.
|
||||
func NewProgram(socketPath, tun2socksPath string, debug bool) *program {
|
||||
return &program{
|
||||
socketPath: socketPath,
|
||||
tun2socksPath: tun2socksPath,
|
||||
debug: debug,
|
||||
}
|
||||
}
|
||||
|
||||
// Start is called by kardianos/service when the service starts. It verifies
|
||||
// the tun2socks binary exists, creates and starts the Server. The accept loop
|
||||
// already runs in a goroutine, so this returns immediately.
|
||||
func (p *program) Start(_ service.Service) error {
|
||||
if _, err := os.Stat(p.tun2socksPath); err != nil {
|
||||
return fmt.Errorf("tun2socks binary not found at %s (run 'sudo greywall daemon install' first)", p.tun2socksPath)
|
||||
}
|
||||
|
||||
Logf("Starting daemon (tun2socks=%s, socket=%s)", p.tun2socksPath, p.socketPath)
|
||||
|
||||
p.server = NewServer(p.socketPath, p.tun2socksPath, p.debug)
|
||||
if err := p.server.Start(); err != nil {
|
||||
return fmt.Errorf("failed to start daemon server: %w", err)
|
||||
}
|
||||
|
||||
Logf("Daemon started, listening on %s", p.socketPath)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stop is called by kardianos/service when the service stops.
|
||||
func (p *program) Stop(_ service.Service) error {
|
||||
if p.server == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
Logf("Stopping daemon")
|
||||
if err := p.server.Stop(); err != nil {
|
||||
Logf("Shutdown error: %v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
Logf("Daemon stopped")
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewServiceConfig returns a kardianos/service config matching the existing
|
||||
// LaunchDaemon setup. The Name matches LaunchDaemonLabel so service.Control()
|
||||
// can find and manage the already-installed service.
|
||||
func NewServiceConfig() *service.Config {
|
||||
return &service.Config{
|
||||
Name: LaunchDaemonLabel,
|
||||
DisplayName: "Greywall Daemon",
|
||||
Description: "Greywall transparent network sandboxing daemon",
|
||||
Arguments: []string{"daemon", "run"},
|
||||
}
|
||||
}
|
||||
|
||||
// DefaultTun2socksPath returns the expected tun2socks binary path based on
|
||||
// the install directory and current architecture.
|
||||
func DefaultTun2socksPath() string {
|
||||
return filepath.Join(InstallLibDir, "tun2socks-darwin-"+runtime.GOARCH)
|
||||
}
|
||||
246
internal/daemon/relay.go
Normal file
246
internal/daemon/relay.go
Normal file
@@ -0,0 +1,246 @@
|
||||
//go:build darwin || linux
|
||||
|
||||
package daemon
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/url"
|
||||
"os"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultMaxConns = 256
|
||||
connIdleTimeout = 5 * time.Minute
|
||||
upstreamDialTimout = 10 * time.Second
|
||||
)
|
||||
|
||||
// Relay is a pure Go TCP relay that forwards connections from local listeners
// to an upstream SOCKS5 proxy address. It does NOT implement the SOCKS5 protocol;
// it blindly forwards bytes between the local connection and the upstream proxy.
type Relay struct {
	listeners   []net.Listener // both IPv4 and IPv6 listeners
	targetAddr  string         // external SOCKS5 proxy host:port
	port        int            // assigned port
	wg          sync.WaitGroup // tracks accept loops and per-connection handlers
	done        chan struct{}  // closed by Stop to signal shutdown to accept loops
	debug       bool           // enables verbose stderr logging
	maxConns    int            // max concurrent connections (default 256)
	activeConns atomic.Int32   // current active connections
}
|
||||
|
||||
// NewRelay parses a proxy URL to extract host:port and binds listeners on both
|
||||
// 127.0.0.1 and [::1] using the same port. The port is dynamically assigned
|
||||
// from the first (IPv4) bind. If the IPv6 bind fails, the relay continues
|
||||
// with IPv4 only. Binding both addresses prevents IPv6 port squatting attacks.
|
||||
func NewRelay(proxyURL string, debug bool) (*Relay, error) {
|
||||
u, err := url.Parse(proxyURL)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid proxy URL %q: %w", proxyURL, err)
|
||||
}
|
||||
|
||||
host := u.Hostname()
|
||||
port := u.Port()
|
||||
if host == "" || port == "" {
|
||||
return nil, fmt.Errorf("proxy URL must include host and port: %q", proxyURL)
|
||||
}
|
||||
targetAddr := net.JoinHostPort(host, port)
|
||||
|
||||
// Bind IPv4 first to get a dynamically assigned port.
|
||||
ipv4Listener, err := net.Listen("tcp4", "127.0.0.1:0")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to bind IPv4 listener: %w", err)
|
||||
}
|
||||
|
||||
assignedPort := ipv4Listener.Addr().(*net.TCPAddr).Port
|
||||
listeners := []net.Listener{ipv4Listener}
|
||||
|
||||
// Bind IPv6 on the same port. If it fails, log and continue with IPv4 only.
|
||||
ipv6Addr := fmt.Sprintf("[::1]:%d", assignedPort)
|
||||
ipv6Listener, err := net.Listen("tcp6", ipv6Addr)
|
||||
if err != nil {
|
||||
if debug {
|
||||
fmt.Fprintf(os.Stderr, "[greywall:relay] IPv6 bind on %s failed, continuing IPv4 only: %v\n", ipv6Addr, err)
|
||||
}
|
||||
} else {
|
||||
listeners = append(listeners, ipv6Listener)
|
||||
}
|
||||
|
||||
if debug {
|
||||
fmt.Fprintf(os.Stderr, "[greywall:relay] Bound %d listener(s) on port %d -> %s\n", len(listeners), assignedPort, targetAddr)
|
||||
}
|
||||
|
||||
return &Relay{
|
||||
listeners: listeners,
|
||||
targetAddr: targetAddr,
|
||||
port: assignedPort,
|
||||
done: make(chan struct{}),
|
||||
debug: debug,
|
||||
maxConns: defaultMaxConns,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Port returns the local port the relay is listening on. The value is fixed
// at construction time, so no synchronization is needed.
func (r *Relay) Port() int {
	return r.port
}
|
||||
|
||||
// Start begins accepting connections on all listeners. Each accepted connection
|
||||
// is handled in its own goroutine with bidirectional forwarding to the upstream
|
||||
// proxy address. Start returns immediately; use Stop to shut down.
|
||||
func (r *Relay) Start() error {
|
||||
for _, ln := range r.listeners {
|
||||
r.wg.Add(1)
|
||||
go r.acceptLoop(ln)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stop gracefully shuts down the relay by closing all listeners and waiting
|
||||
// for in-flight connections to finish.
|
||||
func (r *Relay) Stop() {
|
||||
close(r.done)
|
||||
for _, ln := range r.listeners {
|
||||
_ = ln.Close()
|
||||
}
|
||||
r.wg.Wait()
|
||||
}
|
||||
|
||||
// acceptLoop runs the accept loop for a single listener.
|
||||
func (r *Relay) acceptLoop(ln net.Listener) {
|
||||
defer r.wg.Done()
|
||||
|
||||
for {
|
||||
conn, err := ln.Accept()
|
||||
if err != nil {
|
||||
select {
|
||||
case <-r.done:
|
||||
return
|
||||
default:
|
||||
}
|
||||
// Transient accept error; continue.
|
||||
if r.debug {
|
||||
fmt.Fprintf(os.Stderr, "[greywall:relay] Accept error: %v\n", err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
r.wg.Add(1)
|
||||
go r.handleConn(conn)
|
||||
}
|
||||
}
|
||||
|
||||
// handleConn handles a single accepted connection by dialing the upstream
|
||||
// proxy and performing bidirectional byte forwarding.
|
||||
func (r *Relay) handleConn(local net.Conn) {
|
||||
defer r.wg.Done()
|
||||
|
||||
remoteAddr := local.RemoteAddr().String()
|
||||
|
||||
// Enforce max concurrent connections.
|
||||
current := r.activeConns.Add(1)
|
||||
if int(current) > r.maxConns {
|
||||
r.activeConns.Add(-1)
|
||||
if r.debug {
|
||||
fmt.Fprintf(os.Stderr, "[greywall:relay] Connection from %s rejected: max connections (%d) reached\n", remoteAddr, r.maxConns)
|
||||
}
|
||||
_ = local.Close()
|
||||
return
|
||||
}
|
||||
defer r.activeConns.Add(-1)
|
||||
|
||||
if r.debug {
|
||||
fmt.Fprintf(os.Stderr, "[greywall:relay] Connection accepted from %s\n", remoteAddr)
|
||||
}
|
||||
|
||||
// Dial the upstream proxy.
|
||||
upstream, err := net.DialTimeout("tcp", r.targetAddr, upstreamDialTimout)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "[greywall:relay] WARNING: upstream connect to %s failed: %v\n", r.targetAddr, err)
|
||||
_ = local.Close()
|
||||
return
|
||||
}
|
||||
|
||||
if r.debug {
|
||||
fmt.Fprintf(os.Stderr, "[greywall:relay] Upstream connected: %s -> %s\n", remoteAddr, r.targetAddr)
|
||||
}
|
||||
|
||||
// Bidirectional copy with proper TCP half-close.
|
||||
var (
|
||||
localToUpstream int64
|
||||
upstreamToLocal int64
|
||||
copyWg sync.WaitGroup
|
||||
)
|
||||
|
||||
copyWg.Add(2)
|
||||
|
||||
// local -> upstream
|
||||
go func() {
|
||||
defer copyWg.Done()
|
||||
localToUpstream = r.copyWithHalfClose(upstream, local)
|
||||
}()
|
||||
|
||||
// upstream -> local
|
||||
go func() {
|
||||
defer copyWg.Done()
|
||||
upstreamToLocal = r.copyWithHalfClose(local, upstream)
|
||||
}()
|
||||
|
||||
copyWg.Wait()
|
||||
_ = local.Close()
|
||||
_ = upstream.Close()
|
||||
|
||||
if r.debug {
|
||||
fmt.Fprintf(os.Stderr, "[greywall:relay] Connection closed %s (sent=%d recv=%d)\n", remoteAddr, localToUpstream, upstreamToLocal)
|
||||
}
|
||||
}
|
||||
|
||||
// copyWithHalfClose copies data from src to dst, setting an idle timeout on
// each read. When the source reaches EOF, it signals a TCP half-close on dst
// via CloseWrite (if available) rather than a full Close.
// It returns the total number of bytes successfully written to dst.
func (r *Relay) copyWithHalfClose(dst, src net.Conn) int64 {
	buf := make([]byte, 32*1024)
	var written int64

	for {
		// Reset idle timeout before each read.
		if err := src.SetReadDeadline(time.Now().Add(connIdleTimeout)); err != nil {
			// Deadline can no longer be set (conn likely closed); stop copying.
			break
		}

		nr, readErr := src.Read(buf)
		if nr > 0 {
			// Reset write deadline for each write.
			if err := dst.SetWriteDeadline(time.Now().Add(connIdleTimeout)); err != nil {
				break
			}
			nw, writeErr := dst.Write(buf[:nr])
			written += int64(nw)
			if writeErr != nil {
				break
			}
			if nw != nr {
				// Short write: treat as fatal for this direction.
				break
			}
		}
		if readErr != nil {
			// Source hit EOF or error: signal half-close on destination.
			if tcpDst, ok := dst.(*net.TCPConn); ok {
				_ = tcpDst.CloseWrite()
			}
			if readErr != io.EOF {
				// Unexpected error; connection may have timed out.
				if r.debug {
					fmt.Fprintf(os.Stderr, "[greywall:relay] Copy error: %v\n", readErr)
				}
			}
			break
		}
	}

	return written
}
|
||||
373
internal/daemon/relay_test.go
Normal file
373
internal/daemon/relay_test.go
Normal file
@@ -0,0 +1,373 @@
|
||||
//go:build darwin || linux
|
||||
|
||||
package daemon
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// startEchoServer starts a TCP server that echoes back everything it receives.
|
||||
// It returns the listener and its address.
|
||||
func startEchoServer(t *testing.T) net.Listener {
|
||||
t.Helper()
|
||||
ln, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
if err != nil {
|
||||
t.Fatalf("failed to start echo server: %v", err)
|
||||
}
|
||||
go func() {
|
||||
for {
|
||||
conn, err := ln.Accept()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
go func(c net.Conn) {
|
||||
defer c.Close() //nolint:errcheck // test cleanup
|
||||
_, _ = io.Copy(c, c)
|
||||
}(conn)
|
||||
}
|
||||
}()
|
||||
return ln
|
||||
}
|
||||
|
||||
// startBlackHoleServer accepts connections but never reads/writes, then closes.
|
||||
func startBlackHoleServer(t *testing.T) net.Listener {
|
||||
t.Helper()
|
||||
ln, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
if err != nil {
|
||||
t.Fatalf("failed to start black hole server: %v", err)
|
||||
}
|
||||
go func() {
|
||||
for {
|
||||
conn, err := ln.Accept()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
_ = conn.Close()
|
||||
}
|
||||
}()
|
||||
return ln
|
||||
}
|
||||
|
||||
// TestRelayBidirectionalForward verifies a single round-trip: bytes written
// into the relay reach the upstream and the reply comes back intact.
func TestRelayBidirectionalForward(t *testing.T) {
	// Start a mock upstream (echo server) acting as the "SOCKS5 proxy".
	echo := startEchoServer(t)
	defer echo.Close() //nolint:errcheck // test cleanup
	echoAddr := echo.Addr().String()

	proxyURL := fmt.Sprintf("socks5://%s", echoAddr)
	relay, err := NewRelay(proxyURL, true)
	if err != nil {
		t.Fatalf("NewRelay failed: %v", err)
	}

	if err := relay.Start(); err != nil {
		t.Fatalf("Start failed: %v", err)
	}
	defer relay.Stop()

	// Connect through the relay.
	conn, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", relay.Port()))
	if err != nil {
		t.Fatalf("failed to connect to relay: %v", err)
	}
	defer conn.Close() //nolint:errcheck // test cleanup

	// Send data and verify it echoes back.
	msg := []byte("hello, relay!")
	if _, err := conn.Write(msg); err != nil {
		t.Fatalf("write failed: %v", err)
	}

	buf := make([]byte, len(msg))
	_ = conn.SetReadDeadline(time.Now().Add(2 * time.Second))
	if _, err := io.ReadFull(conn, buf); err != nil {
		t.Fatalf("read failed: %v", err)
	}

	if !bytes.Equal(buf, msg) {
		t.Fatalf("expected %q, got %q", msg, buf)
	}
}
|
||||
|
||||
// TestRelayMultipleMessages sends several sequential round-trips over one
// relayed connection and checks each echo arrives intact and in order.
func TestRelayMultipleMessages(t *testing.T) {
	echo := startEchoServer(t)
	defer echo.Close() //nolint:errcheck // test cleanup

	proxyURL := fmt.Sprintf("socks5://%s", echo.Addr().String())
	relay, err := NewRelay(proxyURL, false)
	if err != nil {
		t.Fatalf("NewRelay failed: %v", err)
	}

	if err := relay.Start(); err != nil {
		t.Fatalf("Start failed: %v", err)
	}
	defer relay.Stop()

	conn, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", relay.Port()))
	if err != nil {
		t.Fatalf("failed to connect to relay: %v", err)
	}
	defer conn.Close() //nolint:errcheck // test cleanup

	// Send multiple messages and verify each echoes back.
	for i := 0; i < 10; i++ {
		msg := []byte(fmt.Sprintf("message-%d", i))
		if _, err := conn.Write(msg); err != nil {
			t.Fatalf("write %d failed: %v", i, err)
		}

		buf := make([]byte, len(msg))
		_ = conn.SetReadDeadline(time.Now().Add(2 * time.Second))
		if _, err := io.ReadFull(conn, buf); err != nil {
			t.Fatalf("read %d failed: %v", i, err)
		}
		if !bytes.Equal(buf, msg) {
			t.Fatalf("message %d: expected %q, got %q", i, msg, buf)
		}
	}
}
|
||||
|
||||
// TestRelayUpstreamConnectionFailure verifies that when the upstream proxy is
// unreachable, the relay still accepts the local connection and then closes it.
func TestRelayUpstreamConnectionFailure(t *testing.T) {
	// Find a port that is not listening.
	// NOTE(review): small TOCTOU window — another process could claim the port
	// between Close and the relay's dial; acceptable flake risk for a test.
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		t.Fatal(err)
	}
	deadPort := ln.Addr().(*net.TCPAddr).Port
	_ = ln.Close() // close immediately so nothing is listening

	proxyURL := fmt.Sprintf("socks5://127.0.0.1:%d", deadPort)
	relay, err := NewRelay(proxyURL, true)
	if err != nil {
		t.Fatalf("NewRelay failed: %v", err)
	}

	if err := relay.Start(); err != nil {
		t.Fatalf("Start failed: %v", err)
	}
	defer relay.Stop()

	// Connect to the relay. The relay should accept the connection but then
	// fail to reach the upstream, causing the local side to be closed.
	conn, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", relay.Port()))
	if err != nil {
		t.Fatalf("failed to connect to relay: %v", err)
	}
	defer conn.Close() //nolint:errcheck // test cleanup

	// The relay should close the connection after failing upstream dial.
	_ = conn.SetReadDeadline(time.Now().Add(2 * time.Second))
	buf := make([]byte, 1)
	_, readErr := conn.Read(buf)
	if readErr == nil {
		t.Fatal("expected read error (connection should be closed), got nil")
	}
}
|
||||
|
||||
// TestRelayConcurrentConnections exercises the relay with many simultaneous
// clients, each performing one echo round-trip; failures are collected on a
// buffered channel and reported after all goroutines finish.
func TestRelayConcurrentConnections(t *testing.T) {
	echo := startEchoServer(t)
	defer echo.Close() //nolint:errcheck // test cleanup

	proxyURL := fmt.Sprintf("socks5://%s", echo.Addr().String())
	relay, err := NewRelay(proxyURL, false)
	if err != nil {
		t.Fatalf("NewRelay failed: %v", err)
	}

	if err := relay.Start(); err != nil {
		t.Fatalf("Start failed: %v", err)
	}
	defer relay.Stop()

	const numConns = 50
	var wg sync.WaitGroup
	errors := make(chan error, numConns)

	for i := 0; i < numConns; i++ {
		wg.Add(1)
		go func(idx int) {
			defer wg.Done()

			conn, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", relay.Port()))
			if err != nil {
				errors <- fmt.Errorf("conn %d: dial failed: %w", idx, err)
				return
			}
			defer conn.Close() //nolint:errcheck // test cleanup

			msg := []byte(fmt.Sprintf("concurrent-%d", idx))
			if _, err := conn.Write(msg); err != nil {
				errors <- fmt.Errorf("conn %d: write failed: %w", idx, err)
				return
			}

			buf := make([]byte, len(msg))
			_ = conn.SetReadDeadline(time.Now().Add(5 * time.Second))
			if _, err := io.ReadFull(conn, buf); err != nil {
				errors <- fmt.Errorf("conn %d: read failed: %w", idx, err)
				return
			}

			if !bytes.Equal(buf, msg) {
				errors <- fmt.Errorf("conn %d: expected %q, got %q", idx, msg, buf)
			}
		}(i)
	}

	wg.Wait()
	close(errors)

	// Drain after close: report every collected failure.
	for err := range errors {
		t.Error(err)
	}
}
|
||||
|
||||
// TestRelayMaxConnsLimit is a smoke test for the maxConns knob: it lowers the
// limit and verifies the relay still starts, accepts, and stops cleanly. It
// does not actually saturate the limit (see the inline comment below).
func TestRelayMaxConnsLimit(t *testing.T) {
	// Use a black hole server so connections stay open.
	bh := startBlackHoleServer(t)
	defer bh.Close() //nolint:errcheck // test cleanup

	proxyURL := fmt.Sprintf("socks5://%s", bh.Addr().String())
	relay, err := NewRelay(proxyURL, true)
	if err != nil {
		t.Fatalf("NewRelay failed: %v", err)
	}
	// Set a very low limit for testing.
	relay.maxConns = 2

	if err := relay.Start(); err != nil {
		t.Fatalf("Start failed: %v", err)
	}
	defer relay.Stop()

	// The black hole server closes connections immediately, so the relay's
	// handleConn will finish quickly. Instead, use an echo server that holds
	// connections open to truly test the limit.
	// We just verify the relay starts and stops cleanly with the low limit.
	conn, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", relay.Port()))
	if err != nil {
		t.Fatalf("failed to connect: %v", err)
	}
	_ = conn.Close()
}
|
||||
|
||||
func TestRelayTCPHalfClose(t *testing.T) {
|
||||
// Start a server that reads everything, then sends a response, then closes.
|
||||
ln, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
if err != nil {
|
||||
t.Fatalf("failed to listen: %v", err)
|
||||
}
|
||||
defer ln.Close() //nolint:errcheck // test cleanup
|
||||
|
||||
response := []byte("server-response-after-client-close")
|
||||
|
||||
go func() {
|
||||
conn, err := ln.Accept()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer conn.Close() //nolint:errcheck // test cleanup
|
||||
|
||||
// Read all data from client until EOF (client did CloseWrite).
|
||||
data, err := io.ReadAll(conn)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
_ = data
|
||||
|
||||
// Now send a response back (the write direction is still open).
|
||||
_, _ = conn.Write(response)
|
||||
|
||||
// Signal we're done writing.
|
||||
if tc, ok := conn.(*net.TCPConn); ok {
|
||||
_ = tc.CloseWrite()
|
||||
}
|
||||
}()
|
||||
|
||||
proxyURL := fmt.Sprintf("socks5://%s", ln.Addr().String())
|
||||
relay, err := NewRelay(proxyURL, true)
|
||||
if err != nil {
|
||||
t.Fatalf("NewRelay failed: %v", err)
|
||||
}
|
||||
|
||||
if err := relay.Start(); err != nil {
|
||||
t.Fatalf("Start failed: %v", err)
|
||||
}
|
||||
defer relay.Stop()
|
||||
|
||||
conn, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", relay.Port()))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to connect to relay: %v", err)
|
||||
}
|
||||
defer conn.Close() //nolint:errcheck // test cleanup
|
||||
|
||||
// Send data to the server.
|
||||
clientMsg := []byte("client-data")
|
||||
if _, err := conn.Write(clientMsg); err != nil {
|
||||
t.Fatalf("write failed: %v", err)
|
||||
}
|
||||
|
||||
// Half-close our write side; the server should now receive EOF and send its response.
|
||||
tcpConn, ok := conn.(*net.TCPConn)
|
||||
if !ok {
|
||||
t.Fatal("expected *net.TCPConn")
|
||||
}
|
||||
if err := tcpConn.CloseWrite(); err != nil {
|
||||
t.Fatalf("CloseWrite failed: %v", err)
|
||||
}
|
||||
|
||||
// Read the server's response through the relay.
|
||||
_ = conn.SetReadDeadline(time.Now().Add(3 * time.Second))
|
||||
got, err := io.ReadAll(conn)
|
||||
if err != nil {
|
||||
t.Fatalf("ReadAll failed: %v", err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(got, response) {
|
||||
t.Fatalf("expected %q, got %q", response, got)
|
||||
}
|
||||
}
|
||||
|
||||
// TestRelayPort checks that NewRelay reports a sane dynamically-assigned port.
func TestRelayPort(t *testing.T) {
	echo := startEchoServer(t)
	defer echo.Close() //nolint:errcheck // test cleanup

	proxyURL := fmt.Sprintf("socks5://%s", echo.Addr().String())
	relay, err := NewRelay(proxyURL, false)
	if err != nil {
		t.Fatalf("NewRelay failed: %v", err)
	}
	// Stop without a prior Start is fine: it just closes the idle listeners
	// (the WaitGroup is empty, so Wait returns immediately).
	defer relay.Stop()

	port := relay.Port()
	if port <= 0 || port > 65535 {
		t.Fatalf("invalid port: %d", port)
	}
}
|
||||
|
||||
// TestNewRelayInvalidURL table-tests proxy URLs that must be rejected:
// missing host, missing port, or an empty string.
func TestNewRelayInvalidURL(t *testing.T) {
	tests := []struct {
		name     string
		proxyURL string
	}{
		{"missing port", "socks5://127.0.0.1"},
		{"missing host", "socks5://:1080"},
		{"empty", ""},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			_, err := NewRelay(tt.proxyURL, false)
			if err == nil {
				t.Fatal("expected error, got nil")
			}
		})
	}
}
|
||||
615
internal/daemon/server.go
Normal file
615
internal/daemon/server.go
Normal file
@@ -0,0 +1,615 @@
|
||||
package daemon
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
"os/exec"
|
||||
"os/user"
|
||||
"strings"
|
||||
"sync"
|
||||
"syscall"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Protocol types for JSON communication over Unix socket (newline-delimited).
|
||||
|
||||
// Request from CLI to daemon. One JSON-encoded request is written per
// connection; Action selects the handler and the remaining fields are
// action-specific parameters.
type Request struct {
	Action     string `json:"action"`                // "create_session", "destroy_session", "status", "start_learning", "stop_learning"
	ProxyURL   string `json:"proxy_url,omitempty"`   // for create_session
	DNSAddr    string `json:"dns_addr,omitempty"`    // for create_session
	SessionID  string `json:"session_id,omitempty"`  // for destroy_session
	LearningID string `json:"learning_id,omitempty"` // for stop_learning
}
|
||||
|
||||
// Response from daemon to CLI. Only the fields relevant to the request's
// action are populated; zero-valued optional fields are omitted from the JSON.
type Response struct {
	OK           bool   `json:"ok"`
	Error        string `json:"error,omitempty"`
	SessionID    string `json:"session_id,omitempty"`
	TunDevice    string `json:"tun_device,omitempty"`
	SandboxUser  string `json:"sandbox_user,omitempty"`
	SandboxGroup string `json:"sandbox_group,omitempty"`
	// Status response fields.
	Running        bool `json:"running,omitempty"`
	ActiveSessions int  `json:"active_sessions,omitempty"`
	// Learning response fields.
	LearningID  string `json:"learning_id,omitempty"`
	LearningLog string `json:"learning_log,omitempty"`
}
|
||||
|
||||
// Session tracks an active sandbox session.
type Session struct {
	ID        string    // unique identifier returned to the CLI
	ProxyURL  string    // upstream SOCKS5 proxy for this session
	DNSAddr   string    // requested DNS upstream; may be empty
	CreatedAt time.Time // when the session was created
}
|
||||
|
||||
// Server listens on a Unix socket and manages sandbox sessions. It orchestrates
// TunManager (utun + pf) and DNSRelay lifecycle for each session.
type Server struct {
	socketPath    string       // Unix socket path to listen on
	listener      net.Listener // set in Start, closed in Stop
	tunManager    *TunManager
	dnsRelay      *DNSRelay
	sessions      map[string]*Session
	mu            sync.Mutex // guards the mutable session, tunnel, DNS, and learning state
	done          chan struct{}
	wg            sync.WaitGroup // tracks acceptLoop and per-connection handlers
	debug         bool
	tun2socksPath string
	sandboxGID    string // cached numeric GID for the sandbox group
	// Learning mode state
	esloggerCmd     *exec.Cmd  // running eslogger process
	esloggerLogPath string     // temp file path for eslogger output
	esloggerDone    chan error // receives result of cmd.Wait() (set once, reused for stop)
	learningID      string     // current learning session ID
}
|
||||
|
||||
// NewServer creates a new daemon server that will listen on the given Unix socket path.
|
||||
func NewServer(socketPath, tun2socksPath string, debug bool) *Server {
|
||||
return &Server{
|
||||
socketPath: socketPath,
|
||||
tun2socksPath: tun2socksPath,
|
||||
sessions: make(map[string]*Session),
|
||||
done: make(chan struct{}),
|
||||
debug: debug,
|
||||
}
|
||||
}
|
||||
|
||||
// Start begins listening on the Unix socket and accepting connections.
// It removes any stale socket file before binding.
func (s *Server) Start() error {
	// Pre-resolve the sandbox group GID so session creation is fast
	// and doesn't depend on OpenDirectory latency.
	grp, err := user.LookupGroup(SandboxGroupName)
	if err != nil {
		Logf("Warning: could not resolve group %s at startup: %v (will retry per-session)", SandboxGroupName, err)
	} else {
		s.sandboxGID = grp.Gid
		Logf("Resolved group %s → GID %s", SandboxGroupName, s.sandboxGID)
	}

	// Remove stale socket file if it exists.
	// NOTE(review): Stat+Remove is not atomic; this assumes no second daemon
	// instance races us for the socket — confirm a single-instance guarantee
	// exists at the service-manager level.
	if _, err := os.Stat(s.socketPath); err == nil {
		s.logDebug("Removing stale socket file %s", s.socketPath)
		if err := os.Remove(s.socketPath); err != nil {
			return fmt.Errorf("failed to remove stale socket %s: %w", s.socketPath, err)
		}
	}

	ln, err := net.Listen("unix", s.socketPath)
	if err != nil {
		return fmt.Errorf("failed to listen on %s: %w", s.socketPath, err)
	}
	s.listener = ln

	// Set socket permissions so any local user can connect to the daemon.
	// The socket is localhost-only (Unix domain socket); access control is
	// handled at the daemon protocol level, not file permissions.
	if err := os.Chmod(s.socketPath, 0o666); err != nil { //nolint:gosec // daemon socket needs 0666 so non-root CLI can connect
		_ = ln.Close()
		_ = os.Remove(s.socketPath)
		return fmt.Errorf("failed to set socket permissions: %w", err)
	}

	s.logDebug("Listening on %s", s.socketPath)

	// The accept loop owns the listener from here; Stop closes it to unblock.
	s.wg.Add(1)
	go s.acceptLoop()

	return nil
}
|
||||
|
||||
// Stop gracefully shuts down the server. It stops accepting new connections,
|
||||
// tears down all active sessions, and removes the socket file.
|
||||
func (s *Server) Stop() error {
|
||||
// Signal shutdown.
|
||||
select {
|
||||
case <-s.done:
|
||||
// Already closed.
|
||||
default:
|
||||
close(s.done)
|
||||
}
|
||||
|
||||
// Close the listener to unblock acceptLoop.
|
||||
if s.listener != nil {
|
||||
_ = s.listener.Close()
|
||||
}
|
||||
|
||||
// Wait for the accept loop and any in-flight handlers to finish.
|
||||
s.wg.Wait()
|
||||
|
||||
// Tear down all active sessions and learning.
|
||||
s.mu.Lock()
|
||||
var errs []string
|
||||
|
||||
// Stop learning session if active
|
||||
if s.esloggerCmd != nil && s.esloggerCmd.Process != nil {
|
||||
s.logDebug("Stopping eslogger during shutdown")
|
||||
_ = s.esloggerCmd.Process.Kill()
|
||||
if s.esloggerDone != nil {
|
||||
<-s.esloggerDone
|
||||
}
|
||||
s.esloggerCmd = nil
|
||||
s.esloggerDone = nil
|
||||
s.learningID = ""
|
||||
}
|
||||
if s.esloggerLogPath != "" {
|
||||
_ = os.Remove(s.esloggerLogPath)
|
||||
s.esloggerLogPath = ""
|
||||
}
|
||||
|
||||
for id := range s.sessions {
|
||||
s.logDebug("Stopping session %s during shutdown", id)
|
||||
}
|
||||
|
||||
if s.tunManager != nil {
|
||||
if err := s.tunManager.Stop(); err != nil {
|
||||
errs = append(errs, fmt.Sprintf("stop tun manager: %v", err))
|
||||
}
|
||||
s.tunManager = nil
|
||||
}
|
||||
|
||||
if s.dnsRelay != nil {
|
||||
s.dnsRelay.Stop()
|
||||
s.dnsRelay = nil
|
||||
}
|
||||
|
||||
s.sessions = make(map[string]*Session)
|
||||
s.mu.Unlock()
|
||||
|
||||
// Remove the socket file.
|
||||
if err := os.Remove(s.socketPath); err != nil && !os.IsNotExist(err) {
|
||||
errs = append(errs, fmt.Sprintf("remove socket: %v", err))
|
||||
}
|
||||
|
||||
if len(errs) > 0 {
|
||||
return fmt.Errorf("stop errors: %s", join(errs, "; "))
|
||||
}
|
||||
|
||||
s.logDebug("Server stopped")
|
||||
return nil
|
||||
}
|
||||
|
||||
// ActiveSessions returns the number of currently active sessions.
// It takes the server mutex, so it is safe to call from any goroutine.
func (s *Server) ActiveSessions() int {
	s.mu.Lock()
	defer s.mu.Unlock()
	return len(s.sessions)
}
|
||||
|
||||
// acceptLoop runs the main accept loop for the Unix socket listener.
|
||||
func (s *Server) acceptLoop() {
|
||||
defer s.wg.Done()
|
||||
|
||||
for {
|
||||
conn, err := s.listener.Accept()
|
||||
if err != nil {
|
||||
select {
|
||||
case <-s.done:
|
||||
return
|
||||
default:
|
||||
}
|
||||
s.logDebug("Accept error: %v", err)
|
||||
continue
|
||||
}
|
||||
|
||||
s.wg.Add(1)
|
||||
go s.handleConnection(conn)
|
||||
}
|
||||
}
|
||||
|
||||
// handleConnection reads a single JSON request from the connection, dispatches
|
||||
// it to the appropriate handler, and writes the JSON response back.
|
||||
func (s *Server) handleConnection(conn net.Conn) {
|
||||
defer s.wg.Done()
|
||||
defer conn.Close() //nolint:errcheck // best-effort close after handling request
|
||||
|
||||
// Set a read deadline to prevent hung connections.
|
||||
if err := conn.SetReadDeadline(time.Now().Add(30 * time.Second)); err != nil {
|
||||
s.logDebug("Failed to set read deadline: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
decoder := json.NewDecoder(conn)
|
||||
encoder := json.NewEncoder(conn)
|
||||
|
||||
var req Request
|
||||
if err := decoder.Decode(&req); err != nil {
|
||||
s.logDebug("Failed to decode request: %v", err)
|
||||
resp := Response{OK: false, Error: fmt.Sprintf("invalid request: %v", err)}
|
||||
_ = encoder.Encode(resp) // best-effort error response
|
||||
return
|
||||
}
|
||||
|
||||
Logf("Received request: action=%s", req.Action)
|
||||
|
||||
var resp Response
|
||||
switch req.Action {
|
||||
case "create_session":
|
||||
resp = s.handleCreateSession(req)
|
||||
case "destroy_session":
|
||||
resp = s.handleDestroySession(req)
|
||||
case "start_learning":
|
||||
resp = s.handleStartLearning()
|
||||
case "stop_learning":
|
||||
resp = s.handleStopLearning(req)
|
||||
case "status":
|
||||
resp = s.handleStatus()
|
||||
default:
|
||||
resp = Response{OK: false, Error: fmt.Sprintf("unknown action: %q", req.Action)}
|
||||
}
|
||||
|
||||
if err := encoder.Encode(resp); err != nil {
|
||||
s.logDebug("Failed to encode response: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// handleCreateSession creates a new sandbox session with a utun tunnel,
|
||||
// optional DNS relay, and pf rules for the sandbox group.
|
||||
func (s *Server) handleCreateSession(req Request) Response {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
if req.ProxyURL == "" {
|
||||
return Response{OK: false, Error: "proxy_url is required"}
|
||||
}
|
||||
|
||||
// Phase 1: only one session at a time.
|
||||
if len(s.sessions) > 0 {
|
||||
Logf("Rejecting create_session: %d session(s) already active", len(s.sessions))
|
||||
return Response{OK: false, Error: "a session is already active (only one session supported in Phase 1)"}
|
||||
}
|
||||
|
||||
Logf("Creating session: proxy=%s dns=%s", req.ProxyURL, req.DNSAddr)
|
||||
|
||||
// Step 1: Create and start TunManager.
|
||||
tm := NewTunManager(s.tun2socksPath, req.ProxyURL, s.debug)
|
||||
if err := tm.Start(); err != nil {
|
||||
return Response{OK: false, Error: fmt.Sprintf("failed to start tunnel: %v", err)}
|
||||
}
|
||||
|
||||
// Step 2: Create DNS relay. pf rules always redirect DNS (UDP:53) from
|
||||
// the sandbox group to the relay address, so we must always start the
|
||||
// relay when a proxy session is active. If no explicit DNS address was
|
||||
// provided, default to the proxy's DNS resolver.
|
||||
dnsTarget := req.DNSAddr
|
||||
if dnsTarget == "" {
|
||||
dnsTarget = defaultDNSTarget
|
||||
Logf("No dns_addr provided, defaulting DNS relay upstream to %s", dnsTarget)
|
||||
}
|
||||
dr, err := NewDNSRelay(dnsRelayIP+":"+dnsRelayPort, dnsTarget, s.debug)
|
||||
if err != nil {
|
||||
if stopErr := tm.Stop(); stopErr != nil {
|
||||
Logf("Warning: failed to stop tunnel during cleanup: %v", stopErr)
|
||||
}
|
||||
return Response{OK: false, Error: fmt.Sprintf("failed to create DNS relay: %v", err)}
|
||||
}
|
||||
if err := dr.Start(); err != nil {
|
||||
if stopErr := tm.Stop(); stopErr != nil {
|
||||
Logf("Warning: failed to stop tunnel during cleanup: %v", stopErr)
|
||||
}
|
||||
return Response{OK: false, Error: fmt.Sprintf("failed to start DNS relay: %v", err)}
|
||||
}
|
||||
|
||||
// Step 3: Resolve the sandbox group GID. pfctl in the LaunchDaemon
|
||||
// context cannot resolve group names via OpenDirectory, so we use the
|
||||
// cached GID (resolved at startup) or look it up now.
|
||||
sandboxGID := s.sandboxGID
|
||||
if sandboxGID == "" {
|
||||
grp, err := user.LookupGroup(SandboxGroupName)
|
||||
if err != nil {
|
||||
_ = tm.Stop()
|
||||
dr.Stop()
|
||||
return Response{OK: false, Error: fmt.Sprintf("failed to resolve group %s: %v", SandboxGroupName, err)}
|
||||
}
|
||||
sandboxGID = grp.Gid
|
||||
s.sandboxGID = sandboxGID
|
||||
}
|
||||
Logf("Loading pf rules for group %s (GID %s)", SandboxGroupName, sandboxGID)
|
||||
if err := tm.LoadPFRules(sandboxGID); err != nil {
|
||||
dr.Stop()
|
||||
_ = tm.Stop() // best-effort cleanup
|
||||
return Response{OK: false, Error: fmt.Sprintf("failed to load pf rules: %v", err)}
|
||||
}
|
||||
|
||||
// Step 4: Generate session ID and store.
|
||||
sessionID, err := generateSessionID()
|
||||
if err != nil {
|
||||
dr.Stop()
|
||||
_ = tm.UnloadPFRules() // best-effort cleanup
|
||||
_ = tm.Stop() // best-effort cleanup
|
||||
return Response{OK: false, Error: fmt.Sprintf("failed to generate session ID: %v", err)}
|
||||
}
|
||||
|
||||
session := &Session{
|
||||
ID: sessionID,
|
||||
ProxyURL: req.ProxyURL,
|
||||
DNSAddr: dnsTarget,
|
||||
CreatedAt: time.Now(),
|
||||
}
|
||||
s.sessions[sessionID] = session
|
||||
s.tunManager = tm
|
||||
s.dnsRelay = dr
|
||||
|
||||
Logf("Session created: id=%s device=%s group=%s(GID %s)", sessionID, tm.TunDevice(), SandboxGroupName, sandboxGID)
|
||||
|
||||
return Response{
|
||||
OK: true,
|
||||
SessionID: sessionID,
|
||||
TunDevice: tm.TunDevice(),
|
||||
SandboxUser: SandboxUserName,
|
||||
SandboxGroup: SandboxGroupName,
|
||||
}
|
||||
}
|
||||
|
||||
// handleDestroySession tears down an existing session by unloading pf rules,
|
||||
// stopping the tunnel, and stopping the DNS relay.
|
||||
func (s *Server) handleDestroySession(req Request) Response {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
if req.SessionID == "" {
|
||||
return Response{OK: false, Error: "session_id is required"}
|
||||
}
|
||||
|
||||
Logf("Destroying session: id=%s", req.SessionID)
|
||||
|
||||
session, ok := s.sessions[req.SessionID]
|
||||
if !ok {
|
||||
Logf("Session %q not found (active sessions: %d)", req.SessionID, len(s.sessions))
|
||||
return Response{OK: false, Error: fmt.Sprintf("session %q not found", req.SessionID)}
|
||||
}
|
||||
|
||||
var errs []string
|
||||
|
||||
// Step 1: Unload pf rules.
|
||||
if s.tunManager != nil {
|
||||
if err := s.tunManager.UnloadPFRules(); err != nil {
|
||||
errs = append(errs, fmt.Sprintf("unload pf rules: %v", err))
|
||||
}
|
||||
}
|
||||
|
||||
// Step 2: Stop tun manager.
|
||||
if s.tunManager != nil {
|
||||
if err := s.tunManager.Stop(); err != nil {
|
||||
errs = append(errs, fmt.Sprintf("stop tun manager: %v", err))
|
||||
}
|
||||
s.tunManager = nil
|
||||
}
|
||||
|
||||
// Step 3: Stop DNS relay.
|
||||
if s.dnsRelay != nil {
|
||||
s.dnsRelay.Stop()
|
||||
s.dnsRelay = nil
|
||||
}
|
||||
|
||||
// Step 4: Remove session.
|
||||
delete(s.sessions, session.ID)
|
||||
|
||||
if len(errs) > 0 {
|
||||
Logf("Session %s destroyed with errors: %v", session.ID, errs)
|
||||
return Response{OK: false, Error: fmt.Sprintf("session destroyed with errors: %s", join(errs, "; "))}
|
||||
}
|
||||
|
||||
Logf("Session destroyed: id=%s (remaining: %d)", session.ID, len(s.sessions))
|
||||
return Response{OK: true}
|
||||
}
|
||||
|
||||
// handleStartLearning starts an eslogger trace for learning mode.
|
||||
// eslogger uses the Endpoint Security framework and reports real Unix PIDs
|
||||
// via audit_token.pid, plus fork events for process tree tracking.
|
||||
func (s *Server) handleStartLearning() Response {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
// Only one learning session at a time
|
||||
if s.learningID != "" {
|
||||
return Response{OK: false, Error: "a learning session is already active"}
|
||||
}
|
||||
|
||||
// Create temp file for eslogger output.
|
||||
// The daemon runs as root but the CLI reads this file as a normal user,
|
||||
// so we must make it world-readable.
|
||||
logFile, err := os.CreateTemp("", "greywall-eslogger-*.log")
|
||||
if err != nil {
|
||||
return Response{OK: false, Error: fmt.Sprintf("failed to create temp file: %v", err)}
|
||||
}
|
||||
|
||||
logPath := logFile.Name()
|
||||
if err := os.Chmod(logPath, 0o644); err != nil { //nolint:gosec // intentionally world-readable so non-root CLI can parse the log
|
||||
_ = logFile.Close()
|
||||
_ = os.Remove(logPath) //nolint:gosec // logPath from os.CreateTemp, not user input
|
||||
return Response{OK: false, Error: fmt.Sprintf("failed to set log file permissions: %v", err)}
|
||||
}
|
||||
|
||||
// Create a separate file for eslogger stderr so we can diagnose failures.
|
||||
stderrFile, err := os.CreateTemp("", "greywall-eslogger-stderr-*.log")
|
||||
if err != nil {
|
||||
_ = logFile.Close()
|
||||
_ = os.Remove(logPath) //nolint:gosec // logPath from os.CreateTemp, not user input
|
||||
return Response{OK: false, Error: fmt.Sprintf("failed to create stderr file: %v", err)}
|
||||
}
|
||||
stderrPath := stderrFile.Name()
|
||||
|
||||
// Start eslogger with filesystem events + fork for process tree tracking.
|
||||
// eslogger outputs one JSON object per line to stdout.
|
||||
cmd := exec.Command("eslogger", "open", "create", "write", "unlink", "rename", "link", "truncate", "fork") //nolint:gosec // daemon-controlled command
|
||||
cmd.Stdout = logFile
|
||||
cmd.Stderr = stderrFile
|
||||
|
||||
if err := cmd.Start(); err != nil {
|
||||
_ = logFile.Close()
|
||||
_ = stderrFile.Close()
|
||||
_ = os.Remove(logPath) //nolint:gosec // logPath from os.CreateTemp, not user input
|
||||
_ = os.Remove(stderrPath) //nolint:gosec // stderrPath from os.CreateTemp, not user input
|
||||
return Response{OK: false, Error: fmt.Sprintf("failed to start eslogger: %v", err)}
|
||||
}
|
||||
|
||||
// Generate learning ID
|
||||
learningID, err := generateSessionID()
|
||||
if err != nil {
|
||||
_ = cmd.Process.Kill()
|
||||
_ = logFile.Close()
|
||||
_ = stderrFile.Close()
|
||||
_ = os.Remove(logPath) //nolint:gosec // logPath from os.CreateTemp, not user input
|
||||
_ = os.Remove(stderrPath) //nolint:gosec // stderrPath from os.CreateTemp, not user input
|
||||
return Response{OK: false, Error: fmt.Sprintf("failed to generate learning ID: %v", err)}
|
||||
}
|
||||
|
||||
// Wait briefly for eslogger to initialize, then check if it exited early
|
||||
// (e.g., missing Full Disk Access permission).
|
||||
exitCh := make(chan error, 1)
|
||||
go func() {
|
||||
exitCh <- cmd.Wait()
|
||||
}()
|
||||
|
||||
select {
|
||||
case waitErr := <-exitCh:
|
||||
// eslogger exited during startup — read stderr for the error message
|
||||
_ = stderrFile.Close()
|
||||
stderrContent, _ := os.ReadFile(stderrPath) //nolint:gosec // stderrPath from os.CreateTemp
|
||||
_ = os.Remove(stderrPath) //nolint:gosec
|
||||
_ = logFile.Close()
|
||||
_ = os.Remove(logPath) //nolint:gosec
|
||||
errMsg := strings.TrimSpace(string(stderrContent))
|
||||
if errMsg == "" {
|
||||
errMsg = fmt.Sprintf("eslogger exited: %v", waitErr)
|
||||
}
|
||||
if strings.Contains(errMsg, "Full Disk Access") {
|
||||
errMsg += "\n\nGrant Full Disk Access to /usr/local/bin/greywall:\n" +
|
||||
" System Settings → Privacy & Security → Full Disk Access → add /usr/local/bin/greywall\n" +
|
||||
"Then reinstall the daemon: sudo greywall daemon uninstall -f && sudo greywall daemon install"
|
||||
}
|
||||
return Response{OK: false, Error: fmt.Sprintf("eslogger failed to start: %s", errMsg)}
|
||||
|
||||
case <-time.After(500 * time.Millisecond):
|
||||
// eslogger is still running after 500ms — good, it initialized successfully
|
||||
}
|
||||
|
||||
s.esloggerCmd = cmd
|
||||
s.esloggerLogPath = logPath
|
||||
s.esloggerDone = exitCh
|
||||
s.learningID = learningID
|
||||
|
||||
// Clean up stderr file now that eslogger is running
|
||||
_ = stderrFile.Close()
|
||||
_ = os.Remove(stderrPath) //nolint:gosec
|
||||
|
||||
Logf("Learning session started: id=%s log=%s pid=%d", learningID, logPath, cmd.Process.Pid)
|
||||
|
||||
return Response{
|
||||
OK: true,
|
||||
LearningID: learningID,
|
||||
LearningLog: logPath,
|
||||
}
|
||||
}
|
||||
|
||||
// handleStopLearning stops the eslogger trace for a learning session.
|
||||
func (s *Server) handleStopLearning(req Request) Response {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
if req.LearningID == "" {
|
||||
return Response{OK: false, Error: "learning_id is required"}
|
||||
}
|
||||
|
||||
if s.learningID == "" || s.learningID != req.LearningID {
|
||||
return Response{OK: false, Error: fmt.Sprintf("learning session %q not found", req.LearningID)}
|
||||
}
|
||||
|
||||
if s.esloggerCmd != nil && s.esloggerCmd.Process != nil {
|
||||
// Send SIGINT to eslogger for graceful shutdown (flushes buffers)
|
||||
_ = s.esloggerCmd.Process.Signal(syscall.SIGINT)
|
||||
|
||||
// Reuse the wait channel from startup (cmd.Wait already called there)
|
||||
if s.esloggerDone != nil {
|
||||
select {
|
||||
case <-s.esloggerDone:
|
||||
// Exited cleanly
|
||||
case <-time.After(5 * time.Second):
|
||||
// Force kill after timeout
|
||||
_ = s.esloggerCmd.Process.Kill()
|
||||
<-s.esloggerDone
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Logf("Learning session stopped: id=%s", s.learningID)
|
||||
|
||||
s.esloggerCmd = nil
|
||||
s.esloggerDone = nil
|
||||
s.learningID = ""
|
||||
// Don't remove the log file — the CLI needs to read it
|
||||
s.esloggerLogPath = ""
|
||||
|
||||
return Response{OK: true}
|
||||
}
|
||||
|
||||
// handleStatus returns the current daemon status including whether it is running
|
||||
// and how many sessions are active.
|
||||
func (s *Server) handleStatus() Response {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
return Response{
|
||||
OK: true,
|
||||
Running: true,
|
||||
ActiveSessions: len(s.sessions),
|
||||
}
|
||||
}
|
||||
|
||||
// generateSessionID produces a cryptographically random hex session identifier:
// 16 bytes from crypto/rand rendered as 32 lowercase hex characters.
func generateSessionID() (string, error) {
	var raw [16]byte
	if _, err := rand.Read(raw[:]); err != nil {
		return "", fmt.Errorf("failed to read random bytes: %w", err)
	}
	return hex.EncodeToString(raw[:]), nil
}
|
||||
|
||||
// join concatenates parts with sep between elements; a nil or empty slice
// yields "".
//
// It delegates to strings.Join: this file already imports strings (the
// original comment claiming otherwise was stale), and the hand-rolled +=
// loop it replaces was quadratic.
func join(parts []string, sep string) string {
	return strings.Join(parts, sep)
}
|
||||
|
||||
// logDebug writes a timestamped debug message to stderr.
|
||||
func (s *Server) logDebug(format string, args ...interface{}) {
|
||||
if s.debug {
|
||||
Logf(format, args...)
|
||||
}
|
||||
}
|
||||
527
internal/daemon/server_test.go
Normal file
527
internal/daemon/server_test.go
Normal file
@@ -0,0 +1,527 @@
|
||||
package daemon
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// testSocketPath returns a temporary Unix socket path for testing.
|
||||
// macOS limits Unix socket paths to 104 bytes, so we use a short temp directory
|
||||
// under /tmp rather than the longer t.TempDir() paths.
|
||||
func testSocketPath(t *testing.T) string {
|
||||
t.Helper()
|
||||
dir, err := os.MkdirTemp("/tmp", "gw-")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create temp dir: %v", err)
|
||||
}
|
||||
sockPath := filepath.Join(dir, "d.sock")
|
||||
t.Cleanup(func() {
|
||||
_ = os.RemoveAll(dir)
|
||||
})
|
||||
return sockPath
|
||||
}
|
||||
|
||||
func TestServerStartStop(t *testing.T) {
|
||||
sockPath := testSocketPath(t)
|
||||
|
||||
srv := NewServer(sockPath, "/nonexistent/tun2socks", true)
|
||||
|
||||
if err := srv.Start(); err != nil {
|
||||
t.Fatalf("Start failed: %v", err)
|
||||
}
|
||||
|
||||
// Verify socket file exists.
|
||||
info, err := os.Stat(sockPath)
|
||||
if err != nil {
|
||||
t.Fatalf("Socket file not found: %v", err)
|
||||
}
|
||||
|
||||
// Verify socket permissions (0666 — any local user can connect).
|
||||
perm := info.Mode().Perm()
|
||||
if perm != 0o666 {
|
||||
t.Errorf("Expected socket permissions 0666, got %o", perm)
|
||||
}
|
||||
|
||||
// Verify no active sessions at start.
|
||||
if n := srv.ActiveSessions(); n != 0 {
|
||||
t.Errorf("Expected 0 active sessions, got %d", n)
|
||||
}
|
||||
|
||||
if err := srv.Stop(); err != nil {
|
||||
t.Fatalf("Stop failed: %v", err)
|
||||
}
|
||||
|
||||
// Verify socket file is removed after stop.
|
||||
if _, err := os.Stat(sockPath); !os.IsNotExist(err) {
|
||||
t.Error("Socket file should be removed after stop")
|
||||
}
|
||||
}
|
||||
|
||||
func TestServerStartRemovesStaleSocket(t *testing.T) {
|
||||
sockPath := testSocketPath(t)
|
||||
|
||||
// Create a stale socket file.
|
||||
if err := os.WriteFile(sockPath, []byte("stale"), 0o600); err != nil {
|
||||
t.Fatalf("Failed to create stale file: %v", err)
|
||||
}
|
||||
|
||||
srv := NewServer(sockPath, "/nonexistent/tun2socks", true)
|
||||
|
||||
if err := srv.Start(); err != nil {
|
||||
t.Fatalf("Start failed with stale socket: %v", err)
|
||||
}
|
||||
defer srv.Stop() //nolint:errcheck // test cleanup
|
||||
|
||||
// Verify the server is listening by connecting.
|
||||
conn, err := net.DialTimeout("unix", sockPath, 2*time.Second)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to connect to server: %v", err)
|
||||
}
|
||||
_ = conn.Close()
|
||||
}
|
||||
|
||||
func TestServerDoubleStop(t *testing.T) {
|
||||
sockPath := testSocketPath(t)
|
||||
|
||||
srv := NewServer(sockPath, "/nonexistent/tun2socks", false)
|
||||
if err := srv.Start(); err != nil {
|
||||
t.Fatalf("Start failed: %v", err)
|
||||
}
|
||||
|
||||
// First stop should succeed.
|
||||
if err := srv.Stop(); err != nil {
|
||||
t.Fatalf("First stop failed: %v", err)
|
||||
}
|
||||
|
||||
// Second stop should not panic (socket already removed).
|
||||
_ = srv.Stop()
|
||||
}
|
||||
|
||||
func TestProtocolStatus(t *testing.T) {
|
||||
sockPath := testSocketPath(t)
|
||||
|
||||
srv := NewServer(sockPath, "/nonexistent/tun2socks", true)
|
||||
if err := srv.Start(); err != nil {
|
||||
t.Fatalf("Start failed: %v", err)
|
||||
}
|
||||
defer srv.Stop() //nolint:errcheck // test cleanup
|
||||
|
||||
// Send a status request.
|
||||
resp := sendTestRequest(t, sockPath, Request{Action: "status"})
|
||||
|
||||
if !resp.OK {
|
||||
t.Fatalf("Expected OK=true, got error: %s", resp.Error)
|
||||
}
|
||||
if !resp.Running {
|
||||
t.Error("Expected Running=true")
|
||||
}
|
||||
if resp.ActiveSessions != 0 {
|
||||
t.Errorf("Expected 0 active sessions, got %d", resp.ActiveSessions)
|
||||
}
|
||||
}
|
||||
|
||||
func TestProtocolUnknownAction(t *testing.T) {
|
||||
sockPath := testSocketPath(t)
|
||||
|
||||
srv := NewServer(sockPath, "/nonexistent/tun2socks", true)
|
||||
if err := srv.Start(); err != nil {
|
||||
t.Fatalf("Start failed: %v", err)
|
||||
}
|
||||
defer srv.Stop() //nolint:errcheck // test cleanup
|
||||
|
||||
resp := sendTestRequest(t, sockPath, Request{Action: "unknown_action"})
|
||||
|
||||
if resp.OK {
|
||||
t.Fatal("Expected OK=false for unknown action")
|
||||
}
|
||||
if resp.Error == "" {
|
||||
t.Error("Expected error message for unknown action")
|
||||
}
|
||||
}
|
||||
|
||||
func TestProtocolCreateSessionMissingProxy(t *testing.T) {
|
||||
sockPath := testSocketPath(t)
|
||||
|
||||
srv := NewServer(sockPath, "/nonexistent/tun2socks", true)
|
||||
if err := srv.Start(); err != nil {
|
||||
t.Fatalf("Start failed: %v", err)
|
||||
}
|
||||
defer srv.Stop() //nolint:errcheck // test cleanup
|
||||
|
||||
// Create session without proxy_url should fail.
|
||||
resp := sendTestRequest(t, sockPath, Request{
|
||||
Action: "create_session",
|
||||
})
|
||||
|
||||
if resp.OK {
|
||||
t.Fatal("Expected OK=false for missing proxy URL")
|
||||
}
|
||||
if resp.Error == "" {
|
||||
t.Error("Expected error message for missing proxy URL")
|
||||
}
|
||||
}
|
||||
|
||||
func TestProtocolCreateSessionTunFailure(t *testing.T) {
|
||||
sockPath := testSocketPath(t)
|
||||
|
||||
// Use a nonexistent tun2socks path so TunManager.Start() will fail.
|
||||
srv := NewServer(sockPath, "/nonexistent/tun2socks", true)
|
||||
if err := srv.Start(); err != nil {
|
||||
t.Fatalf("Start failed: %v", err)
|
||||
}
|
||||
defer srv.Stop() //nolint:errcheck // test cleanup
|
||||
|
||||
// Create session should fail because tun2socks binary does not exist.
|
||||
resp := sendTestRequest(t, sockPath, Request{
|
||||
Action: "create_session",
|
||||
ProxyURL: "socks5://127.0.0.1:1080",
|
||||
})
|
||||
|
||||
if resp.OK {
|
||||
t.Fatal("Expected OK=false when tun2socks is not available")
|
||||
}
|
||||
if resp.Error == "" {
|
||||
t.Error("Expected error message when tun2socks fails")
|
||||
}
|
||||
|
||||
// Verify no session was created.
|
||||
if srv.ActiveSessions() != 0 {
|
||||
t.Error("Expected 0 active sessions after failed create")
|
||||
}
|
||||
}
|
||||
|
||||
func TestProtocolDestroySessionMissingID(t *testing.T) {
|
||||
sockPath := testSocketPath(t)
|
||||
|
||||
srv := NewServer(sockPath, "/nonexistent/tun2socks", true)
|
||||
if err := srv.Start(); err != nil {
|
||||
t.Fatalf("Start failed: %v", err)
|
||||
}
|
||||
defer srv.Stop() //nolint:errcheck // test cleanup
|
||||
|
||||
resp := sendTestRequest(t, sockPath, Request{
|
||||
Action: "destroy_session",
|
||||
})
|
||||
|
||||
if resp.OK {
|
||||
t.Fatal("Expected OK=false for missing session ID")
|
||||
}
|
||||
if resp.Error == "" {
|
||||
t.Error("Expected error message for missing session ID")
|
||||
}
|
||||
}
|
||||
|
||||
func TestProtocolDestroySessionNotFound(t *testing.T) {
|
||||
sockPath := testSocketPath(t)
|
||||
|
||||
srv := NewServer(sockPath, "/nonexistent/tun2socks", true)
|
||||
if err := srv.Start(); err != nil {
|
||||
t.Fatalf("Start failed: %v", err)
|
||||
}
|
||||
defer srv.Stop() //nolint:errcheck // test cleanup
|
||||
|
||||
resp := sendTestRequest(t, sockPath, Request{
|
||||
Action: "destroy_session",
|
||||
SessionID: "nonexistent-session-id",
|
||||
})
|
||||
|
||||
if resp.OK {
|
||||
t.Fatal("Expected OK=false for nonexistent session")
|
||||
}
|
||||
if resp.Error == "" {
|
||||
t.Error("Expected error message for nonexistent session")
|
||||
}
|
||||
}
|
||||
|
||||
func TestProtocolInvalidJSON(t *testing.T) {
|
||||
sockPath := testSocketPath(t)
|
||||
|
||||
srv := NewServer(sockPath, "/nonexistent/tun2socks", true)
|
||||
if err := srv.Start(); err != nil {
|
||||
t.Fatalf("Start failed: %v", err)
|
||||
}
|
||||
defer srv.Stop() //nolint:errcheck // test cleanup
|
||||
|
||||
// Send invalid JSON to the server.
|
||||
conn, err := net.DialTimeout("unix", sockPath, 2*time.Second)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to connect: %v", err)
|
||||
}
|
||||
defer conn.Close() //nolint:errcheck // test cleanup
|
||||
|
||||
if _, err := conn.Write([]byte("not valid json\n")); err != nil {
|
||||
t.Fatalf("Failed to write: %v", err)
|
||||
}
|
||||
|
||||
// Read error response.
|
||||
_ = conn.SetReadDeadline(time.Now().Add(5 * time.Second))
|
||||
decoder := json.NewDecoder(conn)
|
||||
var resp Response
|
||||
if err := decoder.Decode(&resp); err != nil {
|
||||
t.Fatalf("Failed to decode error response: %v", err)
|
||||
}
|
||||
|
||||
if resp.OK {
|
||||
t.Fatal("Expected OK=false for invalid JSON")
|
||||
}
|
||||
if resp.Error == "" {
|
||||
t.Error("Expected error message for invalid JSON")
|
||||
}
|
||||
}
|
||||
|
||||
func TestClientIsRunning(t *testing.T) {
|
||||
sockPath := testSocketPath(t)
|
||||
|
||||
client := NewClient(sockPath, true)
|
||||
|
||||
// Server not started yet.
|
||||
if client.IsRunning() {
|
||||
t.Error("Expected IsRunning=false when server is not started")
|
||||
}
|
||||
|
||||
// Start the server.
|
||||
srv := NewServer(sockPath, "/nonexistent/tun2socks", true)
|
||||
if err := srv.Start(); err != nil {
|
||||
t.Fatalf("Start failed: %v", err)
|
||||
}
|
||||
defer srv.Stop() //nolint:errcheck // test cleanup
|
||||
|
||||
// Now the client should detect the server.
|
||||
if !client.IsRunning() {
|
||||
t.Error("Expected IsRunning=true when server is running")
|
||||
}
|
||||
}
|
||||
|
||||
func TestClientStatus(t *testing.T) {
|
||||
sockPath := testSocketPath(t)
|
||||
|
||||
srv := NewServer(sockPath, "/nonexistent/tun2socks", true)
|
||||
if err := srv.Start(); err != nil {
|
||||
t.Fatalf("Start failed: %v", err)
|
||||
}
|
||||
defer srv.Stop() //nolint:errcheck // test cleanup
|
||||
|
||||
client := NewClient(sockPath, true)
|
||||
resp, err := client.Status()
|
||||
if err != nil {
|
||||
t.Fatalf("Status failed: %v", err)
|
||||
}
|
||||
|
||||
if !resp.OK {
|
||||
t.Fatalf("Expected OK=true, got error: %s", resp.Error)
|
||||
}
|
||||
if !resp.Running {
|
||||
t.Error("Expected Running=true")
|
||||
}
|
||||
if resp.ActiveSessions != 0 {
|
||||
t.Errorf("Expected 0 active sessions, got %d", resp.ActiveSessions)
|
||||
}
|
||||
}
|
||||
|
||||
func TestClientDestroySessionNotFound(t *testing.T) {
|
||||
sockPath := testSocketPath(t)
|
||||
|
||||
srv := NewServer(sockPath, "/nonexistent/tun2socks", true)
|
||||
if err := srv.Start(); err != nil {
|
||||
t.Fatalf("Start failed: %v", err)
|
||||
}
|
||||
defer srv.Stop() //nolint:errcheck // test cleanup
|
||||
|
||||
client := NewClient(sockPath, true)
|
||||
err := client.DestroySession("nonexistent-id")
|
||||
if err == nil {
|
||||
t.Fatal("Expected error for nonexistent session")
|
||||
}
|
||||
}
|
||||
|
||||
func TestClientConnectionRefused(t *testing.T) {
|
||||
sockPath := testSocketPath(t)
|
||||
|
||||
// No server running.
|
||||
client := NewClient(sockPath, true)
|
||||
|
||||
_, err := client.Status()
|
||||
if err == nil {
|
||||
t.Fatal("Expected error when server is not running")
|
||||
}
|
||||
|
||||
_, err = client.CreateSession("socks5://127.0.0.1:1080", "")
|
||||
if err == nil {
|
||||
t.Fatal("Expected error when server is not running")
|
||||
}
|
||||
|
||||
err = client.DestroySession("some-id")
|
||||
if err == nil {
|
||||
t.Fatal("Expected error when server is not running")
|
||||
}
|
||||
}
|
||||
|
||||
func TestProtocolMultipleStatusRequests(t *testing.T) {
|
||||
sockPath := testSocketPath(t)
|
||||
|
||||
srv := NewServer(sockPath, "/nonexistent/tun2socks", true)
|
||||
if err := srv.Start(); err != nil {
|
||||
t.Fatalf("Start failed: %v", err)
|
||||
}
|
||||
defer srv.Stop() //nolint:errcheck // test cleanup
|
||||
|
||||
// Send multiple status requests sequentially (each on a new connection).
|
||||
for i := 0; i < 5; i++ {
|
||||
resp := sendTestRequest(t, sockPath, Request{Action: "status"})
|
||||
if !resp.OK {
|
||||
t.Fatalf("Request %d: expected OK=true, got error: %s", i, resp.Error)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestProtocolRequestResponseJSON(t *testing.T) {
|
||||
// Test that protocol types serialize/deserialize correctly.
|
||||
req := Request{
|
||||
Action: "create_session",
|
||||
ProxyURL: "socks5://127.0.0.1:1080",
|
||||
DNSAddr: "1.1.1.1:53",
|
||||
SessionID: "test-session",
|
||||
}
|
||||
|
||||
data, err := json.Marshal(req)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to marshal request: %v", err)
|
||||
}
|
||||
|
||||
var decoded Request
|
||||
if err := json.Unmarshal(data, &decoded); err != nil {
|
||||
t.Fatalf("Failed to unmarshal request: %v", err)
|
||||
}
|
||||
|
||||
if decoded.Action != req.Action {
|
||||
t.Errorf("Action: got %q, want %q", decoded.Action, req.Action)
|
||||
}
|
||||
if decoded.ProxyURL != req.ProxyURL {
|
||||
t.Errorf("ProxyURL: got %q, want %q", decoded.ProxyURL, req.ProxyURL)
|
||||
}
|
||||
if decoded.DNSAddr != req.DNSAddr {
|
||||
t.Errorf("DNSAddr: got %q, want %q", decoded.DNSAddr, req.DNSAddr)
|
||||
}
|
||||
if decoded.SessionID != req.SessionID {
|
||||
t.Errorf("SessionID: got %q, want %q", decoded.SessionID, req.SessionID)
|
||||
}
|
||||
|
||||
resp := Response{
|
||||
OK: true,
|
||||
SessionID: "abc123",
|
||||
TunDevice: "utun7",
|
||||
SandboxUser: "_greywall",
|
||||
SandboxGroup: "_greywall",
|
||||
Running: true,
|
||||
ActiveSessions: 1,
|
||||
}
|
||||
|
||||
data, err = json.Marshal(resp)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to marshal response: %v", err)
|
||||
}
|
||||
|
||||
var decodedResp Response
|
||||
if err := json.Unmarshal(data, &decodedResp); err != nil {
|
||||
t.Fatalf("Failed to unmarshal response: %v", err)
|
||||
}
|
||||
|
||||
if decodedResp.OK != resp.OK {
|
||||
t.Errorf("OK: got %v, want %v", decodedResp.OK, resp.OK)
|
||||
}
|
||||
if decodedResp.SessionID != resp.SessionID {
|
||||
t.Errorf("SessionID: got %q, want %q", decodedResp.SessionID, resp.SessionID)
|
||||
}
|
||||
if decodedResp.TunDevice != resp.TunDevice {
|
||||
t.Errorf("TunDevice: got %q, want %q", decodedResp.TunDevice, resp.TunDevice)
|
||||
}
|
||||
if decodedResp.SandboxUser != resp.SandboxUser {
|
||||
t.Errorf("SandboxUser: got %q, want %q", decodedResp.SandboxUser, resp.SandboxUser)
|
||||
}
|
||||
if decodedResp.SandboxGroup != resp.SandboxGroup {
|
||||
t.Errorf("SandboxGroup: got %q, want %q", decodedResp.SandboxGroup, resp.SandboxGroup)
|
||||
}
|
||||
if decodedResp.Running != resp.Running {
|
||||
t.Errorf("Running: got %v, want %v", decodedResp.Running, resp.Running)
|
||||
}
|
||||
if decodedResp.ActiveSessions != resp.ActiveSessions {
|
||||
t.Errorf("ActiveSessions: got %d, want %d", decodedResp.ActiveSessions, resp.ActiveSessions)
|
||||
}
|
||||
}
|
||||
|
||||
func TestProtocolResponseOmitEmpty(t *testing.T) {
|
||||
// Verify omitempty works: error-only response should not include session fields.
|
||||
resp := Response{OK: false, Error: "something went wrong"}
|
||||
|
||||
data, err := json.Marshal(resp)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to marshal: %v", err)
|
||||
}
|
||||
|
||||
var raw map[string]interface{}
|
||||
if err := json.Unmarshal(data, &raw); err != nil {
|
||||
t.Fatalf("Failed to unmarshal to map: %v", err)
|
||||
}
|
||||
|
||||
// These fields should be omitted due to omitempty.
|
||||
for _, key := range []string{"session_id", "tun_device", "sandbox_user", "sandbox_group"} {
|
||||
if _, exists := raw[key]; exists {
|
||||
t.Errorf("Expected %q to be omitted from JSON, but it was present", key)
|
||||
}
|
||||
}
|
||||
|
||||
// Error should be present.
|
||||
if _, exists := raw["error"]; !exists {
|
||||
t.Error("Expected 'error' field in JSON")
|
||||
}
|
||||
}
|
||||
|
||||
func TestGenerateSessionID(t *testing.T) {
|
||||
// Verify session IDs are unique and properly formatted.
|
||||
seen := make(map[string]bool)
|
||||
for i := 0; i < 100; i++ {
|
||||
id, err := generateSessionID()
|
||||
if err != nil {
|
||||
t.Fatalf("generateSessionID failed: %v", err)
|
||||
}
|
||||
if len(id) != 32 { // 16 bytes = 32 hex chars
|
||||
t.Errorf("Expected 32-char hex ID, got %d chars: %q", len(id), id)
|
||||
}
|
||||
if seen[id] {
|
||||
t.Errorf("Duplicate session ID: %s", id)
|
||||
}
|
||||
seen[id] = true
|
||||
}
|
||||
}
|
||||
|
||||
// sendTestRequest connects to the server, sends a JSON request, and returns
|
||||
// the JSON response. This is a low-level helper that bypasses the Client
|
||||
// to test the raw protocol.
|
||||
func sendTestRequest(t *testing.T, sockPath string, req Request) Response {
|
||||
t.Helper()
|
||||
|
||||
conn, err := net.DialTimeout("unix", sockPath, 2*time.Second)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to connect to server: %v", err)
|
||||
}
|
||||
defer conn.Close() //nolint:errcheck // test cleanup
|
||||
|
||||
_ = conn.SetDeadline(time.Now().Add(5 * time.Second))
|
||||
|
||||
encoder := json.NewEncoder(conn)
|
||||
if err := encoder.Encode(req); err != nil {
|
||||
t.Fatalf("Failed to encode request: %v", err)
|
||||
}
|
||||
|
||||
decoder := json.NewDecoder(conn)
|
||||
var resp Response
|
||||
if err := decoder.Decode(&resp); err != nil {
|
||||
t.Fatalf("Failed to decode response: %v", err)
|
||||
}
|
||||
|
||||
return resp
|
||||
}
|
||||
567
internal/daemon/tun.go
Normal file
567
internal/daemon/tun.go
Normal file
@@ -0,0 +1,567 @@
|
||||
//go:build darwin
|
||||
|
||||
package daemon
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"os/exec"
|
||||
"regexp"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
tunIP = "198.18.0.1"
|
||||
dnsRelayIP = "127.0.0.2"
|
||||
dnsRelayPort = "15353" // high port; pf rdr rewrites port 53 → this port
|
||||
defaultDNSTarget = "127.0.0.1:42053" // proxy's DNS resolver (UDP), used when dnsAddr is not configured
|
||||
pfAnchorName = "co.greyhaven.greywall"
|
||||
|
||||
// tun2socksStopGracePeriod is the time to wait for tun2socks to exit
|
||||
// after SIGTERM before sending SIGKILL.
|
||||
tun2socksStopGracePeriod = 5 * time.Second
|
||||
)
|
||||
|
||||
// utunDevicePattern matches "utunN" device names in tun2socks output or ifconfig.
|
||||
var utunDevicePattern = regexp.MustCompile(`(utun\d+)`)
|
||||
|
||||
// TunManager handles utun device creation via tun2socks, tun2socks process
// lifecycle, and pf (packet filter) rule management for routing sandboxed
// traffic through the tunnel on macOS. All methods take mu, so a single
// TunManager is safe to drive from multiple goroutines; copy-by-value is
// not (it embeds the mutex) — always pass *TunManager.
type TunManager struct {
	tunDevice     string        // e.g., "utun7"; discovered after tun2socks starts
	tun2socksPath string        // path to tun2socks binary
	tun2socksCmd  *exec.Cmd     // running tun2socks process; nil until Start succeeds
	proxyURL      string        // SOCKS5 proxy URL for tun2socks
	pfAnchor      string        // pf anchor name (pfAnchorName)
	debug         bool          // enables logDebug output
	done          chan struct{} // closed by Stop to signal the monitoring goroutine
	mu            sync.Mutex    // guards all fields above
}
|
||||
|
||||
// NewTunManager creates a new TunManager that will use the given tun2socks
|
||||
// binary and SOCKS5 proxy URL. The pf anchor is set to "co.greyhaven.greywall".
|
||||
func NewTunManager(tun2socksPath string, proxyURL string, debug bool) *TunManager {
|
||||
return &TunManager{
|
||||
tun2socksPath: tun2socksPath,
|
||||
proxyURL: proxyURL,
|
||||
pfAnchor: pfAnchorName,
|
||||
debug: debug,
|
||||
done: make(chan struct{}),
|
||||
}
|
||||
}
|
||||
|
||||
// Start brings up the full tunnel stack:
//  1. Start tun2socks with "-device utun" (it auto-creates a utunN device)
//  2. Discover which utunN device was created
//  3. Configure the utun interface IP
//  4. Set up a loopback alias for the DNS relay
//  5. Load pf anchor rules (deferred until LoadPFRules is called explicitly)
//
// If a later step fails, the already-started tun2socks process is stopped
// (best-effort) before the error is returned, so a failed Start leaves no
// stray child process behind.
func (t *TunManager) Start() error {
	t.mu.Lock()
	defer t.mu.Unlock()

	// A second Start on a live manager would orphan the first process.
	if t.tun2socksCmd != nil {
		return fmt.Errorf("tun manager already started")
	}

	// Step 1: Start tun2socks. It creates the utun device automatically.
	if err := t.startTun2Socks(); err != nil {
		return fmt.Errorf("failed to start tun2socks: %w", err)
	}

	// Step 2: Configure the utun interface with a point-to-point IP.
	if err := t.configureInterface(); err != nil {
		_ = t.stopTun2Socks() // best-effort cleanup
		return fmt.Errorf("failed to configure interface %s: %w", t.tunDevice, err)
	}

	// Step 3: Add a loopback alias for the DNS relay address.
	if err := t.addLoopbackAlias(); err != nil {
		_ = t.stopTun2Socks() // best-effort cleanup
		return fmt.Errorf("failed to add loopback alias: %w", err)
	}

	t.logDebug("Tunnel stack started: device=%s proxy=%s", t.tunDevice, t.proxyURL)
	return nil
}
|
||||
|
||||
// Stop tears down the tunnel stack in reverse order:
//  1. Unload pf rules
//  2. Stop tun2socks (SIGTERM, then SIGKILL after grace period)
//  3. Remove loopback alias
//  4. The utun device is destroyed automatically when tun2socks exits
//
// Every step is best-effort: later steps still run when an earlier one
// fails, and all failures are joined into a single returned error.
func (t *TunManager) Stop() error {
	t.mu.Lock()
	defer t.mu.Unlock()

	var errs []string

	// Signal the monitoring goroutine to stop. The select guards against
	// a double close when Stop is invoked more than once.
	select {
	case <-t.done:
		// Already closed.
	default:
		close(t.done)
	}

	// Step 1: Unload pf rules (best effort).
	if err := t.unloadPFRulesLocked(); err != nil {
		errs = append(errs, fmt.Sprintf("unload pf rules: %v", err))
	}

	// Step 2: Stop tun2socks.
	if err := t.stopTun2Socks(); err != nil {
		errs = append(errs, fmt.Sprintf("stop tun2socks: %v", err))
	}

	// Step 3: Remove loopback alias (best effort).
	if err := t.removeLoopbackAlias(); err != nil {
		errs = append(errs, fmt.Sprintf("remove loopback alias: %v", err))
	}

	if len(errs) > 0 {
		return fmt.Errorf("stop errors: %s", strings.Join(errs, "; "))
	}

	t.logDebug("Tunnel stack stopped")
	return nil
}
|
||||
|
||||
// TunDevice returns the name of the utun device (e.g., "utun7").
|
||||
// Returns an empty string if the tunnel has not been started.
|
||||
func (t *TunManager) TunDevice() string {
|
||||
t.mu.Lock()
|
||||
defer t.mu.Unlock()
|
||||
return t.tunDevice
|
||||
}
|
||||
|
||||
// LoadPFRules loads pf anchor rules that route traffic from the given sandbox
|
||||
// group through the utun device. The rules:
|
||||
// - Route all TCP from the sandbox group through the utun interface
|
||||
// - Redirect DNS (UDP port 53) from the sandbox group to the local DNS relay
|
||||
//
|
||||
// This requires root privileges and an active pf firewall.
|
||||
func (t *TunManager) LoadPFRules(sandboxGroup string) error {
|
||||
t.mu.Lock()
|
||||
defer t.mu.Unlock()
|
||||
|
||||
if t.tunDevice == "" {
|
||||
return fmt.Errorf("tunnel not started, no device available")
|
||||
}
|
||||
|
||||
// Ensure the anchor reference exists in the main pf.conf.
|
||||
if err := t.ensureAnchorInPFConf(); err != nil {
|
||||
return fmt.Errorf("failed to ensure pf anchor: %w", err)
|
||||
}
|
||||
|
||||
// Build pf anchor rules for the sandbox group:
|
||||
// 1. Route all non-loopback TCP through the utun → tun2socks → SOCKS proxy.
|
||||
// Loopback (127.0.0.0/8) is excluded so that ALL_PROXY=socks5h://
|
||||
// connections to the local proxy don't get double-proxied.
|
||||
// 2. (DNS is handled via ALL_PROXY=socks5h:// env var, not via pf,
|
||||
// because macOS getaddrinfo uses mDNSResponder via Mach IPC and
|
||||
// blocking those services doesn't cause a UDP DNS fallback.)
|
||||
rules := fmt.Sprintf(
|
||||
"pass out route-to (%s %s) proto tcp from any to !127.0.0.0/8 group %s\n",
|
||||
t.tunDevice, tunIP, sandboxGroup,
|
||||
)
|
||||
|
||||
t.logDebug("Loading pf rules into anchor %s:\n%s", t.pfAnchor, rules)
|
||||
|
||||
// Load the rules into the anchor.
|
||||
//nolint:gosec // arguments are controlled internal constants, not user input
|
||||
cmd := exec.Command("pfctl", "-a", t.pfAnchor, "-f", "-")
|
||||
cmd.Stdin = strings.NewReader(rules)
|
||||
cmd.Stderr = os.Stderr
|
||||
if output, err := cmd.Output(); err != nil {
|
||||
return fmt.Errorf("pfctl load rules failed: %w (output: %s)", err, string(output))
|
||||
}
|
||||
|
||||
// Enable pf if it is not already enabled.
|
||||
if err := t.enablePF(); err != nil {
|
||||
// Non-fatal: pf may already be enabled.
|
||||
t.logDebug("Warning: failed to enable pf (may already be active): %v", err)
|
||||
}
|
||||
|
||||
t.logDebug("pf rules loaded for group %s on %s", sandboxGroup, t.tunDevice)
|
||||
return nil
|
||||
}
|
||||
|
||||
// UnloadPFRules removes the pf rules from the anchor. It is safe to call
// even when no rules were ever loaded — the anchor is simply flushed.
func (t *TunManager) UnloadPFRules() error {
	t.mu.Lock()
	defer t.mu.Unlock()
	return t.unloadPFRulesLocked()
}
|
||||
|
||||
// startTun2Socks launches the tun2socks process with "-device utun" so that it
// auto-creates a utun device. The device name is discovered by scanning both
// of tun2socks's output streams for a utunN identifier, with an ifconfig scan
// as fallback; if neither yields a device within the timeout, tun2socks is
// killed and an error is returned.
func (t *TunManager) startTun2Socks() error {
	//nolint:gosec // tun2socksPath is an internal path, not user input
	cmd := exec.Command(t.tun2socksPath, "-device", "utun", "-proxy", t.proxyURL)

	// Capture both stdout and stderr to discover the device name.
	// tun2socks may log the device name on either stream depending on version.
	stderrPipe, err := cmd.StderrPipe()
	if err != nil {
		return fmt.Errorf("failed to create stderr pipe: %w", err)
	}
	stdoutPipe, err := cmd.StdoutPipe()
	if err != nil {
		return fmt.Errorf("failed to create stdout pipe: %w", err)
	}

	if err := cmd.Start(); err != nil {
		return fmt.Errorf("failed to start tun2socks: %w", err)
	}
	t.tun2socksCmd = cmd

	// Read both stdout and stderr to discover the utun device name.
	// tun2socks logs the device name shortly after startup
	// (e.g., "level=INFO msg=[STACK] tun://utun7 <-> ...").
	deviceCh := make(chan string, 2) // buffered for both goroutines
	stderrLines := make(chan string, 100)

	// scanPipe scans lines from a pipe, looking for the utun device name.
	// The first match wins; the non-blocking send keeps the scanner from
	// stalling once the other pipe has already reported a device.
	scanPipe := func(pipe io.Reader, label string) {
		scanner := bufio.NewScanner(pipe)
		for scanner.Scan() {
			line := scanner.Text()
			fmt.Fprintf(os.Stderr, "[greywall:tun] tun2socks(%s): %s\n", label, line) //nolint:gosec // logging tun2socks output
			if match := utunDevicePattern.FindString(line); match != "" {
				select {
				case deviceCh <- match:
				default:
					// Already found by the other pipe.
				}
			}
			// Forward the line to the monitor goroutine; drop it rather
			// than block the scanner when the buffer is full.
			select {
			case stderrLines <- line:
			default:
			}
		}
	}

	go scanPipe(stderrPipe, "stderr")
	go scanPipe(stdoutPipe, "stdout")

	// Wait for the device name with a timeout.
	select {
	case device := <-deviceCh:
		// NOTE(review): device can never be empty here — scanPipe only
		// sends non-empty regexp matches — so this branch looks like dead
		// defensive code; confirm before removing.
		if device == "" {
			t.logDebug("Empty device from tun2socks output, trying ifconfig")
			device, err = t.discoverUtunFromIfconfig()
			if err != nil {
				_ = cmd.Process.Kill()
				return fmt.Errorf("failed to discover utun device: %w", err)
			}
		}
		t.tunDevice = device
	case <-time.After(10 * time.Second):
		// Timeout: try ifconfig fallback.
		t.logDebug("Timeout waiting for tun2socks device name, trying ifconfig")
		device, err := t.discoverUtunFromIfconfig()
		if err != nil {
			_ = cmd.Process.Kill()
			return fmt.Errorf("tun2socks did not report device name within timeout: %w", err)
		}
		t.tunDevice = device
	}

	t.logDebug("tun2socks started (pid=%d, device=%s)", cmd.Process.Pid, t.tunDevice)

	// Monitor tun2socks in the background.
	go t.monitorTun2Socks(stderrLines)

	return nil
}
|
||||
|
||||
// discoverUtunFromIfconfig runs ifconfig and looks for a utun device. This is
|
||||
// used as a fallback when we cannot parse the device name from tun2socks output.
|
||||
func (t *TunManager) discoverUtunFromIfconfig() (string, error) {
|
||||
out, err := exec.Command("ifconfig").Output()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("ifconfig failed: %w", err)
|
||||
}
|
||||
|
||||
// Look for utun interfaces. We scan for lines starting with "utunN:"
|
||||
// and return the highest-numbered one (most recently created).
|
||||
ifPattern := regexp.MustCompile(`^(utun\d+):`)
|
||||
var lastDevice string
|
||||
for _, line := range strings.Split(string(out), "\n") {
|
||||
if m := ifPattern.FindStringSubmatch(line); m != nil {
|
||||
lastDevice = m[1]
|
||||
}
|
||||
}
|
||||
|
||||
if lastDevice == "" {
|
||||
return "", fmt.Errorf("no utun device found in ifconfig output")
|
||||
}
|
||||
|
||||
return lastDevice, nil
|
||||
}
|
||||
|
||||
// monitorTun2Socks blocks on the tun2socks process and logs an error if it
// exits before t.done is closed (i.e. before an orderly Stop).
//
// NOTE(review): t.tun2socksCmd is read here without holding t.mu while
// stopTun2Socks can concurrently set it to nil — looks like a data race;
// confirm.
// NOTE(review): stderrLines is never closed, so the drain goroutine below
// never returns. Bounded per tunnel lifetime, but worth confirming.
func (t *TunManager) monitorTun2Socks(stderrLines <-chan string) {
	if t.tun2socksCmd == nil || t.tun2socksCmd.Process == nil {
		return
	}

	// Drain any remaining stderr lines so the scanner goroutines never
	// block on a full channel.
	go func() {
		for range stderrLines {
			// Already logged in the scanner goroutine when debug is on.
		}
	}()

	err := t.tun2socksCmd.Wait()

	// Distinguish an expected shutdown (done closed by Stop) from a crash.
	select {
	case <-t.done:
		// Expected shutdown.
		t.logDebug("tun2socks exited (expected shutdown)")
	default:
		// Unexpected exit.
		fmt.Fprintf(os.Stderr, "[greywall:tun] ERROR: tun2socks exited unexpectedly: %v\n", err)
	}
}
|
||||
|
||||
// stopTun2Socks signals the tun2socks process to exit and waits for it.
// If it does not exit within tun2socksStopGracePeriod, SIGKILL is sent.
// Always returns nil after clearing t.tun2socksCmd, so it is safe to call
// when the process has already exited. Callers must hold t.mu.
//
// NOTE(review): os.Interrupt is SIGINT on Unix, not SIGTERM as the
// surrounding comments claimed — confirm which signal tun2socks expects.
func (t *TunManager) stopTun2Socks() error {
	if t.tun2socksCmd == nil || t.tun2socksCmd.Process == nil {
		return nil
	}

	t.logDebug("Stopping tun2socks (pid=%d)", t.tun2socksCmd.Process.Pid)

	// Send the interrupt signal (os.Interrupt == SIGINT on Unix). On
	// failure, assume the process is already gone.
	if err := t.tun2socksCmd.Process.Signal(os.Interrupt); err != nil {
		// Process may have already exited.
		t.logDebug("SIGTERM failed (process may have exited): %v", err)
		t.tun2socksCmd = nil
		return nil
	}

	// Wait for exit with a timeout.
	exited := make(chan error, 1)
	go func() {
		// Wait may have already been called by the monitor goroutine,
		// in which case this will return immediately.
		// NOTE(review): exec.Cmd documents that Wait must be called only
		// once; this relies on the second call failing fast rather than
		// hanging — confirm.
		exited <- t.tun2socksCmd.Wait()
	}()

	select {
	case err := <-exited:
		if err != nil {
			t.logDebug("tun2socks exited with: %v", err)
		}
	case <-time.After(tun2socksStopGracePeriod):
		t.logDebug("tun2socks did not exit after SIGTERM, sending SIGKILL")
		_ = t.tun2socksCmd.Process.Kill()
	}

	t.tun2socksCmd = nil
	return nil
}
|
||||
|
||||
// configureInterface sets up the utun interface with a point-to-point IP address.
|
||||
func (t *TunManager) configureInterface() error {
|
||||
t.logDebug("Configuring interface %s with IP %s", t.tunDevice, tunIP)
|
||||
|
||||
//nolint:gosec // tunDevice and tunIP are controlled internal values
|
||||
cmd := exec.Command("ifconfig", t.tunDevice, tunIP, tunIP, "up")
|
||||
if output, err := cmd.CombinedOutput(); err != nil {
|
||||
return fmt.Errorf("ifconfig %s failed: %w (output: %s)", t.tunDevice, err, string(output))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// addLoopbackAlias adds an alias IP on lo0 for the DNS relay.
|
||||
func (t *TunManager) addLoopbackAlias() error {
|
||||
t.logDebug("Adding loopback alias %s on lo0", dnsRelayIP)
|
||||
|
||||
cmd := exec.Command("ifconfig", "lo0", "alias", dnsRelayIP, "up")
|
||||
if output, err := cmd.CombinedOutput(); err != nil {
|
||||
return fmt.Errorf("ifconfig lo0 alias failed: %w (output: %s)", err, string(output))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// removeLoopbackAlias removes the DNS relay alias from lo0.
|
||||
func (t *TunManager) removeLoopbackAlias() error {
|
||||
t.logDebug("Removing loopback alias %s from lo0", dnsRelayIP)
|
||||
|
||||
cmd := exec.Command("ifconfig", "lo0", "-alias", dnsRelayIP)
|
||||
if output, err := cmd.CombinedOutput(); err != nil {
|
||||
return fmt.Errorf("ifconfig lo0 -alias failed: %w (output: %s)", err, string(output))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ensureAnchorInPFConf checks whether the pf anchor reference exists in
// /etc/pf.conf. If not, it inserts the anchor lines at the correct positions
// (pf requires strict ordering: rdr-anchor before anchor, both before load anchor)
// and reloads the main ruleset.
func (t *TunManager) ensureAnchorInPFConf() error {
	const pfConfPath = "/etc/pf.conf"
	anchorLine := fmt.Sprintf(`anchor "%s"`, t.pfAnchor)
	rdrAnchorLine := fmt.Sprintf(`rdr-anchor "%s"`, t.pfAnchor)

	data, err := os.ReadFile(pfConfPath)
	if err != nil {
		return fmt.Errorf("failed to read %s: %w", pfConfPath, err)
	}

	lines := strings.Split(string(data), "\n")

	// Line-level presence check avoids substring false positives
	// (e.g. 'anchor "X"' matching inside 'rdr-anchor "X"').
	hasAnchor := false
	hasRdrAnchor := false
	lastRdrIdx := -1
	lastAnchorIdx := -1

	for i, line := range lines {
		trimmed := strings.TrimSpace(line)
		if trimmed == rdrAnchorLine {
			hasRdrAnchor = true
		}
		if trimmed == anchorLine {
			hasAnchor = true
		}
		if strings.HasPrefix(trimmed, "rdr-anchor ") {
			lastRdrIdx = i
		}
		// Standalone "anchor" lines — "rdr-anchor ..." does not start with
		// "anchor ", so there is no overlap with the branch above.
		if strings.HasPrefix(trimmed, "anchor ") {
			lastAnchorIdx = i
		}
	}

	if hasAnchor && hasRdrAnchor {
		t.logDebug("pf anchor already present in %s", pfConfPath)
		return nil
	}

	t.logDebug("Adding pf anchor to %s", pfConfPath)

	// Rebuild the file line by line, appending each missing anchor line
	// immediately after the last existing line of its kind so pf's required
	// ordering (rdr-anchor rules before anchor rules) is preserved.
	var result []string
	for i, line := range lines {
		result = append(result, line)
		if !hasRdrAnchor && i == lastRdrIdx {
			result = append(result, rdrAnchorLine)
		}
		if !hasAnchor && i == lastAnchorIdx {
			result = append(result, anchorLine)
		}
	}

	// Fallback: if no existing rdr-anchor/anchor found, append at end.
	if !hasRdrAnchor && lastRdrIdx == -1 {
		result = append(result, rdrAnchorLine)
	}
	if !hasAnchor && lastAnchorIdx == -1 {
		result = append(result, anchorLine)
	}

	newContent := strings.Join(result, "\n")

	//nolint:gosec // pf.conf must be writable by root; the daemon runs as root
	if err := os.WriteFile(pfConfPath, []byte(newContent), 0o644); err != nil {
		return fmt.Errorf("failed to write %s: %w", pfConfPath, err)
	}

	// Reload the main pf.conf so the anchor reference is recognized.
	//nolint:gosec // pfConfPath is a constant
	reloadCmd := exec.Command("pfctl", "-f", pfConfPath)
	if output, err := reloadCmd.CombinedOutput(); err != nil {
		return fmt.Errorf("pfctl reload failed: %w (output: %s)", err, string(output))
	}

	t.logDebug("pf anchor added and pf.conf reloaded")
	return nil
}
|
||||
|
||||
// enablePF enables the pf firewall if it is not already active.
|
||||
func (t *TunManager) enablePF() error {
|
||||
// Check current pf status.
|
||||
out, err := exec.Command("pfctl", "-s", "info").CombinedOutput()
|
||||
if err == nil && strings.Contains(string(out), "Status: Enabled") {
|
||||
t.logDebug("pf is already enabled")
|
||||
return nil
|
||||
}
|
||||
|
||||
t.logDebug("Enabling pf")
|
||||
cmd := exec.Command("pfctl", "-e")
|
||||
if output, err := cmd.CombinedOutput(); err != nil {
|
||||
return fmt.Errorf("pfctl -e failed: %w (output: %s)", err, string(output))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// unloadPFRulesLocked flushes all rules from the pf anchor. Must be called
|
||||
// with t.mu held.
|
||||
func (t *TunManager) unloadPFRulesLocked() error {
|
||||
t.logDebug("Flushing pf anchor %s", t.pfAnchor)
|
||||
|
||||
//nolint:gosec // pfAnchor is a controlled internal constant
|
||||
cmd := exec.Command("pfctl", "-a", t.pfAnchor, "-F", "all")
|
||||
if output, err := cmd.CombinedOutput(); err != nil {
|
||||
return fmt.Errorf("pfctl flush anchor failed: %w (output: %s)", err, string(output))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// removeAnchorFromPFConf removes greywall anchor lines from /etc/pf.conf.
// Called during uninstall to clean up. Matching is exact on the trimmed
// line, so unrelated anchors are never touched; a file without our lines
// is left unmodified.
//
// NOTE(review): unlike ensureAnchorInPFConf, this does not run
// "pfctl -f /etc/pf.conf" afterwards, so the live ruleset keeps the anchor
// until the next reload — presumably acceptable at uninstall time; confirm.
func removeAnchorFromPFConf(debug bool) error {
	const pfConfPath = "/etc/pf.conf"
	anchorLine := fmt.Sprintf(`anchor "%s"`, pfAnchorName)
	rdrAnchorLine := fmt.Sprintf(`rdr-anchor "%s"`, pfAnchorName)

	data, err := os.ReadFile(pfConfPath)
	if err != nil {
		return fmt.Errorf("failed to read %s: %w", pfConfPath, err)
	}

	// Keep every line except exact (trimmed) matches of our anchor lines.
	lines := strings.Split(string(data), "\n")
	var filtered []string
	removed := 0
	for _, line := range lines {
		trimmed := strings.TrimSpace(line)
		if trimmed == anchorLine || trimmed == rdrAnchorLine {
			removed++
			continue
		}
		filtered = append(filtered, line)
	}

	if removed == 0 {
		logDebug(debug, "No pf anchor lines to remove from %s", pfConfPath)
		return nil
	}

	//nolint:gosec // pf.conf must be writable by root; the daemon runs as root
	if err := os.WriteFile(pfConfPath, []byte(strings.Join(filtered, "\n")), 0o644); err != nil {
		return fmt.Errorf("failed to write %s: %w", pfConfPath, err)
	}

	logDebug(debug, "Removed %d pf anchor lines from %s", removed, pfConfPath)
	return nil
}
|
||||
|
||||
// logDebug writes a debug message to stderr with the [greywall:tun] prefix.
|
||||
func (t *TunManager) logDebug(format string, args ...interface{}) {
|
||||
if t.debug {
|
||||
fmt.Fprintf(os.Stderr, "[greywall:tun] "+format+"\n", args...)
|
||||
}
|
||||
}
|
||||
38
internal/daemon/tun_stub.go
Normal file
38
internal/daemon/tun_stub.go
Normal file
@@ -0,0 +1,38 @@
|
||||
//go:build !darwin
|
||||
|
||||
package daemon
|
||||
|
||||
import "fmt"
|
||||
|
||||
// TunManager is a stub for non-macOS platforms.
type TunManager struct{}

// NewTunManager returns a stub TunManager on non-macOS platforms; all of
// its operational methods fail with an error.
func NewTunManager(tun2socksPath string, proxyURL string, debug bool) *TunManager {
	return &TunManager{}
}

// Start returns an error on non-macOS platforms.
func (t *TunManager) Start() error {
	return fmt.Errorf("tun manager is only available on macOS")
}

// Stop returns an error on non-macOS platforms.
func (t *TunManager) Stop() error {
	return fmt.Errorf("tun manager is only available on macOS")
}

// TunDevice returns an empty string on non-macOS platforms.
func (t *TunManager) TunDevice() string {
	return ""
}

// LoadPFRules returns an error on non-macOS platforms.
func (t *TunManager) LoadPFRules(sandboxUser string) error {
	return fmt.Errorf("pf rules are only available on macOS")
}

// UnloadPFRules returns an error on non-macOS platforms.
func (t *TunManager) UnloadPFRules() error {
	return fmt.Errorf("pf rules are only available on macOS")
}
|
||||
@@ -1,444 +0,0 @@
|
||||
// Package importer provides functionality to import settings from other tools.
|
||||
package importer
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/Use-Tusk/fence/internal/config"
|
||||
"github.com/tidwall/jsonc"
|
||||
)
|
||||
|
||||
// ClaudeSettings represents the Claude Code settings.json structure
// (only the fields this importer consumes).
type ClaudeSettings struct {
	Permissions ClaudePermissions `json:"permissions"`
}

// ClaudePermissions represents the permissions block in Claude Code
// settings. Each entry is a rule string such as "Bash(npm run test:*)"
// or "Read(./.env)".
type ClaudePermissions struct {
	Allow []string `json:"allow"` // rules that are permitted
	Deny  []string `json:"deny"`  // rules that are rejected
	Ask   []string `json:"ask"`   // rules requiring interactive confirmation in Claude
}
|
||||
|
||||
// ClaudeSettingsPaths returns the standard paths where Claude Code stores settings.
|
||||
func ClaudeSettingsPaths() []string {
|
||||
home, err := os.UserHomeDir()
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
paths := []string{
|
||||
filepath.Join(home, ".claude", "settings.json"),
|
||||
}
|
||||
|
||||
// Also check project-level settings in current directory
|
||||
cwd, err := os.Getwd()
|
||||
if err == nil {
|
||||
paths = append(paths,
|
||||
filepath.Join(cwd, ".claude", "settings.json"),
|
||||
filepath.Join(cwd, ".claude", "settings.local.json"),
|
||||
)
|
||||
}
|
||||
|
||||
return paths
|
||||
}
|
||||
|
||||
// DefaultClaudeSettingsPath returns the default user-level Claude settings path.
|
||||
func DefaultClaudeSettingsPath() string {
|
||||
home, err := os.UserHomeDir()
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
return filepath.Join(home, ".claude", "settings.json")
|
||||
}
|
||||
|
||||
// LoadClaudeSettings loads Claude Code settings from a file.
|
||||
func LoadClaudeSettings(path string) (*ClaudeSettings, error) {
|
||||
data, err := os.ReadFile(path) //nolint:gosec // user-provided path - intentional
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read Claude settings: %w", err)
|
||||
}
|
||||
|
||||
// Handle empty file
|
||||
if len(strings.TrimSpace(string(data))) == 0 {
|
||||
return &ClaudeSettings{}, nil
|
||||
}
|
||||
|
||||
var settings ClaudeSettings
|
||||
if err := json.Unmarshal(jsonc.ToJSON(data), &settings); err != nil {
|
||||
return nil, fmt.Errorf("invalid JSON in Claude settings: %w", err)
|
||||
}
|
||||
|
||||
return &settings, nil
|
||||
}
|
||||
|
||||
// ConvertClaudeToFence converts Claude Code settings to a fence config.
|
||||
func ConvertClaudeToFence(settings *ClaudeSettings) *config.Config {
|
||||
cfg := config.Default()
|
||||
|
||||
// Process allow rules
|
||||
for _, rule := range settings.Permissions.Allow {
|
||||
processClaudeRule(rule, cfg, true)
|
||||
}
|
||||
|
||||
// Process deny rules
|
||||
for _, rule := range settings.Permissions.Deny {
|
||||
processClaudeRule(rule, cfg, false)
|
||||
}
|
||||
|
||||
// Process ask rules (treat as deny for fence, since fence doesn't have interactive prompts)
|
||||
// Users can review and move to allow if needed
|
||||
for _, rule := range settings.Permissions.Ask {
|
||||
processClaudeRule(rule, cfg, false)
|
||||
}
|
||||
|
||||
return cfg
|
||||
}
|
||||
|
||||
// bashPattern matches Bash permission rules like "Bash(npm run test:*)" or
// "Bash(curl:*)"; the capture group holds the raw command text.
var bashPattern = regexp.MustCompile(`^Bash\((.+)\)$`)

// readPattern matches Read permission rules like "Read(./.env)" or
// "Read(./secrets/**)"; the capture group holds the raw path text.
var readPattern = regexp.MustCompile(`^Read\((.+)\)$`)

// writePattern matches Write permission rules like "Write(./output/**)".
var writePattern = regexp.MustCompile(`^Write\((.+)\)$`)

// editPattern matches Edit permission rules (handled identically to Write).
var editPattern = regexp.MustCompile(`^Edit\((.+)\)$`)
|
||||
|
||||
// processClaudeRule processes a single Claude permission rule and updates the fence config.
|
||||
func processClaudeRule(rule string, cfg *config.Config, isAllow bool) {
|
||||
rule = strings.TrimSpace(rule)
|
||||
if rule == "" {
|
||||
return
|
||||
}
|
||||
|
||||
// Handle Bash(command) rules
|
||||
if matches := bashPattern.FindStringSubmatch(rule); len(matches) == 2 {
|
||||
cmd := normalizeClaudeCommand(matches[1])
|
||||
if cmd != "" {
|
||||
if isAllow {
|
||||
cfg.Command.Allow = appendUnique(cfg.Command.Allow, cmd)
|
||||
} else {
|
||||
cfg.Command.Deny = appendUnique(cfg.Command.Deny, cmd)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Handle Read(path) rules
|
||||
if matches := readPattern.FindStringSubmatch(rule); len(matches) == 2 {
|
||||
path := normalizeClaudePath(matches[1])
|
||||
if path != "" {
|
||||
if !isAllow {
|
||||
// Read deny -> filesystem.denyRead
|
||||
cfg.Filesystem.DenyRead = appendUnique(cfg.Filesystem.DenyRead, path)
|
||||
}
|
||||
// Note: fence doesn't have an "allowRead" concept - everything is readable by default
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Handle Write(path) rules
|
||||
if matches := writePattern.FindStringSubmatch(rule); len(matches) == 2 {
|
||||
path := normalizeClaudePath(matches[1])
|
||||
if path != "" {
|
||||
if isAllow {
|
||||
cfg.Filesystem.AllowWrite = appendUnique(cfg.Filesystem.AllowWrite, path)
|
||||
} else {
|
||||
cfg.Filesystem.DenyWrite = appendUnique(cfg.Filesystem.DenyWrite, path)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Handle Edit(path) rules (same as Write)
|
||||
if matches := editPattern.FindStringSubmatch(rule); len(matches) == 2 {
|
||||
path := normalizeClaudePath(matches[1])
|
||||
if path != "" {
|
||||
if isAllow {
|
||||
cfg.Filesystem.AllowWrite = appendUnique(cfg.Filesystem.AllowWrite, path)
|
||||
} else {
|
||||
cfg.Filesystem.DenyWrite = appendUnique(cfg.Filesystem.DenyWrite, path)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Handle bare tool names (e.g., "Read", "Write", "Bash")
|
||||
// These are global permissions that don't map directly to fence's path-based model
|
||||
// We skip them as they don't provide actionable path/command restrictions
|
||||
}
|
||||
|
||||
// normalizeClaudeCommand converts Claude's command rule format to fence's.
// Claude appends ":*" for prefix wildcards ("npm:*", "npm run test:*");
// fence matches on the bare prefix, so that suffix is stripped after
// trimming surrounding whitespace.
func normalizeClaudeCommand(cmd string) string {
	return strings.TrimSuffix(strings.TrimSpace(cmd), ":*")
}
|
||||
|
||||
// normalizeClaudePath converts Claude's path rule format to fence's.
// Claude's "./" relative prefix and "**" glob patterns are both accepted
// by fence as-is, so the only normalization needed is whitespace trimming.
func normalizeClaudePath(path string) string {
	return strings.TrimSpace(path)
}
|
||||
|
||||
// appendUnique appends value to slice unless an equal element is already
// present, preserving order; it returns the (possibly extended) slice.
func appendUnique(slice []string, value string) []string {
	for _, existing := range slice {
		if existing == value {
			return slice
		}
	}
	return append(slice, value)
}
|
||||
|
||||
// ImportResult contains the result of an import operation.
type ImportResult struct {
	Config        *config.Config // the converted fence config
	SourcePath    string         // the settings file the rules came from
	RulesImported int            // total allow+deny+ask rules counted
	Warnings      []string       // human-readable notes about lossy conversions
}

// ImportOptions configures the import behavior.
type ImportOptions struct {
	// Extends specifies a template or file to extend. Empty string means no extends.
	Extends string
}

// DefaultImportOptions returns the default import options.
// By default, imports extend the "code" template for sensible defaults.
func DefaultImportOptions() ImportOptions {
	return ImportOptions{
		Extends: "code",
	}
}
|
||||
|
||||
// ImportFromClaude imports settings from Claude Code and returns a fence config.
|
||||
// If path is empty, it tries the default Claude settings path.
|
||||
func ImportFromClaude(path string, opts ImportOptions) (*ImportResult, error) {
|
||||
if path == "" {
|
||||
path = DefaultClaudeSettingsPath()
|
||||
}
|
||||
|
||||
if path == "" {
|
||||
return nil, fmt.Errorf("could not determine Claude settings path")
|
||||
}
|
||||
|
||||
settings, err := LoadClaudeSettings(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cfg := ConvertClaudeToFence(settings)
|
||||
|
||||
// Set extends if specified
|
||||
if opts.Extends != "" {
|
||||
cfg.Extends = opts.Extends
|
||||
}
|
||||
|
||||
result := &ImportResult{
|
||||
Config: cfg,
|
||||
SourcePath: path,
|
||||
RulesImported: len(settings.Permissions.Allow) +
|
||||
len(settings.Permissions.Deny) +
|
||||
len(settings.Permissions.Ask),
|
||||
}
|
||||
|
||||
// Add warnings for rules that couldn't be fully converted
|
||||
for _, rule := range settings.Permissions.Allow {
|
||||
if isGlobalToolRule(rule) {
|
||||
result.Warnings = append(result.Warnings,
|
||||
fmt.Sprintf("Global tool permission %q skipped (fence uses path/command-based rules)", rule))
|
||||
}
|
||||
}
|
||||
for _, rule := range settings.Permissions.Deny {
|
||||
if isGlobalToolRule(rule) {
|
||||
result.Warnings = append(result.Warnings,
|
||||
fmt.Sprintf("Global tool permission %q skipped (fence uses path/command-based rules)", rule))
|
||||
}
|
||||
}
|
||||
for _, rule := range settings.Permissions.Ask {
|
||||
if isGlobalToolRule(rule) {
|
||||
result.Warnings = append(result.Warnings,
|
||||
fmt.Sprintf("Global tool permission %q skipped (fence uses path/command-based rules)", rule))
|
||||
} else {
|
||||
result.Warnings = append(result.Warnings,
|
||||
fmt.Sprintf("Ask rule %q converted to deny (fence doesn't support interactive prompts)", rule))
|
||||
}
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// isGlobalToolRule reports whether rule is a bare global tool permission
// (e.g. "Read") rather than a scoped one (e.g. "Read(./.env)") — scoped
// rules always carry a parenthesized argument.
func isGlobalToolRule(rule string) bool {
	return !strings.Contains(strings.TrimSpace(rule), "(")
}
|
||||
|
||||
// cleanNetworkConfig is used for JSON output with omitempty to skip empty fields.
|
||||
type cleanNetworkConfig struct {
|
||||
AllowedDomains []string `json:"allowedDomains,omitempty"`
|
||||
DeniedDomains []string `json:"deniedDomains,omitempty"`
|
||||
AllowUnixSockets []string `json:"allowUnixSockets,omitempty"`
|
||||
AllowAllUnixSockets bool `json:"allowAllUnixSockets,omitempty"`
|
||||
AllowLocalBinding bool `json:"allowLocalBinding,omitempty"`
|
||||
AllowLocalOutbound *bool `json:"allowLocalOutbound,omitempty"`
|
||||
HTTPProxyPort int `json:"httpProxyPort,omitempty"`
|
||||
SOCKSProxyPort int `json:"socksProxyPort,omitempty"`
|
||||
}
|
||||
|
||||
// cleanFilesystemConfig is used for JSON output with omitempty to skip empty fields.
|
||||
type cleanFilesystemConfig struct {
|
||||
DenyRead []string `json:"denyRead,omitempty"`
|
||||
AllowWrite []string `json:"allowWrite,omitempty"`
|
||||
DenyWrite []string `json:"denyWrite,omitempty"`
|
||||
AllowGitConfig bool `json:"allowGitConfig,omitempty"`
|
||||
}
|
||||
|
||||
// cleanCommandConfig is used for JSON output with omitempty to skip empty fields.
// Emptiness for the whole section is decided by isCommandEmpty.
type cleanCommandConfig struct {
	Deny  []string `json:"deny,omitempty"`
	Allow []string `json:"allow,omitempty"`
	// Pointer so an explicit false survives serialization; omitempty only
	// drops it when nil.
	UseDefaults *bool `json:"useDefaults,omitempty"`
}
|
||||
|
||||
// cleanConfig is used for JSON output with fields in desired order and omitempty.
// Section pointers are nil (and therefore omitted) when the section carries
// only default values; see MarshalConfigJSON.
type cleanConfig struct {
	Extends    string                 `json:"extends,omitempty"`
	AllowPty   bool                   `json:"allowPty,omitempty"`
	Network    *cleanNetworkConfig    `json:"network,omitempty"`
	Filesystem *cleanFilesystemConfig `json:"filesystem,omitempty"`
	Command    *cleanCommandConfig    `json:"command,omitempty"`
}
|
||||
|
||||
// MarshalConfigJSON marshals a fence config to clean JSON, omitting empty arrays
|
||||
// and with fields in a logical order (extends first).
|
||||
func MarshalConfigJSON(cfg *config.Config) ([]byte, error) {
|
||||
clean := cleanConfig{
|
||||
Extends: cfg.Extends,
|
||||
AllowPty: cfg.AllowPty,
|
||||
}
|
||||
|
||||
// Network config - only include if non-empty
|
||||
network := cleanNetworkConfig{
|
||||
AllowedDomains: cfg.Network.AllowedDomains,
|
||||
DeniedDomains: cfg.Network.DeniedDomains,
|
||||
AllowUnixSockets: cfg.Network.AllowUnixSockets,
|
||||
AllowAllUnixSockets: cfg.Network.AllowAllUnixSockets,
|
||||
AllowLocalBinding: cfg.Network.AllowLocalBinding,
|
||||
AllowLocalOutbound: cfg.Network.AllowLocalOutbound,
|
||||
HTTPProxyPort: cfg.Network.HTTPProxyPort,
|
||||
SOCKSProxyPort: cfg.Network.SOCKSProxyPort,
|
||||
}
|
||||
if !isNetworkEmpty(network) {
|
||||
clean.Network = &network
|
||||
}
|
||||
|
||||
// Filesystem config - only include if non-empty
|
||||
filesystem := cleanFilesystemConfig{
|
||||
DenyRead: cfg.Filesystem.DenyRead,
|
||||
AllowWrite: cfg.Filesystem.AllowWrite,
|
||||
DenyWrite: cfg.Filesystem.DenyWrite,
|
||||
AllowGitConfig: cfg.Filesystem.AllowGitConfig,
|
||||
}
|
||||
if !isFilesystemEmpty(filesystem) {
|
||||
clean.Filesystem = &filesystem
|
||||
}
|
||||
|
||||
// Command config - only include if non-empty
|
||||
command := cleanCommandConfig{
|
||||
Deny: cfg.Command.Deny,
|
||||
Allow: cfg.Command.Allow,
|
||||
UseDefaults: cfg.Command.UseDefaults,
|
||||
}
|
||||
if !isCommandEmpty(command) {
|
||||
clean.Command = &command
|
||||
}
|
||||
|
||||
return json.MarshalIndent(clean, "", " ")
|
||||
}
|
||||
|
||||
func isNetworkEmpty(n cleanNetworkConfig) bool {
|
||||
return len(n.AllowedDomains) == 0 &&
|
||||
len(n.DeniedDomains) == 0 &&
|
||||
len(n.AllowUnixSockets) == 0 &&
|
||||
!n.AllowAllUnixSockets &&
|
||||
!n.AllowLocalBinding &&
|
||||
n.AllowLocalOutbound == nil &&
|
||||
n.HTTPProxyPort == 0 &&
|
||||
n.SOCKSProxyPort == 0
|
||||
}
|
||||
|
||||
func isFilesystemEmpty(f cleanFilesystemConfig) bool {
|
||||
return len(f.DenyRead) == 0 &&
|
||||
len(f.AllowWrite) == 0 &&
|
||||
len(f.DenyWrite) == 0 &&
|
||||
!f.AllowGitConfig
|
||||
}
|
||||
|
||||
func isCommandEmpty(c cleanCommandConfig) bool {
|
||||
return len(c.Deny) == 0 &&
|
||||
len(c.Allow) == 0 &&
|
||||
c.UseDefaults == nil
|
||||
}
|
||||
|
||||
// FormatConfigWithComment returns the config JSON with a comment header
|
||||
// explaining that values are inherited from the extended template.
|
||||
func FormatConfigWithComment(cfg *config.Config) (string, error) {
|
||||
data, err := MarshalConfigJSON(cfg)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
var output strings.Builder
|
||||
|
||||
// Add comment about inherited values if extending a template
|
||||
if cfg.Extends != "" {
|
||||
output.WriteString(fmt.Sprintf("// This config extends %q.\n", cfg.Extends))
|
||||
output.WriteString(fmt.Sprintf("// Network, filesystem, and command rules from %q are inherited.\n", cfg.Extends))
|
||||
output.WriteString("// Only your additional rules are shown below.\n")
|
||||
output.WriteString("// Run `fence --list-templates` to see available templates.\n")
|
||||
}
|
||||
|
||||
output.Write(data)
|
||||
output.WriteByte('\n')
|
||||
|
||||
return output.String(), nil
|
||||
}
|
||||
|
||||
// WriteConfig writes a fence config to a file.
|
||||
func WriteConfig(cfg *config.Config, path string) error {
|
||||
output, err := FormatConfigWithComment(cfg)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to marshal config: %w", err)
|
||||
}
|
||||
|
||||
if err := os.WriteFile(path, []byte(output), 0o644); err != nil { //nolint:gosec // config file permissions
|
||||
return fmt.Errorf("failed to write config: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -1,581 +0,0 @@
|
||||
package importer
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/Use-Tusk/fence/internal/config"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestConvertClaudeToFence(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
settings *ClaudeSettings
|
||||
wantCmd struct {
|
||||
allow []string
|
||||
deny []string
|
||||
}
|
||||
wantFS struct {
|
||||
denyRead []string
|
||||
allowWrite []string
|
||||
denyWrite []string
|
||||
}
|
||||
}{
|
||||
{
|
||||
name: "empty settings",
|
||||
settings: &ClaudeSettings{
|
||||
Permissions: ClaudePermissions{},
|
||||
},
|
||||
wantCmd: struct {
|
||||
allow []string
|
||||
deny []string
|
||||
}{},
|
||||
wantFS: struct {
|
||||
denyRead []string
|
||||
allowWrite []string
|
||||
denyWrite []string
|
||||
}{},
|
||||
},
|
||||
{
|
||||
name: "bash allow rules",
|
||||
settings: &ClaudeSettings{
|
||||
Permissions: ClaudePermissions{
|
||||
Allow: []string{
|
||||
"Bash(npm run lint)",
|
||||
"Bash(npm run test:*)",
|
||||
"Bash(git status)",
|
||||
},
|
||||
},
|
||||
},
|
||||
wantCmd: struct {
|
||||
allow []string
|
||||
deny []string
|
||||
}{
|
||||
allow: []string{"npm run lint", "npm run test", "git status"},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "bash deny rules",
|
||||
settings: &ClaudeSettings{
|
||||
Permissions: ClaudePermissions{
|
||||
Deny: []string{
|
||||
"Bash(curl:*)",
|
||||
"Bash(sudo:*)",
|
||||
"Bash(rm -rf /)",
|
||||
},
|
||||
},
|
||||
},
|
||||
wantCmd: struct {
|
||||
allow []string
|
||||
deny []string
|
||||
}{
|
||||
deny: []string{"curl", "sudo", "rm -rf /"},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "read deny rules",
|
||||
settings: &ClaudeSettings{
|
||||
Permissions: ClaudePermissions{
|
||||
Deny: []string{
|
||||
"Read(./.env)",
|
||||
"Read(./secrets/**)",
|
||||
"Read(~/.ssh/*)",
|
||||
},
|
||||
},
|
||||
},
|
||||
wantFS: struct {
|
||||
denyRead []string
|
||||
allowWrite []string
|
||||
denyWrite []string
|
||||
}{
|
||||
denyRead: []string{"./.env", "./secrets/**", "~/.ssh/*"},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "write allow rules",
|
||||
settings: &ClaudeSettings{
|
||||
Permissions: ClaudePermissions{
|
||||
Allow: []string{
|
||||
"Write(./output/**)",
|
||||
"Write(./build)",
|
||||
},
|
||||
},
|
||||
},
|
||||
wantFS: struct {
|
||||
denyRead []string
|
||||
allowWrite []string
|
||||
denyWrite []string
|
||||
}{
|
||||
allowWrite: []string{"./output/**", "./build"},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "write deny rules",
|
||||
settings: &ClaudeSettings{
|
||||
Permissions: ClaudePermissions{
|
||||
Deny: []string{
|
||||
"Write(./.git/**)",
|
||||
"Edit(./package-lock.json)",
|
||||
},
|
||||
},
|
||||
},
|
||||
wantFS: struct {
|
||||
denyRead []string
|
||||
allowWrite []string
|
||||
denyWrite []string
|
||||
}{
|
||||
denyWrite: []string{"./.git/**", "./package-lock.json"},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "ask rules converted to deny",
|
||||
settings: &ClaudeSettings{
|
||||
Permissions: ClaudePermissions{
|
||||
Ask: []string{
|
||||
"Write(./config.json)",
|
||||
"Bash(npm publish)",
|
||||
},
|
||||
},
|
||||
},
|
||||
wantCmd: struct {
|
||||
allow []string
|
||||
deny []string
|
||||
}{
|
||||
deny: []string{"npm publish"},
|
||||
},
|
||||
wantFS: struct {
|
||||
denyRead []string
|
||||
allowWrite []string
|
||||
denyWrite []string
|
||||
}{
|
||||
denyWrite: []string{"./config.json"},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "global tool rules are skipped",
|
||||
settings: &ClaudeSettings{
|
||||
Permissions: ClaudePermissions{
|
||||
Allow: []string{
|
||||
"Read",
|
||||
"Grep",
|
||||
"LS",
|
||||
"Bash(npm run build)", // This should be included
|
||||
},
|
||||
Deny: []string{
|
||||
"Edit",
|
||||
"Bash(sudo:*)", // This should be included
|
||||
},
|
||||
},
|
||||
},
|
||||
wantCmd: struct {
|
||||
allow []string
|
||||
deny []string
|
||||
}{
|
||||
allow: []string{"npm run build"},
|
||||
deny: []string{"sudo"},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "mixed rules",
|
||||
settings: &ClaudeSettings{
|
||||
Permissions: ClaudePermissions{
|
||||
Allow: []string{
|
||||
"Bash(npm install)",
|
||||
"Bash(npm run:*)",
|
||||
"Write(./dist/**)",
|
||||
},
|
||||
Deny: []string{
|
||||
"Bash(curl:*)",
|
||||
"Read(./.env)",
|
||||
"Write(./.git/**)",
|
||||
},
|
||||
Ask: []string{
|
||||
"Bash(git push)",
|
||||
},
|
||||
},
|
||||
},
|
||||
wantCmd: struct {
|
||||
allow []string
|
||||
deny []string
|
||||
}{
|
||||
allow: []string{"npm install", "npm run"},
|
||||
deny: []string{"curl", "git push"},
|
||||
},
|
||||
wantFS: struct {
|
||||
denyRead []string
|
||||
allowWrite []string
|
||||
denyWrite []string
|
||||
}{
|
||||
denyRead: []string{"./.env"},
|
||||
allowWrite: []string{"./dist/**"},
|
||||
denyWrite: []string{"./.git/**"},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
cfg := ConvertClaudeToFence(tt.settings)
|
||||
|
||||
assert.ElementsMatch(t, tt.wantCmd.allow, cfg.Command.Allow, "command.allow mismatch")
|
||||
assert.ElementsMatch(t, tt.wantCmd.deny, cfg.Command.Deny, "command.deny mismatch")
|
||||
assert.ElementsMatch(t, tt.wantFS.denyRead, cfg.Filesystem.DenyRead, "filesystem.denyRead mismatch")
|
||||
assert.ElementsMatch(t, tt.wantFS.allowWrite, cfg.Filesystem.AllowWrite, "filesystem.allowWrite mismatch")
|
||||
assert.ElementsMatch(t, tt.wantFS.denyWrite, cfg.Filesystem.DenyWrite, "filesystem.denyWrite mismatch")
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestNormalizeClaudeCommand(t *testing.T) {
|
||||
tests := []struct {
|
||||
input string
|
||||
want string
|
||||
}{
|
||||
{"npm:*", "npm"},
|
||||
{"curl:*", "curl"},
|
||||
{"npm run test:*", "npm run test"},
|
||||
{"git status", "git status"},
|
||||
{"sudo rm -rf", "sudo rm -rf"},
|
||||
{"", ""},
|
||||
{" npm ", "npm"},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.input, func(t *testing.T) {
|
||||
got := normalizeClaudeCommand(tt.input)
|
||||
assert.Equal(t, tt.want, got)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadClaudeSettings(t *testing.T) {
|
||||
t.Run("valid settings", func(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
settingsPath := filepath.Join(tmpDir, "settings.json")
|
||||
|
||||
content := `{
|
||||
"permissions": {
|
||||
"allow": ["Bash(npm install)", "Read"],
|
||||
"deny": ["Bash(sudo:*)"],
|
||||
"ask": ["Write"]
|
||||
}
|
||||
}`
|
||||
err := os.WriteFile(settingsPath, []byte(content), 0o600) //nolint:gosec // test file
|
||||
require.NoError(t, err)
|
||||
|
||||
settings, err := LoadClaudeSettings(settingsPath)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, []string{"Bash(npm install)", "Read"}, settings.Permissions.Allow)
|
||||
assert.Equal(t, []string{"Bash(sudo:*)"}, settings.Permissions.Deny)
|
||||
assert.Equal(t, []string{"Write"}, settings.Permissions.Ask)
|
||||
})
|
||||
|
||||
t.Run("settings with comments (JSONC)", func(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
settingsPath := filepath.Join(tmpDir, "settings.json")
|
||||
|
||||
content := `{
|
||||
// This is a comment
|
||||
"permissions": {
|
||||
"allow": ["Bash(npm install)"],
|
||||
"deny": [], // Another comment
|
||||
"ask": []
|
||||
}
|
||||
}`
|
||||
err := os.WriteFile(settingsPath, []byte(content), 0o600) //nolint:gosec // test file
|
||||
require.NoError(t, err)
|
||||
|
||||
settings, err := LoadClaudeSettings(settingsPath)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, []string{"Bash(npm install)"}, settings.Permissions.Allow)
|
||||
})
|
||||
|
||||
t.Run("empty file", func(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
settingsPath := filepath.Join(tmpDir, "settings.json")
|
||||
|
||||
err := os.WriteFile(settingsPath, []byte(""), 0o600) //nolint:gosec // test file
|
||||
require.NoError(t, err)
|
||||
|
||||
settings, err := LoadClaudeSettings(settingsPath)
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, settings)
|
||||
})
|
||||
|
||||
t.Run("file not found", func(t *testing.T) {
|
||||
_, err := LoadClaudeSettings("/nonexistent/path/settings.json")
|
||||
assert.Error(t, err)
|
||||
})
|
||||
|
||||
t.Run("invalid json", func(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
settingsPath := filepath.Join(tmpDir, "settings.json")
|
||||
|
||||
err := os.WriteFile(settingsPath, []byte("not json"), 0o600) //nolint:gosec // test file
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = LoadClaudeSettings(settingsPath)
|
||||
assert.Error(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestImportFromClaude(t *testing.T) {
|
||||
t.Run("successful import with default extends", func(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
settingsPath := filepath.Join(tmpDir, "settings.json")
|
||||
|
||||
content := `{
|
||||
"permissions": {
|
||||
"allow": ["Bash(npm install)", "Write(./dist/**)"],
|
||||
"deny": ["Bash(curl:*)", "Read(./.env)"],
|
||||
"ask": ["Bash(git push)"]
|
||||
}
|
||||
}`
|
||||
err := os.WriteFile(settingsPath, []byte(content), 0o600) //nolint:gosec // test file
|
||||
require.NoError(t, err)
|
||||
|
||||
result, err := ImportFromClaude(settingsPath, DefaultImportOptions())
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, settingsPath, result.SourcePath)
|
||||
assert.Equal(t, 5, result.RulesImported)
|
||||
assert.Equal(t, "code", result.Config.Extends) // default extends
|
||||
|
||||
// Check converted config
|
||||
assert.Contains(t, result.Config.Command.Allow, "npm install")
|
||||
assert.Contains(t, result.Config.Command.Deny, "curl")
|
||||
assert.Contains(t, result.Config.Command.Deny, "git push") // ask -> deny
|
||||
assert.Contains(t, result.Config.Filesystem.AllowWrite, "./dist/**")
|
||||
assert.Contains(t, result.Config.Filesystem.DenyRead, "./.env")
|
||||
})
|
||||
|
||||
t.Run("import with no extend", func(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
settingsPath := filepath.Join(tmpDir, "settings.json")
|
||||
|
||||
content := `{
|
||||
"permissions": {
|
||||
"allow": ["Bash(npm install)"],
|
||||
"deny": [],
|
||||
"ask": []
|
||||
}
|
||||
}`
|
||||
err := os.WriteFile(settingsPath, []byte(content), 0o600) //nolint:gosec // test file
|
||||
require.NoError(t, err)
|
||||
|
||||
opts := ImportOptions{Extends: ""}
|
||||
result, err := ImportFromClaude(settingsPath, opts)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, "", result.Config.Extends) // no extends
|
||||
assert.Contains(t, result.Config.Command.Allow, "npm install")
|
||||
})
|
||||
|
||||
t.Run("import with custom extend", func(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
settingsPath := filepath.Join(tmpDir, "settings.json")
|
||||
|
||||
content := `{
|
||||
"permissions": {
|
||||
"allow": ["Bash(npm install)"],
|
||||
"deny": [],
|
||||
"ask": []
|
||||
}
|
||||
}`
|
||||
err := os.WriteFile(settingsPath, []byte(content), 0o600) //nolint:gosec // test file
|
||||
require.NoError(t, err)
|
||||
|
||||
opts := ImportOptions{Extends: "local-dev-server"}
|
||||
result, err := ImportFromClaude(settingsPath, opts)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, "local-dev-server", result.Config.Extends)
|
||||
})
|
||||
|
||||
t.Run("warnings for global rules", func(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
settingsPath := filepath.Join(tmpDir, "settings.json")
|
||||
|
||||
content := `{
|
||||
"permissions": {
|
||||
"allow": ["Read", "Grep", "Bash(npm install)"],
|
||||
"deny": ["Edit"],
|
||||
"ask": ["Write"]
|
||||
}
|
||||
}`
|
||||
err := os.WriteFile(settingsPath, []byte(content), 0o600) //nolint:gosec // test file
|
||||
require.NoError(t, err)
|
||||
|
||||
result, err := ImportFromClaude(settingsPath, DefaultImportOptions())
|
||||
require.NoError(t, err)
|
||||
|
||||
// Should have warnings for global rules: Read, Grep, Edit, Write (all global)
|
||||
assert.Len(t, result.Warnings, 4)
|
||||
|
||||
// Verify the warnings mention the right rules
|
||||
warningsStr := strings.Join(result.Warnings, " ")
|
||||
assert.Contains(t, warningsStr, "Read")
|
||||
assert.Contains(t, warningsStr, "Grep")
|
||||
assert.Contains(t, warningsStr, "Edit")
|
||||
assert.Contains(t, warningsStr, "Write")
|
||||
assert.Contains(t, warningsStr, "skipped")
|
||||
})
|
||||
}
|
||||
|
||||
func TestWriteConfig(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
outputPath := filepath.Join(tmpDir, "fence.json")
|
||||
|
||||
cfg := &config.Config{}
|
||||
cfg.Command.Allow = []string{"npm install"}
|
||||
cfg.Command.Deny = []string{"curl"}
|
||||
cfg.Filesystem.DenyRead = []string{"./.env"}
|
||||
|
||||
err := WriteConfig(cfg, outputPath)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify the file was written correctly
|
||||
data, err := os.ReadFile(outputPath) //nolint:gosec // test reads file we just wrote
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Contains(t, string(data), `"npm install"`)
|
||||
assert.Contains(t, string(data), `"curl"`)
|
||||
assert.Contains(t, string(data), `"./.env"`)
|
||||
}
|
||||
|
||||
func TestMarshalConfigJSON(t *testing.T) {
|
||||
t.Run("omits empty arrays", func(t *testing.T) {
|
||||
cfg := &config.Config{}
|
||||
cfg.Command.Allow = []string{"npm install"}
|
||||
// Leave all other arrays empty
|
||||
|
||||
data, err := MarshalConfigJSON(cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
output := string(data)
|
||||
assert.Contains(t, output, `"npm install"`)
|
||||
assert.NotContains(t, output, `"allowedDomains"`)
|
||||
assert.NotContains(t, output, `"deniedDomains"`)
|
||||
assert.NotContains(t, output, `"denyRead"`)
|
||||
assert.NotContains(t, output, `"allowWrite"`)
|
||||
assert.NotContains(t, output, `"denyWrite"`)
|
||||
assert.NotContains(t, output, `"network"`) // entire network section should be omitted
|
||||
})
|
||||
|
||||
t.Run("includes extends field", func(t *testing.T) {
|
||||
cfg := &config.Config{}
|
||||
cfg.Extends = "code"
|
||||
cfg.Command.Allow = []string{"npm install"}
|
||||
|
||||
data, err := MarshalConfigJSON(cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
output := string(data)
|
||||
assert.Contains(t, output, `"extends": "code"`)
|
||||
})
|
||||
|
||||
t.Run("includes non-empty arrays", func(t *testing.T) {
|
||||
cfg := &config.Config{}
|
||||
cfg.Network.AllowedDomains = []string{"example.com"}
|
||||
cfg.Filesystem.DenyRead = []string{".env"}
|
||||
cfg.Command.Deny = []string{"sudo"}
|
||||
|
||||
data, err := MarshalConfigJSON(cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
output := string(data)
|
||||
assert.Contains(t, output, `"example.com"`)
|
||||
assert.Contains(t, output, `".env"`)
|
||||
assert.Contains(t, output, `"sudo"`)
|
||||
})
|
||||
}
|
||||
|
||||
func TestFormatConfigWithComment(t *testing.T) {
|
||||
t.Run("adds comment when extends is set", func(t *testing.T) {
|
||||
cfg := &config.Config{}
|
||||
cfg.Extends = "code"
|
||||
cfg.Command.Allow = []string{"npm install"}
|
||||
|
||||
output, err := FormatConfigWithComment(cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Contains(t, output, `// This config extends "code".`)
|
||||
assert.Contains(t, output, `// Network, filesystem, and command rules from "code" are inherited.`)
|
||||
assert.Contains(t, output, `"npm install"`)
|
||||
})
|
||||
|
||||
t.Run("no comment when extends is empty", func(t *testing.T) {
|
||||
cfg := &config.Config{}
|
||||
cfg.Command.Allow = []string{"npm install"}
|
||||
|
||||
output, err := FormatConfigWithComment(cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.NotContains(t, output, "//")
|
||||
assert.Contains(t, output, `"npm install"`)
|
||||
})
|
||||
}
|
||||
|
||||
func TestIsGlobalToolRule(t *testing.T) {
|
||||
tests := []struct {
|
||||
rule string
|
||||
expected bool
|
||||
}{
|
||||
{"Read", true},
|
||||
{"Write", true},
|
||||
{"Grep", true},
|
||||
{"LS", true},
|
||||
{"Bash", true},
|
||||
{"Read(./.env)", false},
|
||||
{"Write(./dist/**)", false},
|
||||
{"Bash(npm install)", false},
|
||||
{"Bash(curl:*)", false},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.rule, func(t *testing.T) {
|
||||
assert.Equal(t, tt.expected, isGlobalToolRule(tt.rule))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestAppendUnique(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
slice []string
|
||||
value string
|
||||
expected []string
|
||||
}{
|
||||
{
|
||||
name: "append to empty",
|
||||
slice: []string{},
|
||||
value: "a",
|
||||
expected: []string{"a"},
|
||||
},
|
||||
{
|
||||
name: "append new value",
|
||||
slice: []string{"a", "b"},
|
||||
value: "c",
|
||||
expected: []string{"a", "b", "c"},
|
||||
},
|
||||
{
|
||||
name: "skip duplicate",
|
||||
slice: []string{"a", "b"},
|
||||
value: "a",
|
||||
expected: []string{"a", "b"},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result := appendUnique(tt.slice, tt.value)
|
||||
assert.Equal(t, tt.expected, result)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1,331 +0,0 @@
|
||||
// Package proxy provides HTTP and SOCKS5 proxy servers with domain filtering.
|
||||
package proxy
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/Use-Tusk/fence/internal/config"
|
||||
)
|
||||
|
||||
// FilterFunc determines if a connection to host:port should be allowed.
|
||||
type FilterFunc func(host string, port int) bool
|
||||
|
||||
// HTTPProxy is an HTTP/HTTPS proxy server with domain filtering.
// Zero value is not usable; construct with NewHTTPProxy and call Start.
type HTTPProxy struct {
	server   *http.Server // created in Start; nil until then
	listener net.Listener // bound in Start (loopback, random port)
	filter   FilterFunc   // decides whether a host:port may be reached
	debug    bool         // log all requests and server errors
	monitor  bool         // log only blocked/error requests
	mu       sync.RWMutex // guards running
	running  bool         // set true in Start, false in Stop
}
|
||||
|
||||
// NewHTTPProxy creates a new HTTP proxy with the given filter.
|
||||
// If monitor is true, only blocked requests are logged.
|
||||
// If debug is true, all requests and filter rules are logged.
|
||||
func NewHTTPProxy(filter FilterFunc, debug, monitor bool) *HTTPProxy {
|
||||
return &HTTPProxy{
|
||||
filter: filter,
|
||||
debug: debug,
|
||||
monitor: monitor,
|
||||
}
|
||||
}
|
||||
|
||||
// Start starts the HTTP proxy on a random available port.
|
||||
func (p *HTTPProxy) Start() (int, error) {
|
||||
listener, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("failed to listen: %w", err)
|
||||
}
|
||||
|
||||
p.listener = listener
|
||||
p.server = &http.Server{
|
||||
Handler: http.HandlerFunc(p.handleRequest),
|
||||
ReadHeaderTimeout: 10 * time.Second,
|
||||
}
|
||||
|
||||
p.mu.Lock()
|
||||
p.running = true
|
||||
p.mu.Unlock()
|
||||
|
||||
go func() {
|
||||
if err := p.server.Serve(listener); err != nil && err != http.ErrServerClosed {
|
||||
p.logDebug("HTTP proxy server error: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
addr := listener.Addr().(*net.TCPAddr)
|
||||
p.logDebug("HTTP proxy listening on localhost:%d", addr.Port)
|
||||
return addr.Port, nil
|
||||
}
|
||||
|
||||
// Stop stops the HTTP proxy.
|
||||
func (p *HTTPProxy) Stop() error {
|
||||
p.mu.Lock()
|
||||
p.running = false
|
||||
p.mu.Unlock()
|
||||
|
||||
if p.server != nil {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
return p.server.Shutdown(ctx)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Port returns the port the proxy is listening on.
|
||||
func (p *HTTPProxy) Port() int {
|
||||
if p.listener == nil {
|
||||
return 0
|
||||
}
|
||||
return p.listener.Addr().(*net.TCPAddr).Port
|
||||
}
|
||||
|
||||
func (p *HTTPProxy) handleRequest(w http.ResponseWriter, r *http.Request) {
|
||||
if r.Method == http.MethodConnect {
|
||||
p.handleConnect(w, r)
|
||||
} else {
|
||||
p.handleHTTP(w, r)
|
||||
}
|
||||
}
|
||||
|
||||
// handleConnect handles HTTPS CONNECT requests (tunnel).
//
// After the filter check it dials the target, hijacks the client
// connection, writes "200 Connection Established" by hand, and then
// blindly pipes bytes in both directions until either side closes.
// The proxy never sees the tunneled plaintext.
func (p *HTTPProxy) handleConnect(w http.ResponseWriter, r *http.Request) {
	start := time.Now()
	// For CONNECT, r.Host is "host:port"; fall back to port 443 when no
	// port is present (SplitHostPort errors on a bare host).
	host, portStr, err := net.SplitHostPort(r.Host)
	if err != nil {
		host = r.Host
		portStr = "443"
	}

	port := 443
	if portStr != "" {
		// Non-numeric port strings are silently ignored, keeping 443.
		if p, err := strconv.Atoi(portStr); err == nil {
			port = p
		}
	}

	// Check if allowed
	if !p.filter(host, port) {
		p.logRequest("CONNECT", fmt.Sprintf("https://%s:%d", host, port), host, 403, "BLOCKED", time.Since(start))
		http.Error(w, "Connection blocked by network allowlist", http.StatusForbidden)
		return
	}

	p.logRequest("CONNECT", fmt.Sprintf("https://%s:%d", host, port), host, 200, "ALLOWED", time.Since(start))

	// Connect to target
	targetConn, err := net.DialTimeout("tcp", fmt.Sprintf("%s:%d", host, port), 10*time.Second)
	if err != nil {
		p.logDebug("CONNECT dial failed: %s:%d: %v", host, port, err)
		http.Error(w, "Bad Gateway", http.StatusBadGateway)
		return
	}
	defer func() { _ = targetConn.Close() }()

	// Hijack the connection so we can speak raw TCP to the client.
	hijacker, ok := w.(http.Hijacker)
	if !ok {
		http.Error(w, "Hijacking not supported", http.StatusInternalServerError)
		return
	}

	clientConn, _, err := hijacker.Hijack()
	if err != nil {
		http.Error(w, "Failed to hijack connection", http.StatusInternalServerError)
		return
	}
	defer func() { _ = clientConn.Close() }()

	// After Hijack the http package no longer manages the response, so
	// the status line must be written manually.
	if _, err := clientConn.Write([]byte("HTTP/1.1 200 Connection Established\r\n\r\n")); err != nil {
		return
	}

	// Pipe data bidirectionally. Copy errors are expected during normal
	// teardown and are deliberately ignored.
	var wg sync.WaitGroup
	wg.Add(2)

	go func() {
		defer wg.Done()
		_, _ = io.Copy(targetConn, clientConn)
	}()

	go func() {
		defer wg.Done()
		_, _ = io.Copy(clientConn, targetConn)
	}()

	wg.Wait()
}
|
||||
|
||||
// handleHTTP handles regular HTTP proxy requests.
//
// The target is checked against the filter, the request is replayed
// against it with hop-by-hop proxy headers stripped, and the response
// (including redirects, which are not followed) is streamed back.
func (p *HTTPProxy) handleHTTP(w http.ResponseWriter, r *http.Request) {
	start := time.Now()
	// For a proxy request the RequestURI is the absolute target URL.
	targetURL, err := url.Parse(r.RequestURI)
	if err != nil {
		http.Error(w, "Bad Request", http.StatusBadRequest)
		return
	}

	host := targetURL.Hostname()
	// Default port: 80 for http, 443 for https, unless the URL names one.
	port := 80
	if targetURL.Port() != "" {
		if p, err := strconv.Atoi(targetURL.Port()); err == nil {
			port = p
		}
	} else if targetURL.Scheme == "https" {
		port = 443
	}

	if !p.filter(host, port) {
		p.logRequest(r.Method, r.RequestURI, host, 403, "BLOCKED", time.Since(start))
		http.Error(w, "Connection blocked by network allowlist", http.StatusForbidden)
		return
	}

	// Create new request and copy headers
	proxyReq, err := http.NewRequest(r.Method, r.RequestURI, r.Body)
	if err != nil {
		http.Error(w, "Internal Server Error", http.StatusInternalServerError)
		return
	}
	for key, values := range r.Header {
		for _, value := range values {
			proxyReq.Header.Add(key, value)
		}
	}
	proxyReq.Host = targetURL.Host

	// Remove hop-by-hop headers
	proxyReq.Header.Del("Proxy-Connection")
	proxyReq.Header.Del("Proxy-Authorization")

	client := &http.Client{
		Timeout: 30 * time.Second,
		// Pass redirects through to the client rather than following them.
		CheckRedirect: func(req *http.Request, via []*http.Request) error {
			return http.ErrUseLastResponse
		},
	}

	resp, err := client.Do(proxyReq)
	if err != nil {
		p.logRequest(r.Method, r.RequestURI, host, 502, "ERROR", time.Since(start))
		http.Error(w, "Bad Gateway", http.StatusBadGateway)
		return
	}
	defer func() { _ = resp.Body.Close() }()

	// Copy response headers
	for key, values := range resp.Header {
		for _, value := range values {
			w.Header().Add(key, value)
		}
	}

	w.WriteHeader(resp.StatusCode)
	_, _ = io.Copy(w, resp.Body)

	p.logRequest(r.Method, r.RequestURI, host, resp.StatusCode, "ALLOWED", time.Since(start))
}
|
||||
|
||||
func (p *HTTPProxy) logDebug(format string, args ...interface{}) {
|
||||
if p.debug {
|
||||
fmt.Fprintf(os.Stderr, "[fence:http] "+format+"\n", args...)
|
||||
}
|
||||
}
|
||||
|
||||
// logRequest logs a detailed request entry.
|
||||
// In monitor mode (-m), only blocked/error requests are logged.
|
||||
// In debug mode (-d), all requests are logged.
|
||||
func (p *HTTPProxy) logRequest(method, url, host string, status int, action string, duration time.Duration) {
|
||||
isBlocked := action == "BLOCKED" || action == "ERROR"
|
||||
|
||||
if p.monitor && !p.debug && !isBlocked {
|
||||
return
|
||||
}
|
||||
|
||||
if !p.debug && !p.monitor {
|
||||
return
|
||||
}
|
||||
|
||||
timestamp := time.Now().Format("15:04:05")
|
||||
statusIcon := "✓"
|
||||
switch action {
|
||||
case "BLOCKED":
|
||||
statusIcon = "✗"
|
||||
case "ERROR":
|
||||
statusIcon = "!"
|
||||
}
|
||||
fmt.Fprintf(os.Stderr, "[fence:http] %s %s %-7s %d %s %s (%v)\n", timestamp, statusIcon, method, status, host, truncateURL(url, 60), duration.Round(time.Millisecond))
|
||||
}
|
||||
|
||||
// truncateURL shortens a URL for display.
// URLs no longer than maxLen are returned unchanged; longer ones are cut
// to maxLen characters with a trailing "..." marker. When maxLen is too
// small to hold the marker (< 4), the URL is hard-cut instead — the old
// implementation sliced with a negative index and panicked in that case.
func truncateURL(url string, maxLen int) string {
	if len(url) <= maxLen {
		return url
	}
	if maxLen <= 3 {
		// No room for "..."; return the bare prefix (or nothing for a
		// non-positive budget) rather than panicking.
		if maxLen <= 0 {
			return ""
		}
		return url[:maxLen]
	}
	return url[:maxLen-3] + "..."
}
|
||||
|
||||
// CreateDomainFilter creates a filter function from a config.
|
||||
// When debug is true, logs filter rule matches to stderr.
|
||||
func CreateDomainFilter(cfg *config.Config, debug bool) FilterFunc {
|
||||
return func(host string, port int) bool {
|
||||
if cfg == nil {
|
||||
// No config = deny all
|
||||
if debug {
|
||||
fmt.Fprintf(os.Stderr, "[fence:filter] No config, denying: %s:%d\n", host, port)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Check denied domains first
|
||||
for _, denied := range cfg.Network.DeniedDomains {
|
||||
if config.MatchesDomain(host, denied) {
|
||||
if debug {
|
||||
fmt.Fprintf(os.Stderr, "[fence:filter] Denied by rule: %s:%d (matched %s)\n", host, port, denied)
|
||||
}
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// Check allowed domains
|
||||
for _, allowed := range cfg.Network.AllowedDomains {
|
||||
if config.MatchesDomain(host, allowed) {
|
||||
if debug {
|
||||
fmt.Fprintf(os.Stderr, "[fence:filter] Allowed by rule: %s:%d (matched %s)\n", host, port, allowed)
|
||||
}
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
if debug {
|
||||
fmt.Fprintf(os.Stderr, "[fence:filter] No matching rule, denying: %s:%d\n", host, port)
|
||||
}
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// GetHostFromRequest extracts the hostname from a request.
|
||||
func GetHostFromRequest(r *http.Request) string {
|
||||
host := r.Host
|
||||
if h := r.URL.Hostname(); h != "" {
|
||||
host = h
|
||||
}
|
||||
// Strip port
|
||||
if idx := strings.LastIndex(host, ":"); idx != -1 {
|
||||
host = host[:idx]
|
||||
}
|
||||
return host
|
||||
}
|
||||
@@ -1,308 +0,0 @@
|
||||
package proxy
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"net/url"
|
||||
"testing"
|
||||
|
||||
"github.com/Use-Tusk/fence/internal/config"
|
||||
)
|
||||
|
||||
func TestTruncateURL(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
url string
|
||||
maxLen int
|
||||
want string
|
||||
}{
|
||||
{"short url", "https://example.com", 50, "https://example.com"},
|
||||
{"exact length", "https://example.com", 19, "https://example.com"},
|
||||
{"needs truncation", "https://example.com/very/long/path/to/resource", 30, "https://example.com/very/lo..."},
|
||||
{"empty url", "", 50, ""},
|
||||
{"very short max", "https://example.com", 10, "https:/..."},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := truncateURL(tt.url, tt.maxLen)
|
||||
if got != tt.want {
|
||||
t.Errorf("truncateURL(%q, %d) = %q, want %q", tt.url, tt.maxLen, got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetHostFromRequest(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
host string
|
||||
urlStr string
|
||||
wantHost string
|
||||
}{
|
||||
{
|
||||
name: "host header only",
|
||||
host: "example.com",
|
||||
urlStr: "/path",
|
||||
wantHost: "example.com",
|
||||
},
|
||||
{
|
||||
name: "host header with port",
|
||||
host: "example.com:8080",
|
||||
urlStr: "/path",
|
||||
wantHost: "example.com",
|
||||
},
|
||||
{
|
||||
name: "full URL overrides host",
|
||||
host: "other.com",
|
||||
urlStr: "http://example.com/path",
|
||||
wantHost: "example.com",
|
||||
},
|
||||
{
|
||||
name: "url with port",
|
||||
host: "other.com",
|
||||
urlStr: "http://example.com:9000/path",
|
||||
wantHost: "example.com",
|
||||
},
|
||||
{
|
||||
name: "ipv6 host",
|
||||
host: "[::1]:8080",
|
||||
urlStr: "/path",
|
||||
wantHost: "[::1]",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
parsedURL, _ := url.Parse(tt.urlStr)
|
||||
req := &http.Request{
|
||||
Host: tt.host,
|
||||
URL: parsedURL,
|
||||
}
|
||||
|
||||
got := GetHostFromRequest(req)
|
||||
if got != tt.wantHost {
|
||||
t.Errorf("GetHostFromRequest() = %q, want %q", got, tt.wantHost)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCreateDomainFilter(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
cfg *config.Config
|
||||
host string
|
||||
port int
|
||||
allowed bool
|
||||
}{
|
||||
{
|
||||
name: "nil config denies all",
|
||||
cfg: nil,
|
||||
host: "example.com",
|
||||
port: 443,
|
||||
allowed: false,
|
||||
},
|
||||
{
|
||||
name: "allowed domain",
|
||||
cfg: &config.Config{
|
||||
Network: config.NetworkConfig{
|
||||
AllowedDomains: []string{"example.com"},
|
||||
},
|
||||
},
|
||||
host: "example.com",
|
||||
port: 443,
|
||||
allowed: true,
|
||||
},
|
||||
{
|
||||
name: "denied domain takes precedence",
|
||||
cfg: &config.Config{
|
||||
Network: config.NetworkConfig{
|
||||
AllowedDomains: []string{"example.com"},
|
||||
DeniedDomains: []string{"example.com"},
|
||||
},
|
||||
},
|
||||
host: "example.com",
|
||||
port: 443,
|
||||
allowed: false,
|
||||
},
|
||||
{
|
||||
name: "wildcard allowed",
|
||||
cfg: &config.Config{
|
||||
Network: config.NetworkConfig{
|
||||
AllowedDomains: []string{"*.example.com"},
|
||||
},
|
||||
},
|
||||
host: "api.example.com",
|
||||
port: 443,
|
||||
allowed: true,
|
||||
},
|
||||
{
|
||||
name: "wildcard denied",
|
||||
cfg: &config.Config{
|
||||
Network: config.NetworkConfig{
|
||||
AllowedDomains: []string{"*.example.com"},
|
||||
DeniedDomains: []string{"*.blocked.example.com"},
|
||||
},
|
||||
},
|
||||
host: "api.blocked.example.com",
|
||||
port: 443,
|
||||
allowed: false,
|
||||
},
|
||||
{
|
||||
name: "unmatched domain denied",
|
||||
cfg: &config.Config{
|
||||
Network: config.NetworkConfig{
|
||||
AllowedDomains: []string{"example.com"},
|
||||
},
|
||||
},
|
||||
host: "other.com",
|
||||
port: 443,
|
||||
allowed: false,
|
||||
},
|
||||
{
|
||||
name: "empty allowed list denies all",
|
||||
cfg: &config.Config{
|
||||
Network: config.NetworkConfig{
|
||||
AllowedDomains: []string{},
|
||||
},
|
||||
},
|
||||
host: "example.com",
|
||||
port: 443,
|
||||
allowed: false,
|
||||
},
|
||||
{
|
||||
name: "star wildcard allows all",
|
||||
cfg: &config.Config{
|
||||
Network: config.NetworkConfig{
|
||||
AllowedDomains: []string{"*"},
|
||||
},
|
||||
},
|
||||
host: "any-domain.example.com",
|
||||
port: 443,
|
||||
allowed: true,
|
||||
},
|
||||
{
|
||||
name: "star wildcard with deny list",
|
||||
cfg: &config.Config{
|
||||
Network: config.NetworkConfig{
|
||||
AllowedDomains: []string{"*"},
|
||||
DeniedDomains: []string{"blocked.com"},
|
||||
},
|
||||
},
|
||||
host: "blocked.com",
|
||||
port: 443,
|
||||
allowed: false,
|
||||
},
|
||||
{
|
||||
name: "star wildcard allows non-denied",
|
||||
cfg: &config.Config{
|
||||
Network: config.NetworkConfig{
|
||||
AllowedDomains: []string{"*"},
|
||||
DeniedDomains: []string{"blocked.com"},
|
||||
},
|
||||
},
|
||||
host: "allowed.com",
|
||||
port: 443,
|
||||
allowed: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
filter := CreateDomainFilter(tt.cfg, false)
|
||||
got := filter(tt.host, tt.port)
|
||||
if got != tt.allowed {
|
||||
t.Errorf("CreateDomainFilter() filter(%q, %d) = %v, want %v", tt.host, tt.port, got, tt.allowed)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCreateDomainFilterCaseInsensitive(t *testing.T) {
|
||||
cfg := &config.Config{
|
||||
Network: config.NetworkConfig{
|
||||
AllowedDomains: []string{"Example.COM"},
|
||||
},
|
||||
}
|
||||
|
||||
filter := CreateDomainFilter(cfg, false)
|
||||
|
||||
tests := []struct {
|
||||
host string
|
||||
allowed bool
|
||||
}{
|
||||
{"example.com", true},
|
||||
{"EXAMPLE.COM", true},
|
||||
{"Example.Com", true},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.host, func(t *testing.T) {
|
||||
got := filter(tt.host, 443)
|
||||
if got != tt.allowed {
|
||||
t.Errorf("filter(%q) = %v, want %v", tt.host, got, tt.allowed)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewHTTPProxy(t *testing.T) {
|
||||
filter := func(host string, port int) bool { return true }
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
debug bool
|
||||
monitor bool
|
||||
}{
|
||||
{"default", false, false},
|
||||
{"debug mode", true, false},
|
||||
{"monitor mode", false, true},
|
||||
{"both modes", true, true},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
proxy := NewHTTPProxy(filter, tt.debug, tt.monitor)
|
||||
if proxy == nil {
|
||||
t.Fatal("NewHTTPProxy() returned nil")
|
||||
}
|
||||
if proxy.debug != tt.debug {
|
||||
t.Errorf("debug = %v, want %v", proxy.debug, tt.debug)
|
||||
}
|
||||
if proxy.monitor != tt.monitor {
|
||||
t.Errorf("monitor = %v, want %v", proxy.monitor, tt.monitor)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestHTTPProxyStartStop(t *testing.T) {
|
||||
filter := func(host string, port int) bool { return true }
|
||||
proxy := NewHTTPProxy(filter, false, false)
|
||||
|
||||
port, err := proxy.Start()
|
||||
if err != nil {
|
||||
t.Fatalf("Start() error = %v", err)
|
||||
}
|
||||
|
||||
if port <= 0 {
|
||||
t.Errorf("Start() returned invalid port: %d", port)
|
||||
}
|
||||
|
||||
if proxy.Port() != port {
|
||||
t.Errorf("Port() = %d, want %d", proxy.Port(), port)
|
||||
}
|
||||
|
||||
if err := proxy.Stop(); err != nil {
|
||||
t.Errorf("Stop() error = %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHTTPProxyPortBeforeStart(t *testing.T) {
|
||||
filter := func(host string, port int) bool { return true }
|
||||
proxy := NewHTTPProxy(filter, false, false)
|
||||
|
||||
if proxy.Port() != 0 {
|
||||
t.Errorf("Port() before Start() = %d, want 0", proxy.Port())
|
||||
}
|
||||
}
|
||||
@@ -1,106 +0,0 @@
|
||||
package proxy
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/things-go/go-socks5"
|
||||
)
|
||||
|
||||
// SOCKSProxy is a SOCKS5 proxy server with domain filtering.
|
||||
type SOCKSProxy struct {
|
||||
server *socks5.Server
|
||||
listener net.Listener
|
||||
filter FilterFunc
|
||||
debug bool
|
||||
monitor bool
|
||||
port int
|
||||
}
|
||||
|
||||
// NewSOCKSProxy creates a new SOCKS5 proxy with the given filter.
|
||||
// If monitor is true, only blocked connections are logged.
|
||||
// If debug is true, all connections are logged.
|
||||
func NewSOCKSProxy(filter FilterFunc, debug, monitor bool) *SOCKSProxy {
|
||||
return &SOCKSProxy{
|
||||
filter: filter,
|
||||
debug: debug,
|
||||
monitor: monitor,
|
||||
}
|
||||
}
|
||||
|
||||
// fenceRuleSet implements socks5.RuleSet for domain filtering.
|
||||
type fenceRuleSet struct {
|
||||
filter FilterFunc
|
||||
debug bool
|
||||
monitor bool
|
||||
}
|
||||
|
||||
func (r *fenceRuleSet) Allow(ctx context.Context, req *socks5.Request) (context.Context, bool) {
|
||||
host := req.DestAddr.FQDN
|
||||
if host == "" {
|
||||
host = req.DestAddr.IP.String()
|
||||
}
|
||||
port := req.DestAddr.Port
|
||||
|
||||
allowed := r.filter(host, port)
|
||||
|
||||
shouldLog := r.debug || (r.monitor && !allowed)
|
||||
if shouldLog {
|
||||
timestamp := time.Now().Format("15:04:05")
|
||||
if allowed {
|
||||
fmt.Fprintf(os.Stderr, "[fence:socks] %s ✓ CONNECT %s:%d ALLOWED\n", timestamp, host, port)
|
||||
} else {
|
||||
fmt.Fprintf(os.Stderr, "[fence:socks] %s ✗ CONNECT %s:%d BLOCKED\n", timestamp, host, port)
|
||||
}
|
||||
}
|
||||
return ctx, allowed
|
||||
}
|
||||
|
||||
// Start starts the SOCKS5 proxy on a random available port.
|
||||
func (p *SOCKSProxy) Start() (int, error) {
|
||||
// Create listener first to get a random port
|
||||
listener, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("failed to listen: %w", err)
|
||||
}
|
||||
p.listener = listener
|
||||
p.port = listener.Addr().(*net.TCPAddr).Port
|
||||
|
||||
server := socks5.NewServer(
|
||||
socks5.WithRule(&fenceRuleSet{
|
||||
filter: p.filter,
|
||||
debug: p.debug,
|
||||
monitor: p.monitor,
|
||||
}),
|
||||
)
|
||||
p.server = server
|
||||
|
||||
go func() {
|
||||
if err := p.server.Serve(p.listener); err != nil {
|
||||
if p.debug {
|
||||
fmt.Fprintf(os.Stderr, "[fence:socks] Server error: %v\n", err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
if p.debug {
|
||||
fmt.Fprintf(os.Stderr, "[fence:socks] SOCKS5 proxy listening on localhost:%d\n", p.port)
|
||||
}
|
||||
return p.port, nil
|
||||
}
|
||||
|
||||
// Stop stops the SOCKS5 proxy.
|
||||
func (p *SOCKSProxy) Stop() error {
|
||||
if p.listener != nil {
|
||||
return p.listener.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Port returns the port the proxy is listening on.
|
||||
func (p *SOCKSProxy) Port() int {
|
||||
return p.port
|
||||
}
|
||||
@@ -1,130 +0,0 @@
|
||||
package proxy
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
"testing"
|
||||
|
||||
"github.com/things-go/go-socks5"
|
||||
"github.com/things-go/go-socks5/statute"
|
||||
)
|
||||
|
||||
func TestFenceRuleSetAllow(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
fqdn string
|
||||
ip net.IP
|
||||
port int
|
||||
allowed bool
|
||||
}{
|
||||
{
|
||||
name: "allow by FQDN",
|
||||
fqdn: "allowed.com",
|
||||
port: 443,
|
||||
allowed: true,
|
||||
},
|
||||
{
|
||||
name: "deny by FQDN",
|
||||
fqdn: "blocked.com",
|
||||
port: 443,
|
||||
allowed: false,
|
||||
},
|
||||
{
|
||||
name: "fallback to IP when FQDN empty",
|
||||
fqdn: "",
|
||||
ip: net.ParseIP("1.2.3.4"),
|
||||
port: 80,
|
||||
allowed: false,
|
||||
},
|
||||
{
|
||||
name: "allow with IP fallback",
|
||||
fqdn: "",
|
||||
ip: net.ParseIP("127.0.0.1"),
|
||||
port: 8080,
|
||||
allowed: true,
|
||||
},
|
||||
}
|
||||
|
||||
filter := func(host string, port int) bool {
|
||||
return host == "allowed.com" || host == "127.0.0.1"
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
rs := &fenceRuleSet{filter: filter, debug: false, monitor: false}
|
||||
req := &socks5.Request{
|
||||
DestAddr: &statute.AddrSpec{
|
||||
FQDN: tt.fqdn,
|
||||
IP: tt.ip,
|
||||
Port: tt.port,
|
||||
},
|
||||
}
|
||||
|
||||
_, allowed := rs.Allow(context.Background(), req)
|
||||
if allowed != tt.allowed {
|
||||
t.Errorf("Allow() = %v, want %v", allowed, tt.allowed)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewSOCKSProxy(t *testing.T) {
|
||||
filter := func(host string, port int) bool { return true }
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
debug bool
|
||||
monitor bool
|
||||
}{
|
||||
{"default", false, false},
|
||||
{"debug mode", true, false},
|
||||
{"monitor mode", false, true},
|
||||
{"both modes", true, true},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
proxy := NewSOCKSProxy(filter, tt.debug, tt.monitor)
|
||||
if proxy == nil {
|
||||
t.Fatal("NewSOCKSProxy() returned nil")
|
||||
}
|
||||
if proxy.debug != tt.debug {
|
||||
t.Errorf("debug = %v, want %v", proxy.debug, tt.debug)
|
||||
}
|
||||
if proxy.monitor != tt.monitor {
|
||||
t.Errorf("monitor = %v, want %v", proxy.monitor, tt.monitor)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestSOCKSProxyStartStop(t *testing.T) {
|
||||
filter := func(host string, port int) bool { return true }
|
||||
proxy := NewSOCKSProxy(filter, false, false)
|
||||
|
||||
port, err := proxy.Start()
|
||||
if err != nil {
|
||||
t.Fatalf("Start() error = %v", err)
|
||||
}
|
||||
|
||||
if port <= 0 {
|
||||
t.Errorf("Start() returned invalid port: %d", port)
|
||||
}
|
||||
|
||||
if proxy.Port() != port {
|
||||
t.Errorf("Port() = %d, want %d", proxy.Port(), port)
|
||||
}
|
||||
|
||||
if err := proxy.Stop(); err != nil {
|
||||
t.Errorf("Stop() error = %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSOCKSProxyPortBeforeStart(t *testing.T) {
|
||||
filter := func(host string, port int) bool { return true }
|
||||
proxy := NewSOCKSProxy(filter, false, false)
|
||||
|
||||
if proxy.Port() != 0 {
|
||||
t.Errorf("Port() before Start() = %d, want 0", proxy.Port())
|
||||
}
|
||||
}
|
||||
@@ -10,7 +10,7 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/Use-Tusk/fence/internal/config"
|
||||
"gitea.app.monadical.io/monadical/greywall/internal/config"
|
||||
)
|
||||
|
||||
// ============================================================================
|
||||
@@ -305,16 +305,14 @@ func BenchmarkOverhead(b *testing.B) {
|
||||
|
||||
func skipBenchIfSandboxed(b *testing.B) {
|
||||
b.Helper()
|
||||
if os.Getenv("FENCE_SANDBOX") == "1" {
|
||||
b.Skip("already running inside Fence sandbox")
|
||||
if os.Getenv("GREYWALL_SANDBOX") == "1" {
|
||||
b.Skip("already running inside Greywall sandbox")
|
||||
}
|
||||
}
|
||||
|
||||
func benchConfig(workspace string) *config.Config {
|
||||
return &config.Config{
|
||||
Network: config.NetworkConfig{
|
||||
AllowedDomains: []string{},
|
||||
},
|
||||
Network: config.NetworkConfig{},
|
||||
Filesystem: config.FilesystemConfig{
|
||||
AllowWrite: []string{workspace},
|
||||
},
|
||||
@@ -335,7 +333,7 @@ func execBenchCommand(b *testing.B, command string, workDir string) {
|
||||
shell = "/bin/bash"
|
||||
}
|
||||
|
||||
cmd := exec.CommandContext(ctx, shell, "-c", command)
|
||||
cmd := exec.CommandContext(ctx, shell, "-c", command) //nolint:gosec // test helper running shell commands
|
||||
cmd.Dir = workDir
|
||||
cmd.Stdout = &bytes.Buffer{}
|
||||
cmd.Stderr = &bytes.Buffer{}
|
||||
|
||||
0
internal/sandbox/bin/.gitkeep
Normal file
0
internal/sandbox/bin/.gitkeep
Normal file
@@ -5,7 +5,7 @@ import (
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/Use-Tusk/fence/internal/config"
|
||||
"gitea.app.monadical.io/monadical/greywall/internal/config"
|
||||
)
|
||||
|
||||
// CommandBlockedError is returned when a command is blocked by policy.
|
||||
|
||||
@@ -3,7 +3,7 @@ package sandbox
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/Use-Tusk/fence/internal/config"
|
||||
"gitea.app.monadical.io/monadical/greywall/internal/config"
|
||||
)
|
||||
|
||||
func TestCheckCommand_BasicDeny(t *testing.T) {
|
||||
|
||||
@@ -28,6 +28,30 @@ var DangerousDirectories = []string{
|
||||
".claude/agents",
|
||||
}
|
||||
|
||||
// SensitiveProjectFiles lists files within the project directory that should be
|
||||
// denied for both read and write access. These commonly contain secrets.
|
||||
var SensitiveProjectFiles = []string{
|
||||
".env",
|
||||
".env.local",
|
||||
".env.development",
|
||||
".env.production",
|
||||
".env.staging",
|
||||
".env.test",
|
||||
}
|
||||
|
||||
// GetSensitiveProjectPaths returns concrete paths for sensitive files within the
|
||||
// given directory. Only returns paths for files that actually exist.
|
||||
func GetSensitiveProjectPaths(cwd string) []string {
|
||||
var paths []string
|
||||
for _, f := range SensitiveProjectFiles {
|
||||
p := filepath.Join(cwd, f)
|
||||
if _, err := os.Stat(p); err == nil {
|
||||
paths = append(paths, p)
|
||||
}
|
||||
}
|
||||
return paths
|
||||
}
|
||||
|
||||
// GetDefaultWritePaths returns system paths that should be writable for commands to work.
|
||||
func GetDefaultWritePaths() []string {
|
||||
home, _ := os.UserHomeDir()
|
||||
@@ -39,14 +63,14 @@ func GetDefaultWritePaths() []string {
|
||||
"/dev/tty",
|
||||
"/dev/dtracehelper",
|
||||
"/dev/autofs_nowait",
|
||||
"/tmp/fence",
|
||||
"/private/tmp/fence",
|
||||
"/tmp/greywall",
|
||||
"/private/tmp/greywall",
|
||||
}
|
||||
|
||||
if home != "" {
|
||||
paths = append(paths,
|
||||
filepath.Join(home, ".npm/_logs"),
|
||||
filepath.Join(home, ".fence/debug"),
|
||||
filepath.Join(home, ".greywall/debug"),
|
||||
)
|
||||
}
|
||||
|
||||
|
||||
@@ -14,7 +14,7 @@ func TestGetDefaultWritePaths(t *testing.T) {
|
||||
t.Error("GetDefaultWritePaths() returned empty slice")
|
||||
}
|
||||
|
||||
essentialPaths := []string{"/dev/stdout", "/dev/stderr", "/dev/null", "/tmp/fence"}
|
||||
essentialPaths := []string{"/dev/stdout", "/dev/stderr", "/dev/null", "/tmp/greywall"}
|
||||
for _, essential := range essentialPaths {
|
||||
found := slices.Contains(paths, essential)
|
||||
if !found {
|
||||
|
||||
@@ -17,8 +17,8 @@ import (
|
||||
|
||||
// skipIfLandlockNotUsable skips tests that require the Landlock wrapper.
|
||||
// The Landlock wrapper re-executes the binary with --landlock-apply, which only
|
||||
// the fence CLI understands. Test binaries (e.g., sandbox.test) don't have this
|
||||
// handler, so Landlock tests must be skipped when not running as the fence CLI.
|
||||
// the greywall CLI understands. Test binaries (e.g., sandbox.test) don't have this
|
||||
// handler, so Landlock tests must be skipped when not running as the greywall CLI.
|
||||
// TODO: consider removing tests that call this function, for now can keep them
|
||||
// as documentation.
|
||||
func skipIfLandlockNotUsable(t *testing.T) {
|
||||
@@ -28,22 +28,18 @@ func skipIfLandlockNotUsable(t *testing.T) {
|
||||
t.Skip("skipping: Landlock not available on this kernel")
|
||||
}
|
||||
exePath, _ := os.Executable()
|
||||
if !strings.Contains(filepath.Base(exePath), "fence") {
|
||||
t.Skip("skipping: Landlock wrapper requires fence CLI (test binary cannot use --landlock-apply)")
|
||||
if !strings.Contains(filepath.Base(exePath), "greywall") {
|
||||
t.Skip("skipping: Landlock wrapper requires greywall CLI (test binary cannot use --landlock-apply)")
|
||||
}
|
||||
}
|
||||
|
||||
// assertNetworkBlocked verifies that a network command was blocked.
|
||||
// It checks for either a non-zero exit code OR the proxy's blocked message.
|
||||
// With no proxy configured, --unshare-net blocks all network at the kernel level.
|
||||
func assertNetworkBlocked(t *testing.T, result *SandboxTestResult) {
|
||||
t.Helper()
|
||||
blockedMessage := "Connection blocked by network allowlist"
|
||||
if result.Failed() {
|
||||
return // Command failed = blocked
|
||||
}
|
||||
if strings.Contains(result.Stdout, blockedMessage) || strings.Contains(result.Stderr, blockedMessage) {
|
||||
return // Proxy blocked the request
|
||||
}
|
||||
t.Errorf("expected network request to be blocked, but it succeeded\nstdout: %s\nstderr: %s",
|
||||
result.Stdout, result.Stderr)
|
||||
}
|
||||
@@ -55,7 +51,7 @@ func TestLinux_LandlockBlocksWriteOutsideWorkspace(t *testing.T) {
|
||||
skipIfLandlockNotUsable(t)
|
||||
|
||||
workspace := createTempWorkspace(t)
|
||||
outsideFile := "/tmp/fence-test-outside-" + filepath.Base(workspace) + ".txt"
|
||||
outsideFile := "/tmp/greywall-test-outside-" + filepath.Base(workspace) + ".txt"
|
||||
defer func() { _ = os.Remove(outsideFile) }()
|
||||
|
||||
cfg := testConfigWithWorkspace(workspace)
|
||||
@@ -202,24 +198,24 @@ func TestLinux_LandlockBlocksWriteSystemFiles(t *testing.T) {
|
||||
cfg := testConfigWithWorkspace(workspace)
|
||||
|
||||
// Attempting to write to /etc should fail
|
||||
result := runUnderSandbox(t, cfg, "touch /etc/fence-test-file", workspace)
|
||||
result := runUnderSandbox(t, cfg, "touch /etc/greywall-test-file", workspace)
|
||||
|
||||
assertBlocked(t, result)
|
||||
assertFileNotExists(t, "/etc/fence-test-file")
|
||||
assertFileNotExists(t, "/etc/greywall-test-file")
|
||||
}
|
||||
|
||||
// TestLinux_LandlockAllowsTmpFence verifies /tmp/fence is writable.
|
||||
func TestLinux_LandlockAllowsTmpFence(t *testing.T) {
|
||||
// TestLinux_LandlockAllowsTmpGreywall verifies /tmp/greywall is writable.
|
||||
func TestLinux_LandlockAllowsTmpGreywall(t *testing.T) {
|
||||
skipIfAlreadySandboxed(t)
|
||||
skipIfLandlockNotUsable(t)
|
||||
|
||||
workspace := createTempWorkspace(t)
|
||||
cfg := testConfigWithWorkspace(workspace)
|
||||
|
||||
// Ensure /tmp/fence exists
|
||||
_ = os.MkdirAll("/tmp/fence", 0o750)
|
||||
// Ensure /tmp/greywall exists
|
||||
_ = os.MkdirAll("/tmp/greywall", 0o750)
|
||||
|
||||
testFile := "/tmp/fence/test-file-" + filepath.Base(workspace)
|
||||
testFile := "/tmp/greywall/test-file-" + filepath.Base(workspace)
|
||||
defer func() { _ = os.Remove(testFile) }()
|
||||
|
||||
result := runUnderSandbox(t, cfg, "echo 'test' > "+testFile, workspace)
|
||||
@@ -277,7 +273,7 @@ func TestLinux_NetworkBlocksCurl(t *testing.T) {
|
||||
|
||||
workspace := createTempWorkspace(t)
|
||||
cfg := testConfigWithWorkspace(workspace)
|
||||
// No domains allowed = all network blocked
|
||||
// No proxy = all network blocked
|
||||
|
||||
result := runUnderSandboxWithTimeout(t, cfg, "curl -s --connect-timeout 2 --max-time 3 http://example.com", workspace, 10*time.Second)
|
||||
|
||||
@@ -344,18 +340,19 @@ func TestLinux_NetworkBlocksDevTcp(t *testing.T) {
|
||||
assertBlocked(t, result)
|
||||
}
|
||||
|
||||
// TestLinux_ProxyAllowsAllowedDomains verifies the proxy allows configured domains.
|
||||
func TestLinux_ProxyAllowsAllowedDomains(t *testing.T) {
|
||||
// TestLinux_TransparentProxyRoutesThroughSocks verifies traffic routes through SOCKS5 proxy.
|
||||
// This test requires a running SOCKS5 proxy and actual network connectivity.
|
||||
func TestLinux_TransparentProxyRoutesThroughSocks(t *testing.T) {
|
||||
skipIfAlreadySandboxed(t)
|
||||
skipIfCommandNotFound(t, "curl")
|
||||
|
||||
workspace := createTempWorkspace(t)
|
||||
cfg := testConfigWithNetwork("httpbin.org")
|
||||
cfg := testConfigWithProxy("socks5://localhost:1080")
|
||||
cfg.Filesystem.AllowWrite = []string{workspace}
|
||||
|
||||
// This test requires actual network - skip in CI if network is unavailable
|
||||
if os.Getenv("FENCE_TEST_NETWORK") != "1" {
|
||||
t.Skip("skipping: set FENCE_TEST_NETWORK=1 to run network tests")
|
||||
// This test requires actual network and a running SOCKS5 proxy
|
||||
if os.Getenv("GREYWALL_TEST_NETWORK") != "1" {
|
||||
t.Skip("skipping: set GREYWALL_TEST_NETWORK=1 to run network tests (requires SOCKS5 proxy on localhost:1080)")
|
||||
}
|
||||
|
||||
result := runUnderSandboxWithTimeout(t, cfg, "curl -s --connect-timeout 5 --max-time 10 https://httpbin.org/get", workspace, 15*time.Second)
|
||||
@@ -458,10 +455,10 @@ func TestLinux_SymlinkEscapeBlocked(t *testing.T) {
|
||||
_ = os.Symlink("/etc", symlinkPath)
|
||||
|
||||
// Try to write through the symlink
|
||||
result := runUnderSandbox(t, cfg, "echo 'test' > "+symlinkPath+"/fence-test", workspace)
|
||||
result := runUnderSandbox(t, cfg, "echo 'test' > "+symlinkPath+"/greywall-test", workspace)
|
||||
|
||||
assertBlocked(t, result)
|
||||
assertFileNotExists(t, "/etc/fence-test")
|
||||
assertFileNotExists(t, "/etc/greywall-test")
|
||||
}
|
||||
|
||||
// TestLinux_PathTraversalBlocked verifies path traversal attacks are prevented.
|
||||
@@ -473,10 +470,10 @@ func TestLinux_PathTraversalBlocked(t *testing.T) {
|
||||
cfg := testConfigWithWorkspace(workspace)
|
||||
|
||||
// Try to escape using ../../../
|
||||
result := runUnderSandbox(t, cfg, "touch ../../../../tmp/fence-escape-test", workspace)
|
||||
result := runUnderSandbox(t, cfg, "touch ../../../../tmp/greywall-escape-test", workspace)
|
||||
|
||||
assertBlocked(t, result)
|
||||
assertFileNotExists(t, "/tmp/fence-escape-test")
|
||||
assertFileNotExists(t, "/tmp/greywall-escape-test")
|
||||
}
|
||||
|
||||
// TestLinux_DeviceAccessBlocked verifies device files cannot be accessed.
|
||||
|
||||
@@ -20,7 +20,7 @@ func TestMacOS_SeatbeltBlocksWriteOutsideWorkspace(t *testing.T) {
|
||||
skipIfAlreadySandboxed(t)
|
||||
|
||||
workspace := createTempWorkspace(t)
|
||||
outsideFile := "/tmp/fence-test-outside-" + filepath.Base(workspace) + ".txt"
|
||||
outsideFile := "/tmp/greywall-test-outside-" + filepath.Base(workspace) + ".txt"
|
||||
defer func() { _ = os.Remove(outsideFile) }()
|
||||
|
||||
cfg := testConfigWithWorkspace(workspace)
|
||||
@@ -138,23 +138,23 @@ func TestMacOS_SeatbeltBlocksWriteSystemFiles(t *testing.T) {
|
||||
cfg := testConfigWithWorkspace(workspace)
|
||||
|
||||
// Attempting to write to /etc should fail
|
||||
result := runUnderSandbox(t, cfg, "touch /etc/fence-test-file", workspace)
|
||||
result := runUnderSandbox(t, cfg, "touch /etc/greywall-test-file", workspace)
|
||||
|
||||
assertBlocked(t, result)
|
||||
assertFileNotExists(t, "/etc/fence-test-file")
|
||||
assertFileNotExists(t, "/etc/greywall-test-file")
|
||||
}
|
||||
|
||||
// TestMacOS_SeatbeltAllowsTmpFence verifies /tmp/fence is writable.
|
||||
func TestMacOS_SeatbeltAllowsTmpFence(t *testing.T) {
|
||||
// TestMacOS_SeatbeltAllowsTmpGreywall verifies /tmp/greywall is writable.
|
||||
func TestMacOS_SeatbeltAllowsTmpGreywall(t *testing.T) {
|
||||
skipIfAlreadySandboxed(t)
|
||||
|
||||
workspace := createTempWorkspace(t)
|
||||
cfg := testConfigWithWorkspace(workspace)
|
||||
|
||||
// Ensure /tmp/fence exists
|
||||
_ = os.MkdirAll("/tmp/fence", 0o750)
|
||||
// Ensure /tmp/greywall exists
|
||||
_ = os.MkdirAll("/tmp/greywall", 0o750)
|
||||
|
||||
testFile := "/tmp/fence/test-file-" + filepath.Base(workspace)
|
||||
testFile := "/tmp/greywall/test-file-" + filepath.Base(workspace)
|
||||
defer func() { _ = os.Remove(testFile) }()
|
||||
|
||||
result := runUnderSandbox(t, cfg, "echo 'test' > "+testFile, workspace)
|
||||
@@ -211,18 +211,18 @@ func TestMacOS_NetworkBlocksNc(t *testing.T) {
|
||||
assertBlocked(t, result)
|
||||
}
|
||||
|
||||
// TestMacOS_ProxyAllowsAllowedDomains verifies the proxy allows configured domains.
|
||||
func TestMacOS_ProxyAllowsAllowedDomains(t *testing.T) {
|
||||
// TestMacOS_ProxyAllowsTrafficViaProxy verifies the proxy allows traffic via external proxy.
|
||||
func TestMacOS_ProxyAllowsTrafficViaProxy(t *testing.T) {
|
||||
skipIfAlreadySandboxed(t)
|
||||
skipIfCommandNotFound(t, "curl")
|
||||
|
||||
workspace := createTempWorkspace(t)
|
||||
cfg := testConfigWithNetwork("httpbin.org")
|
||||
cfg := testConfigWithProxy("socks5://localhost:1080")
|
||||
cfg.Filesystem.AllowWrite = []string{workspace}
|
||||
|
||||
// This test requires actual network - skip in CI if network is unavailable
|
||||
if os.Getenv("FENCE_TEST_NETWORK") != "1" {
|
||||
t.Skip("skipping: set FENCE_TEST_NETWORK=1 to run network tests")
|
||||
// This test requires actual network and a running SOCKS5 proxy
|
||||
if os.Getenv("GREYWALL_TEST_NETWORK") != "1" {
|
||||
t.Skip("skipping: set GREYWALL_TEST_NETWORK=1 to run network tests (requires SOCKS5 proxy on localhost:1080)")
|
||||
}
|
||||
|
||||
result := runUnderSandboxWithTimeout(t, cfg, "curl -s --connect-timeout 5 --max-time 10 https://httpbin.org/get", workspace, 15*time.Second)
|
||||
@@ -290,10 +290,10 @@ func TestMacOS_SymlinkEscapeBlocked(t *testing.T) {
|
||||
}
|
||||
|
||||
// Try to write through the symlink
|
||||
result := runUnderSandbox(t, cfg, "echo 'test' > "+symlinkPath+"/fence-test", workspace)
|
||||
result := runUnderSandbox(t, cfg, "echo 'test' > "+symlinkPath+"/greywall-test", workspace)
|
||||
|
||||
assertBlocked(t, result)
|
||||
assertFileNotExists(t, "/etc/fence-test")
|
||||
assertFileNotExists(t, "/etc/greywall-test")
|
||||
}
|
||||
|
||||
// TestMacOS_PathTraversalBlocked verifies path traversal attacks are prevented.
|
||||
@@ -303,10 +303,10 @@ func TestMacOS_PathTraversalBlocked(t *testing.T) {
|
||||
workspace := createTempWorkspace(t)
|
||||
cfg := testConfigWithWorkspace(workspace)
|
||||
|
||||
result := runUnderSandbox(t, cfg, "touch ../../../../tmp/fence-escape-test", workspace)
|
||||
result := runUnderSandbox(t, cfg, "touch ../../../../tmp/greywall-escape-test", workspace)
|
||||
|
||||
assertBlocked(t, result)
|
||||
assertFileNotExists(t, "/tmp/fence-escape-test")
|
||||
assertFileNotExists(t, "/tmp/greywall-escape-test")
|
||||
}
|
||||
|
||||
// TestMacOS_DeviceAccessBlocked verifies device files cannot be written.
|
||||
@@ -332,7 +332,7 @@ func TestMacOS_DeviceAccessBlocked(t *testing.T) {
|
||||
// ============================================================================
|
||||
|
||||
// TestMacOS_ReadOnlyPolicy verifies that files outside the allowed write paths cannot be written.
|
||||
// Note: Fence always adds some default writable paths (/tmp/fence, /dev/null, etc.)
|
||||
// Note: Greywall always adds some default writable paths (/tmp/greywall, /dev/null, etc.)
|
||||
// so "read-only" here means "outside the workspace".
|
||||
func TestMacOS_ReadOnlyPolicy(t *testing.T) {
|
||||
skipIfAlreadySandboxed(t)
|
||||
@@ -353,7 +353,7 @@ func TestMacOS_ReadOnlyPolicy(t *testing.T) {
|
||||
assertAllowed(t, result)
|
||||
|
||||
// Writing outside workspace should fail
|
||||
outsidePath := "/tmp/fence-test-readonly-" + filepath.Base(workspace) + ".txt"
|
||||
outsidePath := "/tmp/greywall-test-readonly-" + filepath.Base(workspace) + ".txt"
|
||||
defer func() { _ = os.Remove(outsidePath) }()
|
||||
result = runUnderSandbox(t, cfg, "echo 'outside' > "+outsidePath, workspace)
|
||||
assertBlocked(t, result)
|
||||
@@ -373,7 +373,7 @@ func TestMacOS_WorkspaceWritePolicy(t *testing.T) {
|
||||
assertFileExists(t, filepath.Join(workspace, "test.txt"))
|
||||
|
||||
// Writing outside workspace should fail
|
||||
outsideFile := "/tmp/fence-test-outside.txt"
|
||||
outsideFile := "/tmp/greywall-test-outside.txt"
|
||||
defer func() { _ = os.Remove(outsideFile) }()
|
||||
result = runUnderSandbox(t, cfg, "echo 'test' > "+outsideFile, workspace)
|
||||
assertBlocked(t, result)
|
||||
@@ -399,7 +399,7 @@ func TestMacOS_MultipleWritableRoots(t *testing.T) {
|
||||
assertAllowed(t, result)
|
||||
|
||||
// Writing outside both should fail
|
||||
outsideFile := "/tmp/fence-test-outside-multi.txt"
|
||||
outsideFile := "/tmp/greywall-test-outside-multi.txt"
|
||||
defer func() { _ = os.Remove(outsideFile) }()
|
||||
result = runUnderSandbox(t, cfg, "echo 'test' > "+outsideFile, workspace1)
|
||||
assertBlocked(t, result)
|
||||
|
||||
@@ -12,7 +12,7 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/Use-Tusk/fence/internal/config"
|
||||
"gitea.app.monadical.io/monadical/greywall/internal/config"
|
||||
)
|
||||
|
||||
// ============================================================================
|
||||
@@ -44,8 +44,8 @@ func (r *SandboxTestResult) Failed() bool {
|
||||
// skipIfAlreadySandboxed skips the test if running inside a sandbox.
|
||||
func skipIfAlreadySandboxed(t *testing.T) {
|
||||
t.Helper()
|
||||
if os.Getenv("FENCE_SANDBOX") == "1" {
|
||||
t.Skip("skipping: already running inside Fence sandbox")
|
||||
if os.Getenv("GREYWALL_SANDBOX") == "1" {
|
||||
t.Skip("skipping: already running inside Greywall sandbox")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -123,16 +123,17 @@ func assertContains(t *testing.T, haystack, needle string) {
|
||||
// ============================================================================
|
||||
|
||||
// testConfig creates a test configuration with sensible defaults.
|
||||
// Uses legacy mode (defaultDenyRead=false) for predictable testing of
|
||||
// existing integration tests. Use testConfigDenyByDefault() for tests
|
||||
// that specifically test deny-by-default behavior.
|
||||
func testConfig() *config.Config {
|
||||
return &config.Config{
|
||||
Network: config.NetworkConfig{
|
||||
AllowedDomains: []string{},
|
||||
DeniedDomains: []string{},
|
||||
},
|
||||
Network: config.NetworkConfig{},
|
||||
Filesystem: config.FilesystemConfig{
|
||||
DenyRead: []string{},
|
||||
AllowWrite: []string{},
|
||||
DenyWrite: []string{},
|
||||
DefaultDenyRead: boolPtr(false), // Legacy mode for existing tests
|
||||
DenyRead: []string{},
|
||||
AllowWrite: []string{},
|
||||
DenyWrite: []string{},
|
||||
},
|
||||
Command: config.CommandConfig{
|
||||
Deny: []string{},
|
||||
@@ -149,10 +150,10 @@ func testConfigWithWorkspace(workspacePath string) *config.Config {
|
||||
return cfg
|
||||
}
|
||||
|
||||
// testConfigWithNetwork creates a config that allows specific domains.
|
||||
func testConfigWithNetwork(domains ...string) *config.Config {
|
||||
// testConfigWithProxy creates a config with a proxy URL set.
|
||||
func testConfigWithProxy(proxyURL string) *config.Config {
|
||||
cfg := testConfig()
|
||||
cfg.Network.AllowedDomains = domains
|
||||
cfg.Network.ProxyURL = proxyURL
|
||||
return cfg
|
||||
}
|
||||
|
||||
@@ -160,7 +161,7 @@ func testConfigWithNetwork(domains ...string) *config.Config {
|
||||
// Sandbox Execution Helpers
|
||||
// ============================================================================
|
||||
|
||||
// runUnderSandbox executes a command under the fence sandbox.
|
||||
// runUnderSandbox executes a command under the greywall sandbox.
|
||||
// This uses the sandbox Manager directly for integration testing.
|
||||
func runUnderSandbox(t *testing.T, cfg *config.Config, command string, workDir string) *SandboxTestResult {
|
||||
t.Helper()
|
||||
@@ -244,7 +245,7 @@ func executeShellCommandWithTimeout(t *testing.T, command string, workDir string
|
||||
shell = "/bin/bash"
|
||||
}
|
||||
|
||||
cmd := exec.CommandContext(ctx, shell, "-c", command)
|
||||
cmd := exec.CommandContext(ctx, shell, "-c", command) //nolint:gosec // test helper running shell commands
|
||||
cmd.Dir = workDir
|
||||
|
||||
var stdout, stderr bytes.Buffer
|
||||
@@ -284,7 +285,7 @@ func executeShellCommandWithTimeout(t *testing.T, command string, workDir string
|
||||
// createTempWorkspace creates a temporary directory for testing.
|
||||
func createTempWorkspace(t *testing.T) string {
|
||||
t.Helper()
|
||||
dir, err := os.MkdirTemp("", "fence-test-*")
|
||||
dir, err := os.MkdirTemp("", "greywall-test-*")
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create temp workspace: %v", err)
|
||||
}
|
||||
@@ -495,10 +496,10 @@ func TestIntegration_EnvWorks(t *testing.T) {
|
||||
workspace := createTempWorkspace(t)
|
||||
cfg := testConfigWithWorkspace(workspace)
|
||||
|
||||
result := runUnderSandbox(t, cfg, "env | grep FENCE", workspace)
|
||||
result := runUnderSandbox(t, cfg, "env | grep GREYWALL", workspace)
|
||||
|
||||
assertAllowed(t, result)
|
||||
assertContains(t, result.Stdout, "FENCE_SANDBOX=1")
|
||||
assertContains(t, result.Stdout, "GREYWALL_SANDBOX=1")
|
||||
}
|
||||
|
||||
func TestExecuteShellCommandBwrapError(t *testing.T) {
|
||||
|
||||
453
internal/sandbox/learning.go
Normal file
453
internal/sandbox/learning.go
Normal file
@@ -0,0 +1,453 @@
|
||||
package sandbox
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// TraceResult holds parsed read and write paths from a system trace log
// (strace on Linux, eslogger on macOS).
type TraceResult struct {
	WritePaths []string // unique paths the traced process tree wrote/created/renamed/unlinked
	ReadPaths  []string // unique paths the traced process tree opened read-only
}

// wellKnownParents are directories under $HOME where applications typically
// create their own subdirectory (e.g., ~/.cache/opencode, ~/.config/opencode).
// Paths under these parents are grouped per application by CollapsePaths via
// findApplicationDirectory. Entries are relative to $HOME.
var wellKnownParents = []string{
	".cache",
	".config",
	".local/share",
	".local/state",
	".local/lib",
	".data",
}
|
||||
|
||||
// LearnedTemplateDir returns the directory where learned templates are stored.
|
||||
func LearnedTemplateDir() string {
|
||||
configDir, err := os.UserConfigDir()
|
||||
if err != nil {
|
||||
home, _ := os.UserHomeDir()
|
||||
return filepath.Join(home, ".config", "greywall", "learned")
|
||||
}
|
||||
return filepath.Join(configDir, "greywall", "learned")
|
||||
}
|
||||
|
||||
// LearnedTemplatePath returns the path where a command's learned template is stored.
|
||||
func LearnedTemplatePath(cmdName string) string {
|
||||
return filepath.Join(LearnedTemplateDir(), SanitizeTemplateName(cmdName)+".json")
|
||||
}
|
||||
|
||||
// templateNameUnsafe matches any character that is not allowed in a template
// filename (anything other than alphanumerics, dash, underscore, and dot).
// Compiled once at package init so SanitizeTemplateName avoids a per-call
// regexp compilation.
var templateNameUnsafe = regexp.MustCompile(`[^a-zA-Z0-9._-]`)

// SanitizeTemplateName sanitizes a command name for use as a filename.
// Only allows alphanumeric, dash, underscore, and dot characters; every
// disallowed character is replaced by an underscore, runs of underscores are
// collapsed to one, and leading/trailing underscores and dots are trimmed.
// Returns "unknown" when nothing survives sanitization (e.g. empty input).
func SanitizeTemplateName(name string) string {
	sanitized := templateNameUnsafe.ReplaceAllString(name, "_")
	// Collapse multiple underscores (including ones present in the input).
	for strings.Contains(sanitized, "__") {
		sanitized = strings.ReplaceAll(sanitized, "__", "_")
	}
	sanitized = strings.Trim(sanitized, "_.")
	if sanitized == "" {
		return "unknown"
	}
	return sanitized
}
|
||||
|
||||
// GenerateLearnedTemplate takes a parsed trace result, collapses paths, and saves a template.
// Returns the path where the template was saved.
//
// Pipeline: filter write paths (drop sandbox-default-writable and sensitive
// paths), collapse them to a minimal directory set, do the same for read
// paths (additionally dropping system defaults and the CWD subtree), print a
// human-readable summary to stderr, then write the JSONC template under
// LearnedTemplatePath(cmdName).
//
// NOTE(review): assumes paths in result are absolute host paths — confirm
// against the trace parsers that produce TraceResult.
func GenerateLearnedTemplate(result *TraceResult, cmdName string, debug bool) (string, error) {
	home, _ := os.UserHomeDir()

	// Filter write paths: remove default writable and sensitive paths
	var filteredWrites []string
	for _, p := range result.WritePaths {
		if isDefaultWritablePath(p) {
			continue
		}
		if isSensitivePath(p, home) {
			if debug {
				fmt.Fprintf(os.Stderr, "[greywall] Skipping sensitive path: %s\n", p)
			}
			continue
		}
		filteredWrites = append(filteredWrites, p)
	}

	// Collapse write paths into minimal directory set
	collapsed := CollapsePaths(filteredWrites)

	// Convert write paths to tilde-relative
	var allowWrite []string
	allowWrite = append(allowWrite, ".") // Always include cwd
	for _, p := range collapsed {
		allowWrite = append(allowWrite, toTildePath(p, home))
	}

	// Filter read paths: remove system defaults, CWD subtree, and sensitive paths
	cwd, _ := os.Getwd()
	var filteredReads []string
	defaultReadable := GetDefaultReadablePaths()
	for _, p := range result.ReadPaths {
		// Skip system defaults (exact match or anything below them)
		isDefault := false
		for _, dp := range defaultReadable {
			if p == dp || strings.HasPrefix(p, dp+"/") {
				isDefault = true
				break
			}
		}
		if isDefault {
			continue
		}
		// Skip CWD subtree (auto-included)
		if cwd != "" && (p == cwd || strings.HasPrefix(p, cwd+"/")) {
			continue
		}
		// Skip sensitive paths
		if isSensitivePath(p, home) {
			if debug {
				fmt.Fprintf(os.Stderr, "[greywall] Skipping sensitive read path: %s\n", p)
			}
			continue
		}
		filteredReads = append(filteredReads, p)
	}

	// Collapse read paths and convert to tilde-relative
	collapsedReads := CollapsePaths(filteredReads)
	var allowRead []string
	for _, p := range collapsedReads {
		allowRead = append(allowRead, toTildePath(p, home))
	}

	// Convert read paths to tilde-relative for display.
	// Note: this lists ALL observed read paths (unfiltered), unlike allowRead.
	var readDisplay []string
	for _, p := range result.ReadPaths {
		readDisplay = append(readDisplay, toTildePath(p, home))
	}

	// Print all discovered paths
	fmt.Fprintf(os.Stderr, "\n")

	if len(readDisplay) > 0 {
		fmt.Fprintf(os.Stderr, "[greywall] Discovered read paths:\n")
		for _, p := range readDisplay {
			fmt.Fprintf(os.Stderr, "[greywall] %s\n", p)
		}
	}

	if len(allowRead) > 0 {
		fmt.Fprintf(os.Stderr, "[greywall] Additional read paths (beyond system + CWD):\n")
		for _, p := range allowRead {
			fmt.Fprintf(os.Stderr, "[greywall] %s\n", p)
		}
	}

	if len(allowWrite) > 1 { // >1 because "." is always included
		fmt.Fprintf(os.Stderr, "[greywall] Discovered write paths (collapsed):\n")
		for _, p := range allowWrite {
			if p == "." {
				continue
			}
			fmt.Fprintf(os.Stderr, "[greywall] %s\n", p)
		}
	} else {
		fmt.Fprintf(os.Stderr, "[greywall] No additional write paths discovered.\n")
	}

	fmt.Fprintf(os.Stderr, "\n")

	// Build template
	template := buildTemplate(cmdName, allowRead, allowWrite)

	// Save template (0o750 dir / 0o600 file: template may reveal path layout)
	templatePath := LearnedTemplatePath(cmdName)
	if err := os.MkdirAll(filepath.Dir(templatePath), 0o750); err != nil {
		return "", fmt.Errorf("failed to create template directory: %w", err)
	}

	if err := os.WriteFile(templatePath, []byte(template), 0o600); err != nil {
		return "", fmt.Errorf("failed to write template: %w", err)
	}

	// Display the template content
	fmt.Fprintf(os.Stderr, "[greywall] Generated template:\n")
	for _, line := range strings.Split(template, "\n") {
		if line != "" {
			fmt.Fprintf(os.Stderr, "[greywall] %s\n", line)
		}
	}
	fmt.Fprintf(os.Stderr, "\n")

	return templatePath, nil
}
|
||||
|
||||
// CollapsePaths groups write paths into minimal directory set.
|
||||
// Uses "application directory" detection for well-known parents.
|
||||
func CollapsePaths(paths []string) []string {
|
||||
if len(paths) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
home, _ := os.UserHomeDir()
|
||||
|
||||
// Group paths by application directory
|
||||
appDirPaths := make(map[string][]string) // appDir -> list of paths
|
||||
var standalone []string // paths that don't fit an app dir
|
||||
|
||||
for _, p := range paths {
|
||||
appDir := findApplicationDirectory(p, home)
|
||||
if appDir != "" {
|
||||
appDirPaths[appDir] = append(appDirPaths[appDir], p)
|
||||
} else {
|
||||
standalone = append(standalone, p)
|
||||
}
|
||||
}
|
||||
|
||||
var result []string
|
||||
|
||||
// For each app dir group: if 2+ paths share it, use the app dir
|
||||
// If only 1 path, use its parent directory
|
||||
for appDir, groupPaths := range appDirPaths {
|
||||
if len(groupPaths) >= 2 {
|
||||
result = append(result, appDir)
|
||||
} else {
|
||||
result = append(result, filepath.Dir(groupPaths[0]))
|
||||
}
|
||||
}
|
||||
|
||||
// For standalone paths, use their parent directory — but never collapse to $HOME
|
||||
for _, p := range standalone {
|
||||
parent := filepath.Dir(p)
|
||||
if parent == home {
|
||||
// Keep exact file path to avoid opening entire home directory
|
||||
result = append(result, p)
|
||||
} else {
|
||||
result = append(result, parent)
|
||||
}
|
||||
}
|
||||
|
||||
// Sort, remove exact duplicates, then remove sub-paths of other paths
|
||||
sort.Strings(result)
|
||||
result = removeDuplicates(result)
|
||||
result = deduplicateSubPaths(result)
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// findApplicationDirectory finds the app-level directory for a path.
|
||||
// For paths under well-known parents (e.g., ~/.cache/opencode/foo),
|
||||
// returns the first directory below the well-known parent (e.g., ~/.cache/opencode).
|
||||
func findApplicationDirectory(path, home string) string {
|
||||
if home == "" {
|
||||
return ""
|
||||
}
|
||||
|
||||
for _, parent := range wellKnownParents {
|
||||
prefix := filepath.Join(home, parent) + "/"
|
||||
if strings.HasPrefix(path, prefix) {
|
||||
// Get the first directory below the well-known parent
|
||||
rest := strings.TrimPrefix(path, prefix)
|
||||
parts := strings.SplitN(rest, "/", 2)
|
||||
if len(parts) > 0 && parts[0] != "" {
|
||||
return filepath.Join(home, parent, parts[0])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
// isDefaultWritablePath checks if a path is already writable by default in the sandbox.
|
||||
func isDefaultWritablePath(path string) bool {
|
||||
// /tmp is always writable (tmpfs in sandbox)
|
||||
if strings.HasPrefix(path, "/tmp/") || path == "/tmp" {
|
||||
return false // /tmp inside sandbox is tmpfs, not host /tmp
|
||||
}
|
||||
|
||||
for _, p := range GetDefaultWritePaths() {
|
||||
if path == p || strings.HasPrefix(path, p+"/") {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// isSensitivePath checks if a path is sensitive and should not be made writable.
|
||||
func isSensitivePath(path, home string) bool {
|
||||
if home == "" {
|
||||
return false
|
||||
}
|
||||
|
||||
// Check against DangerousFiles
|
||||
for _, f := range DangerousFiles {
|
||||
dangerous := filepath.Join(home, f)
|
||||
if path == dangerous {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
// Check for .env files
|
||||
base := filepath.Base(path)
|
||||
if base == ".env" || strings.HasPrefix(base, ".env.") {
|
||||
return true
|
||||
}
|
||||
|
||||
// Check SSH keys
|
||||
sshDir := filepath.Join(home, ".ssh")
|
||||
if strings.HasPrefix(path, sshDir+"/") {
|
||||
return true
|
||||
}
|
||||
|
||||
// Check GPG
|
||||
gnupgDir := filepath.Join(home, ".gnupg")
|
||||
return strings.HasPrefix(path, gnupgDir+"/")
|
||||
}
|
||||
|
||||
// getDangerousFilePatterns returns denyWrite entries for DangerousFiles.
|
||||
func getDangerousFilePatterns() []string {
|
||||
var patterns []string
|
||||
for _, f := range DangerousFiles {
|
||||
patterns = append(patterns, "~/"+f)
|
||||
}
|
||||
return patterns
|
||||
}
|
||||
|
||||
// getSensitiveReadPatterns returns denyRead entries for sensitive data
// (SSH private keys and the GPG keyring).
func getSensitiveReadPatterns() []string {
	patterns := []string{
		"~/.ssh/id_*",
		"~/.gnupg/**",
	}
	return patterns
}
|
||||
|
||||
// toTildePath converts an absolute path to a tilde-relative path if it is
// strictly inside the home directory; otherwise the path is returned as-is
// (including the exact home directory itself).
func toTildePath(p, home string) string {
	if home == "" {
		return p
	}
	prefix := home + "/"
	if strings.HasPrefix(p, prefix) {
		return "~/" + p[len(prefix):]
	}
	return p
}
|
||||
|
||||
// LearnedTemplateInfo holds metadata about a learned template.
|
||||
type LearnedTemplateInfo struct {
|
||||
Name string // template name (without .json)
|
||||
Path string // full path to the template file
|
||||
}
|
||||
|
||||
// ListLearnedTemplates returns all available learned templates.
|
||||
func ListLearnedTemplates() ([]LearnedTemplateInfo, error) {
|
||||
dir := LearnedTemplateDir()
|
||||
entries, err := os.ReadDir(dir)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var templates []LearnedTemplateInfo
|
||||
for _, e := range entries {
|
||||
if e.IsDir() || !strings.HasSuffix(e.Name(), ".json") {
|
||||
continue
|
||||
}
|
||||
name := strings.TrimSuffix(e.Name(), ".json")
|
||||
templates = append(templates, LearnedTemplateInfo{
|
||||
Name: name,
|
||||
Path: filepath.Join(dir, e.Name()),
|
||||
})
|
||||
}
|
||||
return templates, nil
|
||||
}
|
||||
|
||||
// removeDuplicates removes exact duplicate strings from a sorted slice,
// returning a new slice (the input is left untouched).
func removeDuplicates(paths []string) []string {
	if len(paths) <= 1 {
		return paths
	}
	out := make([]string, 1, len(paths))
	out[0] = paths[0]
	for _, p := range paths[1:] {
		// Sorted input: duplicates are adjacent, so comparing with the
		// last kept element is sufficient.
		if p != out[len(out)-1] {
			out = append(out, p)
		}
	}
	return out
}
|
||||
|
||||
// deduplicateSubPaths removes paths that are sub-paths of other paths in the list.
// Assumes the input is sorted.
func deduplicateSubPaths(paths []string) []string {
	if len(paths) == 0 {
		return nil
	}

	var kept []string
	for _, candidate := range paths {
		// In a lexicographically sorted list, any ancestor directory of
		// candidate sorts before it, and an ancestor with no ancestor of
		// its own is always kept — so checking kept entries suffices.
		covered := false
		for _, ancestor := range kept {
			if strings.HasPrefix(candidate, ancestor+"/") {
				covered = true
				break
			}
		}
		if !covered {
			kept = append(kept, candidate)
		}
	}
	return kept
}
|
||||
|
||||
// getSensitiveProjectDenyPatterns returns denyRead entries for sensitive
// project files (dotenv files in the project tree).
func getSensitiveProjectDenyPatterns() []string {
	patterns := []string{".env"}
	patterns = append(patterns, ".env.*")
	return patterns
}
|
||||
|
||||
// buildTemplate generates the JSONC template content for a learned config.
|
||||
func buildTemplate(cmdName string, allowRead, allowWrite []string) string {
|
||||
type fsConfig struct {
|
||||
AllowRead []string `json:"allowRead,omitempty"`
|
||||
AllowWrite []string `json:"allowWrite"`
|
||||
DenyWrite []string `json:"denyWrite"`
|
||||
DenyRead []string `json:"denyRead"`
|
||||
}
|
||||
type templateConfig struct {
|
||||
Filesystem fsConfig `json:"filesystem"`
|
||||
}
|
||||
|
||||
// Combine sensitive read patterns with .env project patterns
|
||||
denyRead := append(getSensitiveReadPatterns(), getSensitiveProjectDenyPatterns()...)
|
||||
|
||||
cfg := templateConfig{
|
||||
Filesystem: fsConfig{
|
||||
AllowRead: allowRead,
|
||||
AllowWrite: allowWrite,
|
||||
DenyWrite: getDangerousFilePatterns(),
|
||||
DenyRead: denyRead,
|
||||
},
|
||||
}
|
||||
|
||||
data, _ := json.MarshalIndent(cfg, "", " ")
|
||||
|
||||
var sb strings.Builder
|
||||
fmt.Fprintf(&sb, "// Learned template for %q\n", cmdName)
|
||||
fmt.Fprintf(&sb, "// Generated by: greywall --learning -- %s\n", cmdName)
|
||||
sb.WriteString("// Review and adjust paths as needed\n")
|
||||
sb.Write(data)
|
||||
sb.WriteString("\n")
|
||||
|
||||
return sb.String()
|
||||
}
|
||||
459
internal/sandbox/learning_darwin.go
Normal file
459
internal/sandbox/learning_darwin.go
Normal file
@@ -0,0 +1,459 @@
|
||||
//go:build darwin
|
||||
|
||||
package sandbox
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"gitea.app.monadical.io/monadical/greywall/internal/daemon"
|
||||
)
|
||||
|
||||
// opClass classifies a filesystem operation.
type opClass int

const (
	// opSkip marks events with no usable path information.
	opSkip opClass = iota
	// opRead marks read-only access to a path.
	opRead
	// opWrite marks mutating access (open-for-write, create, rename, ...).
	opWrite
)

// fwriteFlag is the macOS FWRITE flag value (O_WRONLY or O_RDWR includes this).
const fwriteFlag = 0x0002

// eslogger JSON types — mirrors the real Endpoint Security framework output.
// eslogger emits one JSON object per line to stdout.
//
// Key structural details from real eslogger output:
//   - event_type is an integer (e.g., 10=open, 11=fork, 13=create, 32=unlink, 33=write, 41=truncate)
//   - Event data is nested under event.{event_name} (e.g., event.open, event.fork)
//   - write/unlink/truncate use "target" not "file"
//   - create uses destination.existing_file
//   - fork child has full process info including audit_token

// esloggerEvent is the top-level event from eslogger.
type esloggerEvent struct {
	EventType int                        `json:"event_type"`
	Process   esloggerProcess            `json:"process"`
	Event     map[string]json.RawMessage `json:"event"` // keyed by event name; payload decoded lazily
}

// esloggerProcess describes the process that generated an event.
type esloggerProcess struct {
	AuditToken esloggerAuditToken `json:"audit_token"`
	Executable esloggerExec       `json:"executable"`
	PPID       int                `json:"ppid"`
}

// esloggerAuditToken carries the PID used for process-tree filtering.
type esloggerAuditToken struct {
	PID int `json:"pid"`
}

// esloggerExec identifies the executable image of a process.
type esloggerExec struct {
	Path          string `json:"path"`
	PathTruncated bool   `json:"path_truncated"`
}

// Event-specific types.

// esloggerOpenEvent is the payload of an "open" event; fflag carries the
// open flags (FWRITE bit distinguishes write from read opens).
type esloggerOpenEvent struct {
	File  esloggerFile `json:"file"`
	Fflag int          `json:"fflag"`
}

// esloggerTargetEvent covers events ("write", "unlink", "truncate") that
// name their file under "target" rather than "file".
type esloggerTargetEvent struct {
	Target esloggerFile `json:"target"`
}

// esloggerCreateEvent is the payload of a "create" event; the destination is
// either an existing file or a dir+filename pair for a new path.
type esloggerCreateEvent struct {
	DestinationType int                `json:"destination_type"`
	Destination     esloggerCreateDest `json:"destination"`
}

type esloggerCreateDest struct {
	ExistingFile *esloggerFile    `json:"existing_file,omitempty"`
	NewPath      *esloggerNewPath `json:"new_path,omitempty"`
}

type esloggerNewPath struct {
	Dir      esloggerFile `json:"dir"`
	Filename string       `json:"filename"`
}

// esloggerRenameEvent is the payload of a "rename" event.
type esloggerRenameEvent struct {
	Source      esloggerFile `json:"source"`
	Destination esloggerFile `json:"destination_new_path"` // TODO: verify actual field name against real eslogger output
}

// esloggerForkEvent is the payload of a "fork" event; only the child matters
// for building the PID tree.
type esloggerForkEvent struct {
	Child esloggerForkChild `json:"child"`
}

type esloggerForkChild struct {
	AuditToken esloggerAuditToken `json:"audit_token"`
	Executable esloggerExec       `json:"executable"`
	PPID       int                `json:"ppid"`
}

// esloggerLinkEvent is the payload of a "link" event.
type esloggerLinkEvent struct {
	Source    esloggerFile `json:"source"`
	TargetDir esloggerFile `json:"target_dir"`
}

// esloggerFile is a file reference; PathTruncated set means the path is
// unreliable and must be skipped.
type esloggerFile struct {
	Path          string `json:"path"`
	PathTruncated bool   `json:"path_truncated"`
}
|
||||
|
||||
// CheckLearningAvailable verifies that eslogger exists and the daemon is running.
|
||||
func CheckLearningAvailable() error {
|
||||
if _, err := os.Stat("/usr/bin/eslogger"); err != nil {
|
||||
return fmt.Errorf("eslogger not found at /usr/bin/eslogger (requires macOS 13+): %w", err)
|
||||
}
|
||||
|
||||
client := daemon.NewClient(daemon.DefaultSocketPath, false)
|
||||
if !client.IsRunning() {
|
||||
return fmt.Errorf("greywall daemon is not running (required for macOS learning mode)\n\n" +
|
||||
" Install and start: sudo greywall daemon install\n" +
|
||||
" Check status: greywall daemon status")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// eventName extracts the event name string from the event map.
|
||||
// eslogger nests event data under event.{name}, e.g., event.open, event.fork.
|
||||
func eventName(ev *esloggerEvent) string {
|
||||
for key := range ev.Event {
|
||||
return key
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// ParseEsloggerLog reads an eslogger JSON log, builds the process tree from
// fork events starting at rootPID, then filters filesystem events by the PID set.
// Uses a two-pass approach: pass 1 scans fork events to build the PID tree,
// pass 2 filters filesystem events by the PID set.
//
// Paths are deduplicated; order of first occurrence is preserved. Lines that
// fail to parse as JSON are silently skipped (eslogger occasionally emits
// partial lines).
func ParseEsloggerLog(logPath string, rootPID int, debug bool) (*TraceResult, error) {
	home, _ := os.UserHomeDir()
	seenWrite := make(map[string]bool) // dedup set for WritePaths
	seenRead := make(map[string]bool)  // dedup set for ReadPaths
	result := &TraceResult{}

	// Pass 1: Build the PID set from fork events.
	pidSet := map[int]bool{rootPID: true}
	forkEvents, err := scanForkEvents(logPath)
	if err != nil {
		return nil, err
	}

	// BFS: expand PID set using fork parent→child relationships.
	// We may need multiple rounds since a child can itself fork.
	changed := true
	for changed {
		changed = false
		for _, fe := range forkEvents {
			if pidSet[fe.parentPID] && !pidSet[fe.childPID] {
				pidSet[fe.childPID] = true
				changed = true
			}
		}
	}

	if debug {
		fmt.Fprintf(os.Stderr, "[greywall] eslogger PID tree from root %d: %d PIDs\n", rootPID, len(pidSet))
	}

	// Pass 2: Scan filesystem events, filter by PID set.
	f, err := os.Open(logPath) //nolint:gosec // daemon-controlled temp file path
	if err != nil {
		return nil, fmt.Errorf("failed to open eslogger log: %w", err)
	}
	defer func() { _ = f.Close() }()

	// Large buffer: eslogger lines can be long (full process + path info).
	scanner := bufio.NewScanner(f)
	scanner.Buffer(make([]byte, 0, 256*1024), 4*1024*1024)

	lineCount := 0
	matchedLines := 0
	writeCount := 0
	readCount := 0

	for scanner.Scan() {
		line := scanner.Bytes()
		lineCount++

		var ev esloggerEvent
		if err := json.Unmarshal(line, &ev); err != nil {
			continue
		}

		name := eventName(&ev)

		// Skip fork events (already processed in pass 1)
		if name == "fork" {
			continue
		}

		// Filter by PID set
		pid := ev.Process.AuditToken.PID
		if !pidSet[pid] {
			continue
		}
		matchedLines++

		// Extract path and classify operation
		paths, class := classifyEsloggerEvent(&ev, name)
		if class == opSkip || len(paths) == 0 {
			continue
		}

		for _, path := range paths {
			if shouldFilterPathMacOS(path, home) {
				continue
			}

			switch class {
			case opWrite:
				writeCount++
				if !seenWrite[path] {
					seenWrite[path] = true
					result.WritePaths = append(result.WritePaths, path)
				}
			case opRead:
				readCount++
				if !seenRead[path] {
					seenRead[path] = true
					result.ReadPaths = append(result.ReadPaths, path)
				}
			}
		}
	}

	if err := scanner.Err(); err != nil {
		return nil, fmt.Errorf("error reading eslogger log: %w", err)
	}

	if debug {
		fmt.Fprintf(os.Stderr, "[greywall] Parsed eslogger log: %d lines, %d matched PIDs, %d writes, %d reads, %d unique write paths, %d unique read paths\n",
			lineCount, matchedLines, writeCount, readCount, len(result.WritePaths), len(result.ReadPaths))
	}

	return result, nil
}
|
||||
|
||||
// forkRecord stores a parent→child PID relationship from a fork event.
|
||||
type forkRecord struct {
|
||||
parentPID int
|
||||
childPID int
|
||||
}
|
||||
|
||||
// scanForkEvents reads the log and extracts all fork parent→child PID pairs.
|
||||
func scanForkEvents(logPath string) ([]forkRecord, error) {
|
||||
f, err := os.Open(logPath) //nolint:gosec // daemon-controlled temp file path
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to open eslogger log: %w", err)
|
||||
}
|
||||
defer func() { _ = f.Close() }()
|
||||
|
||||
scanner := bufio.NewScanner(f)
|
||||
scanner.Buffer(make([]byte, 0, 256*1024), 4*1024*1024)
|
||||
|
||||
var forks []forkRecord
|
||||
for scanner.Scan() {
|
||||
line := scanner.Bytes()
|
||||
|
||||
// Quick pre-check to avoid parsing non-fork lines.
|
||||
// Fork events have "fork" as a key in the event object.
|
||||
if !strings.Contains(string(line), `"fork"`) {
|
||||
continue
|
||||
}
|
||||
|
||||
var ev esloggerEvent
|
||||
if err := json.Unmarshal(line, &ev); err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
forkRaw, ok := ev.Event["fork"]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
var fe esloggerForkEvent
|
||||
if err := json.Unmarshal(forkRaw, &fe); err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
forks = append(forks, forkRecord{
|
||||
parentPID: ev.Process.AuditToken.PID,
|
||||
childPID: fe.Child.AuditToken.PID,
|
||||
})
|
||||
}
|
||||
|
||||
if err := scanner.Err(); err != nil {
|
||||
return nil, fmt.Errorf("error reading eslogger log for fork events: %w", err)
|
||||
}
|
||||
|
||||
return forks, nil
|
||||
}
|
||||
|
||||
// classifyEsloggerEvent extracts paths and classifies the operation from an eslogger event.
// The event name is the key inside the event map (e.g., "open", "fork", "write").
//
// Returns (nil, opSkip) for unknown event names, undecodable payloads, and
// truncated/empty paths (a truncated path is unreliable and must not be used).
func classifyEsloggerEvent(ev *esloggerEvent, name string) ([]string, opClass) {
	eventRaw, ok := ev.Event[name]
	if !ok {
		return nil, opSkip
	}

	switch name {
	case "open":
		var oe esloggerOpenEvent
		if err := json.Unmarshal(eventRaw, &oe); err != nil {
			return nil, opSkip
		}
		path := oe.File.Path
		if path == "" || oe.File.PathTruncated {
			return nil, opSkip
		}
		// FWRITE set in the open flags means the fd can write.
		if oe.Fflag&fwriteFlag != 0 {
			return []string{path}, opWrite
		}
		return []string{path}, opRead

	case "create":
		var ce esloggerCreateEvent
		if err := json.Unmarshal(eventRaw, &ce); err != nil {
			return nil, opSkip
		}
		// create events use destination.existing_file or destination.new_path
		if ce.Destination.ExistingFile != nil {
			path := ce.Destination.ExistingFile.Path
			if path != "" && !ce.Destination.ExistingFile.PathTruncated {
				return []string{path}, opWrite
			}
		}
		if ce.Destination.NewPath != nil {
			dir := ce.Destination.NewPath.Dir.Path
			filename := ce.Destination.NewPath.Filename
			if dir != "" && filename != "" {
				return []string{dir + "/" + filename}, opWrite
			}
		}
		return nil, opSkip

	case "write", "unlink", "truncate":
		// These events use "target" not "file"
		var te esloggerTargetEvent
		if err := json.Unmarshal(eventRaw, &te); err != nil {
			return nil, opSkip
		}
		path := te.Target.Path
		if path == "" || te.Target.PathTruncated {
			return nil, opSkip
		}
		return []string{path}, opWrite

	case "rename":
		// Both source and destination count as writes (source dir entry is
		// removed, destination is created).
		var re esloggerRenameEvent
		if err := json.Unmarshal(eventRaw, &re); err != nil {
			return nil, opSkip
		}
		var paths []string
		if re.Source.Path != "" && !re.Source.PathTruncated {
			paths = append(paths, re.Source.Path)
		}
		if re.Destination.Path != "" && !re.Destination.PathTruncated {
			paths = append(paths, re.Destination.Path)
		}
		if len(paths) == 0 {
			return nil, opSkip
		}
		return paths, opWrite

	case "link":
		var le esloggerLinkEvent
		if err := json.Unmarshal(eventRaw, &le); err != nil {
			return nil, opSkip
		}
		var paths []string
		if le.Source.Path != "" && !le.Source.PathTruncated {
			paths = append(paths, le.Source.Path)
		}
		if le.TargetDir.Path != "" && !le.TargetDir.PathTruncated {
			paths = append(paths, le.TargetDir.Path)
		}
		if len(paths) == 0 {
			return nil, opSkip
		}
		return paths, opWrite

	default:
		return nil, opSkip
	}
}
|
||||
|
||||
// shouldFilterPathMacOS returns true if a path should be excluded from macOS
// learning results: non-absolute or empty paths, macOS system locations,
// shared libraries, greywall's own files, anything outside (or exactly at)
// the home directory, and shell infrastructure directories.
func shouldFilterPathMacOS(path, home string) bool {
	if path == "" || path[0] != '/' {
		return true
	}

	// macOS system locations that are never interesting for learning.
	for _, prefix := range []string{
		"/dev/",
		"/private/var/run/",
		"/private/var/db/",
		"/private/var/folders/",
		"/System/",
		"/Library/",
		"/usr/lib/",
		"/usr/share/",
		"/private/etc/",
		"/tmp/",
		"/private/tmp/",
	} {
		if strings.HasPrefix(path, prefix) {
			return true
		}
	}

	// Shared libraries are dynamic-loader noise.
	if strings.HasSuffix(path, ".dylib") {
		return true
	}

	// Greywall's own infrastructure files.
	if strings.Contains(path, "greywall-") {
		return true
	}

	if home == "" {
		// Without a known home dir the home-relative filters cannot apply;
		// the path survived the system filters, so keep it.
		return false
	}

	// Keep only paths strictly inside the home directory (the prefix check
	// also excludes the home directory itself).
	if !strings.HasPrefix(path, home+"/") {
		return true
	}

	// Shell infrastructure directories (PATH lookups, plugin managers, shims).
	for _, prefix := range []string{
		home + "/.antigen/",
		home + "/.oh-my-zsh/",
		home + "/.pyenv/shims/",
		home + "/.bun/bin/",
		home + "/.local/bin/",
	} {
		if strings.HasPrefix(path, prefix) {
			return true
		}
	}

	return false
}
|
||||
557
internal/sandbox/learning_darwin_test.go
Normal file
557
internal/sandbox/learning_darwin_test.go
Normal file
@@ -0,0 +1,557 @@
|
||||
//go:build darwin
|
||||
|
||||
package sandbox
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// makeEsloggerLine builds a single JSON line matching real eslogger output format.
|
||||
// event_type is an int, and event data is nested under event.{eventName}.
|
||||
func makeEsloggerLine(eventName string, eventTypeInt int, pid int, eventData interface{}) string {
|
||||
eventJSON, _ := json.Marshal(eventData)
|
||||
ev := map[string]interface{}{
|
||||
"event_type": eventTypeInt,
|
||||
"process": map[string]interface{}{
|
||||
"audit_token": map[string]interface{}{
|
||||
"pid": pid,
|
||||
},
|
||||
"executable": map[string]interface{}{
|
||||
"path": "/usr/bin/test",
|
||||
"path_truncated": false,
|
||||
},
|
||||
"ppid": 1,
|
||||
},
|
||||
"event": map[string]json.RawMessage{
|
||||
eventName: json.RawMessage(eventJSON),
|
||||
},
|
||||
}
|
||||
data, _ := json.Marshal(ev)
|
||||
return string(data)
|
||||
}
|
||||
|
||||
// TestClassifyEsloggerEvent table-tests path/class extraction per event name:
// open (read vs write depending on fflag), create (destination.existing_file),
// write/unlink/truncate (target-based), rename (source + destination), and
// skip cases (truncated or empty paths).
func TestClassifyEsloggerEvent(t *testing.T) {
	tests := []struct {
		name        string
		eventName   string
		eventData   interface{}
		expectPaths []string // nil means the event must be skipped
		expectClass opClass
	}{
		{
			name:      "open read-only",
			eventName: "open",
			eventData: map[string]interface{}{
				"file":  map[string]interface{}{"path": "/Users/test/file.txt", "path_truncated": false},
				"fflag": 0x0001, // FREAD only
			},
			expectPaths: []string{"/Users/test/file.txt"},
			expectClass: opRead,
		},
		{
			name:      "open with write flag",
			eventName: "open",
			eventData: map[string]interface{}{
				"file":  map[string]interface{}{"path": "/Users/test/file.txt", "path_truncated": false},
				"fflag": 0x0003, // FREAD | FWRITE
			},
			expectPaths: []string{"/Users/test/file.txt"},
			expectClass: opWrite,
		},
		{
			name:      "create event with existing_file",
			eventName: "create",
			eventData: map[string]interface{}{
				"destination_type": 0,
				"destination": map[string]interface{}{
					"existing_file": map[string]interface{}{"path": "/Users/test/new.txt", "path_truncated": false},
				},
			},
			expectPaths: []string{"/Users/test/new.txt"},
			expectClass: opWrite,
		},
		{
			name:      "write event uses target",
			eventName: "write",
			eventData: map[string]interface{}{
				"target": map[string]interface{}{"path": "/Users/test/data.db", "path_truncated": false},
			},
			expectPaths: []string{"/Users/test/data.db"},
			expectClass: opWrite,
		},
		{
			name:      "unlink event uses target",
			eventName: "unlink",
			eventData: map[string]interface{}{
				"target": map[string]interface{}{"path": "/Users/test/old.txt", "path_truncated": false},
			},
			expectPaths: []string{"/Users/test/old.txt"},
			expectClass: opWrite,
		},
		{
			name:      "truncate event uses target",
			eventName: "truncate",
			eventData: map[string]interface{}{
				"target": map[string]interface{}{"path": "/Users/test/trunc.log", "path_truncated": false},
			},
			expectPaths: []string{"/Users/test/trunc.log"},
			expectClass: opWrite,
		},
		{
			name:      "rename event with source and destination",
			eventName: "rename",
			eventData: map[string]interface{}{
				"source":               map[string]interface{}{"path": "/Users/test/old.txt", "path_truncated": false},
				"destination_new_path": map[string]interface{}{"path": "/Users/test/new.txt", "path_truncated": false},
			},
			expectPaths: []string{"/Users/test/old.txt", "/Users/test/new.txt"},
			expectClass: opWrite,
		},
		{
			name:      "truncated path is skipped",
			eventName: "open",
			eventData: map[string]interface{}{
				"file":  map[string]interface{}{"path": "/Users/test/very/long/path", "path_truncated": true},
				"fflag": 0x0001,
			},
			expectPaths: nil,
			expectClass: opSkip,
		},
		{
			name:      "empty path is skipped",
			eventName: "write",
			eventData: map[string]interface{}{
				"target": map[string]interface{}{"path": "", "path_truncated": false},
			},
			expectPaths: nil,
			expectClass: opSkip,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Build a minimal esloggerEvent with only the nested event payload;
			// classification dispatches on the event name argument, not EventType.
			eventJSON, _ := json.Marshal(tt.eventData)
			ev := &esloggerEvent{
				EventType: 0,
				Event: map[string]json.RawMessage{
					tt.eventName: json.RawMessage(eventJSON),
				},
			}

			paths, class := classifyEsloggerEvent(ev, tt.eventName)
			if class != tt.expectClass {
				t.Errorf("class = %d, want %d", class, tt.expectClass)
			}
			if tt.expectPaths == nil {
				if len(paths) != 0 {
					t.Errorf("paths = %v, want nil", paths)
				}
			} else {
				if len(paths) != len(tt.expectPaths) {
					t.Errorf("paths = %v, want %v", paths, tt.expectPaths)
				} else {
					for i, p := range paths {
						if p != tt.expectPaths[i] {
							t.Errorf("paths[%d] = %q, want %q", i, p, tt.expectPaths[i])
						}
					}
				}
			}
		})
	}
}
|
||||
|
||||
// TestParseEsloggerLog runs the full parsing pipeline over a synthetic
// eslogger log: fork tracking across two generations, inclusion of
// descendant read/write events, exclusion of unrelated PIDs, and filtering
// of system paths.
func TestParseEsloggerLog(t *testing.T) {
	home, _ := os.UserHomeDir()

	// Root PID is 100; it forks child PID 101, which forks grandchild 102.
	// PID 200 is an unrelated process.
	lines := []string{
		// Fork: root (100) -> child (101)
		makeEsloggerLine("fork", 11, 100, map[string]interface{}{
			"child": map[string]interface{}{
				"audit_token": map[string]interface{}{"pid": 101},
				"executable":  map[string]interface{}{"path": "/usr/bin/child", "path_truncated": false},
				"ppid":        100,
			},
		}),
		// Fork: child (101) -> grandchild (102)
		makeEsloggerLine("fork", 11, 101, map[string]interface{}{
			"child": map[string]interface{}{
				"audit_token": map[string]interface{}{"pid": 102},
				"executable":  map[string]interface{}{"path": "/usr/bin/grandchild", "path_truncated": false},
				"ppid":        101,
			},
		}),
		// Write by root process (should be included) — write uses "target"
		makeEsloggerLine("write", 33, 100, map[string]interface{}{
			"target": map[string]interface{}{"path": filepath.Join(home, ".cache/testapp/db.sqlite"), "path_truncated": false},
		}),
		// Create by child (should be included) — create uses destination.existing_file
		makeEsloggerLine("create", 13, 101, map[string]interface{}{
			"destination_type": 0,
			"destination": map[string]interface{}{
				"existing_file": map[string]interface{}{"path": filepath.Join(home, ".config/testapp/conf.json"), "path_truncated": false},
			},
		}),
		// Open (read-only) by grandchild (should be included as read)
		makeEsloggerLine("open", 10, 102, map[string]interface{}{
			"file":  map[string]interface{}{"path": filepath.Join(home, ".config/testapp/extra.json"), "path_truncated": false},
			"fflag": 0x0001,
		}),
		// Open (write) by grandchild (should be included as write)
		makeEsloggerLine("open", 10, 102, map[string]interface{}{
			"file":  map[string]interface{}{"path": filepath.Join(home, ".cache/testapp/version"), "path_truncated": false},
			"fflag": 0x0003,
		}),
		// Write by unrelated PID 200 (should NOT be included)
		makeEsloggerLine("write", 33, 200, map[string]interface{}{
			"target": map[string]interface{}{"path": filepath.Join(home, ".cache/otherapp/data"), "path_truncated": false},
		}),
		// System path write by root PID (should be filtered)
		makeEsloggerLine("write", 33, 100, map[string]interface{}{
			"target": map[string]interface{}{"path": "/dev/null", "path_truncated": false},
		}),
		// Unlink by child (should be included) — unlink uses "target"
		makeEsloggerLine("unlink", 32, 101, map[string]interface{}{
			"target": map[string]interface{}{"path": filepath.Join(home, ".cache/testapp/old.tmp"), "path_truncated": false},
		}),
	}

	logContent := strings.Join(lines, "\n")
	logFile := filepath.Join(t.TempDir(), "eslogger.log")
	if err := os.WriteFile(logFile, []byte(logContent), 0o600); err != nil {
		t.Fatal(err)
	}

	result, err := ParseEsloggerLog(logFile, 100, false)
	if err != nil {
		t.Fatalf("ParseEsloggerLog() error: %v", err)
	}

	// Check write paths: every expected path must appear in WritePaths.
	expectedWrites := map[string]bool{
		filepath.Join(home, ".cache/testapp/db.sqlite"):  false,
		filepath.Join(home, ".config/testapp/conf.json"): false,
		filepath.Join(home, ".cache/testapp/version"):    false,
		filepath.Join(home, ".cache/testapp/old.tmp"):    false,
	}
	for _, p := range result.WritePaths {
		if _, ok := expectedWrites[p]; ok {
			expectedWrites[p] = true
		}
	}
	for p, found := range expectedWrites {
		if !found {
			t.Errorf("WritePaths missing expected: %q, got: %v", p, result.WritePaths)
		}
	}

	// Check that unrelated PID 200 paths were not included
	for _, p := range result.WritePaths {
		if strings.Contains(p, "otherapp") {
			t.Errorf("WritePaths should not contain otherapp path: %q", p)
		}
	}

	// Check read paths
	expectedReads := map[string]bool{
		filepath.Join(home, ".config/testapp/extra.json"): false,
	}
	for _, p := range result.ReadPaths {
		if _, ok := expectedReads[p]; ok {
			expectedReads[p] = true
		}
	}
	for p, found := range expectedReads {
		if !found {
			t.Errorf("ReadPaths missing expected: %q, got: %v", p, result.ReadPaths)
		}
	}
}
|
||||
|
||||
// TestParseEsloggerLogForkChaining verifies that fork ancestry is followed
// transitively: a write by the deepest descendant of the root PID is
// attributed to the traced session.
func TestParseEsloggerLogForkChaining(t *testing.T) {
	home, _ := os.UserHomeDir()

	// Test deep fork chains: 100 -> 101 -> 102 -> 103
	lines := []string{
		makeEsloggerLine("fork", 11, 100, map[string]interface{}{
			"child": map[string]interface{}{
				"audit_token": map[string]interface{}{"pid": 101},
				"executable":  map[string]interface{}{"path": "/bin/sh", "path_truncated": false},
				"ppid":        100,
			},
		}),
		makeEsloggerLine("fork", 11, 101, map[string]interface{}{
			"child": map[string]interface{}{
				"audit_token": map[string]interface{}{"pid": 102},
				"executable":  map[string]interface{}{"path": "/usr/bin/node", "path_truncated": false},
				"ppid":        101,
			},
		}),
		makeEsloggerLine("fork", 11, 102, map[string]interface{}{
			"child": map[string]interface{}{
				"audit_token": map[string]interface{}{"pid": 103},
				"executable":  map[string]interface{}{"path": "/usr/bin/ruby", "path_truncated": false},
				"ppid":        102,
			},
		}),
		// Write from the deepest child
		makeEsloggerLine("write", 33, 103, map[string]interface{}{
			"target": map[string]interface{}{"path": filepath.Join(home, ".cache/app/deep.log"), "path_truncated": false},
		}),
	}

	logContent := strings.Join(lines, "\n")
	logFile := filepath.Join(t.TempDir(), "eslogger.log")
	if err := os.WriteFile(logFile, []byte(logContent), 0o600); err != nil {
		t.Fatal(err)
	}

	result, err := ParseEsloggerLog(logFile, 100, false)
	if err != nil {
		t.Fatalf("ParseEsloggerLog() error: %v", err)
	}

	// The deep child's write should be included
	found := false
	for _, p := range result.WritePaths {
		if strings.Contains(p, "deep.log") {
			found = true
			break
		}
	}
	if !found {
		t.Errorf("WritePaths should include deep child write, got: %v", result.WritePaths)
	}
}
|
||||
|
||||
func TestShouldFilterPathMacOS(t *testing.T) {
|
||||
home := "/Users/testuser"
|
||||
tests := []struct {
|
||||
path string
|
||||
expected bool
|
||||
}{
|
||||
{"/dev/null", true},
|
||||
{"/private/var/run/syslog", true},
|
||||
{"/private/var/db/something", true},
|
||||
{"/private/var/folders/xx/yy", true},
|
||||
{"/System/Library/Frameworks/foo", true},
|
||||
{"/Library/Preferences/com.apple.foo", true},
|
||||
{"/usr/lib/libSystem.B.dylib", true},
|
||||
{"/usr/share/zoneinfo/UTC", true},
|
||||
{"/private/etc/hosts", true},
|
||||
{"/tmp/somefile", true},
|
||||
{"/private/tmp/somefile", true},
|
||||
{"/usr/local/lib/libfoo.dylib", true}, // .dylib
|
||||
{"/other/user/file", true}, // outside home
|
||||
{"/Users/testuser", true}, // exact home match
|
||||
{"", true}, // empty
|
||||
{"relative/path", true}, // relative
|
||||
{"/Users/testuser/.cache/app/db", false},
|
||||
{"/Users/testuser/project/main.go", false},
|
||||
{"/Users/testuser/.config/app/conf.json", false},
|
||||
{"/tmp/greywall-eslogger-abc.log", true}, // greywall infrastructure
|
||||
{"/Users/testuser/.antigen/bundles/rupa/z/zig", true}, // shell infra
|
||||
{"/Users/testuser/.oh-my-zsh/plugins/git/git.plugin.zsh", true}, // shell infra
|
||||
{"/Users/testuser/.pyenv/shims/ruby", true}, // shell infra
|
||||
{"/Users/testuser/.bun/bin/node", true}, // shell infra
|
||||
{"/Users/testuser/.local/bin/rg", true}, // shell infra
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.path, func(t *testing.T) {
|
||||
got := shouldFilterPathMacOS(tt.path, home)
|
||||
if got != tt.expected {
|
||||
t.Errorf("shouldFilterPathMacOS(%q, %q) = %v, want %v", tt.path, home, got, tt.expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCheckLearningAvailable(t *testing.T) {
|
||||
err := CheckLearningAvailable()
|
||||
if err != nil {
|
||||
t.Logf("learning not available (expected when daemon not running): %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseEsloggerLogEmpty(t *testing.T) {
|
||||
logFile := filepath.Join(t.TempDir(), "empty.log")
|
||||
if err := os.WriteFile(logFile, []byte(""), 0o600); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
result, err := ParseEsloggerLog(logFile, 100, false)
|
||||
if err != nil {
|
||||
t.Fatalf("ParseEsloggerLog() error: %v", err)
|
||||
}
|
||||
|
||||
if len(result.WritePaths) != 0 {
|
||||
t.Errorf("expected 0 write paths, got %d", len(result.WritePaths))
|
||||
}
|
||||
if len(result.ReadPaths) != 0 {
|
||||
t.Errorf("expected 0 read paths, got %d", len(result.ReadPaths))
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseEsloggerLogMalformedJSON(t *testing.T) {
|
||||
lines := []string{
|
||||
"not valid json at all",
|
||||
"{partial json",
|
||||
makeEsloggerLine("write", 33, 100, map[string]interface{}{
|
||||
"target": map[string]interface{}{"path": "/Users/test/.cache/app/good.txt", "path_truncated": false},
|
||||
}),
|
||||
}
|
||||
|
||||
logContent := strings.Join(lines, "\n")
|
||||
logFile := filepath.Join(t.TempDir(), "malformed.log")
|
||||
if err := os.WriteFile(logFile, []byte(logContent), 0o600); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Should not error — malformed lines are skipped
|
||||
result, err := ParseEsloggerLog(logFile, 100, false)
|
||||
if err != nil {
|
||||
t.Fatalf("ParseEsloggerLog() error: %v", err)
|
||||
}
|
||||
_ = result
|
||||
}
|
||||
|
||||
// TestScanForkEvents verifies that scanForkEvents extracts fork records in
// file order while ignoring interleaved non-fork events.
func TestScanForkEvents(t *testing.T) {
	lines := []string{
		makeEsloggerLine("fork", 11, 100, map[string]interface{}{
			"child": map[string]interface{}{
				"audit_token": map[string]interface{}{"pid": 101},
				"executable":  map[string]interface{}{"path": "/bin/sh", "path_truncated": false},
				"ppid":        100,
			},
		}),
		// Non-fork event interleaved between forks; must not produce a record.
		makeEsloggerLine("write", 33, 100, map[string]interface{}{
			"target": map[string]interface{}{"path": "/Users/test/file.txt", "path_truncated": false},
		}),
		makeEsloggerLine("fork", 11, 101, map[string]interface{}{
			"child": map[string]interface{}{
				"audit_token": map[string]interface{}{"pid": 102},
				"executable":  map[string]interface{}{"path": "/usr/bin/node", "path_truncated": false},
				"ppid":        101,
			},
		}),
	}

	logContent := strings.Join(lines, "\n")
	logFile := filepath.Join(t.TempDir(), "forks.log")
	if err := os.WriteFile(logFile, []byte(logContent), 0o600); err != nil {
		t.Fatal(err)
	}

	forks, err := scanForkEvents(logFile)
	if err != nil {
		t.Fatalf("scanForkEvents() error: %v", err)
	}

	if len(forks) != 2 {
		t.Fatalf("expected 2 fork records, got %d", len(forks))
	}

	// Records must preserve file order and parent/child pairing.
	expected := []forkRecord{
		{parentPID: 100, childPID: 101},
		{parentPID: 101, childPID: 102},
	}
	for i, f := range forks {
		if f.parentPID != expected[i].parentPID || f.childPID != expected[i].childPID {
			t.Errorf("fork[%d] = {parent:%d, child:%d}, want {parent:%d, child:%d}",
				i, f.parentPID, f.childPID, expected[i].parentPID, expected[i].childPID)
		}
	}
}
|
||||
|
||||
func TestFwriteFlag(t *testing.T) {
|
||||
if fwriteFlag != 0x0002 {
|
||||
t.Errorf("fwriteFlag = 0x%04x, want 0x0002", fwriteFlag)
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
fflag int
|
||||
isWrite bool
|
||||
}{
|
||||
{"FREAD only", 0x0001, false},
|
||||
{"FWRITE only", 0x0002, true},
|
||||
{"FREAD|FWRITE", 0x0003, true},
|
||||
{"FREAD|FWRITE|O_CREAT", 0x0203, true},
|
||||
{"zero", 0x0000, false},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := tt.fflag&fwriteFlag != 0
|
||||
if got != tt.isWrite {
|
||||
t.Errorf("fflag 0x%04x & FWRITE = %v, want %v", tt.fflag, got, tt.isWrite)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestParseEsloggerLogLink verifies that a link event contributes both the
// source path and the target directory to WritePaths.
func TestParseEsloggerLogLink(t *testing.T) {
	home, _ := os.UserHomeDir()

	lines := []string{
		makeEsloggerLine("link", 42, 100, map[string]interface{}{
			"source":     map[string]interface{}{"path": filepath.Join(home, ".cache/app/source.txt"), "path_truncated": false},
			"target_dir": map[string]interface{}{"path": filepath.Join(home, ".cache/app/links"), "path_truncated": false},
		}),
	}

	logContent := strings.Join(lines, "\n")
	logFile := filepath.Join(t.TempDir(), "link.log")
	if err := os.WriteFile(logFile, []byte(logContent), 0o600); err != nil {
		t.Fatal(err)
	}

	result, err := ParseEsloggerLog(logFile, 100, false)
	if err != nil {
		t.Fatalf("ParseEsloggerLog() error: %v", err)
	}

	// Both the link source and the target directory count as writes.
	expectedWrites := map[string]bool{
		filepath.Join(home, ".cache/app/source.txt"): false,
		filepath.Join(home, ".cache/app/links"):      false,
	}
	for _, p := range result.WritePaths {
		if _, ok := expectedWrites[p]; ok {
			expectedWrites[p] = true
		}
	}
	for p, found := range expectedWrites {
		if !found {
			t.Errorf("WritePaths missing expected: %q, got: %v", p, result.WritePaths)
		}
	}
}
|
||||
|
||||
func TestParseEsloggerLogDebugOutput(t *testing.T) {
|
||||
home, _ := os.UserHomeDir()
|
||||
|
||||
lines := []string{
|
||||
makeEsloggerLine("write", 33, 100, map[string]interface{}{
|
||||
"target": map[string]interface{}{"path": filepath.Join(home, ".cache/app/test.txt"), "path_truncated": false},
|
||||
}),
|
||||
}
|
||||
|
||||
logContent := strings.Join(lines, "\n")
|
||||
logFile := filepath.Join(t.TempDir(), "debug.log")
|
||||
if err := os.WriteFile(logFile, []byte(logContent), 0o600); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Just verify debug=true doesn't panic
|
||||
_, err := ParseEsloggerLog(logFile, 100, true)
|
||||
if err != nil {
|
||||
t.Fatalf("ParseEsloggerLog() with debug=true error: %v", err)
|
||||
}
|
||||
}
|
||||
292
internal/sandbox/learning_linux.go
Normal file
292
internal/sandbox/learning_linux.go
Normal file
@@ -0,0 +1,292 @@
|
||||
//go:build linux
|
||||
|
||||
package sandbox
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// straceSyscallRegex matches strace output lines for file-access syscalls.
// Used by extractWritePath as a cheap prefilter before per-syscall parsing.
var straceSyscallRegex = regexp.MustCompile(
	`(openat|mkdirat|unlinkat|renameat2|creat|symlinkat|linkat)\(`,
)

// openatWriteFlags matches O_WRONLY, O_RDWR, O_CREAT, O_TRUNC, O_APPEND flags
// in strace output. An openat() with none of these is treated as a read.
var openatWriteFlags = regexp.MustCompile(`O_(?:WRONLY|RDWR|CREAT|TRUNC|APPEND)`)
|
||||
|
||||
// CheckLearningAvailable verifies that strace is installed and accessible
// on PATH; it returns an error with install instructions otherwise.
func CheckLearningAvailable() error {
	if _, err := exec.LookPath("strace"); err != nil {
		return fmt.Errorf("strace is required for learning mode but not found: %w\n\nInstall it with: sudo apt install strace (Debian/Ubuntu) or sudo pacman -S strace (Arch)", err)
	}
	return nil
}
|
||||
|
||||
// ParseStraceLog reads an strace output file and extracts unique read and write paths.
|
||||
func ParseStraceLog(logPath string, debug bool) (*TraceResult, error) {
|
||||
f, err := os.Open(logPath) //nolint:gosec // user-controlled path from temp file - intentional
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to open strace log: %w", err)
|
||||
}
|
||||
defer func() { _ = f.Close() }()
|
||||
|
||||
home, _ := os.UserHomeDir()
|
||||
seenWrite := make(map[string]bool)
|
||||
seenRead := make(map[string]bool)
|
||||
result := &TraceResult{}
|
||||
|
||||
scanner := bufio.NewScanner(f)
|
||||
// Increase buffer for long strace lines
|
||||
scanner.Buffer(make([]byte, 0, 64*1024), 1024*1024)
|
||||
|
||||
lineCount := 0
|
||||
writeCount := 0
|
||||
readCount := 0
|
||||
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text()
|
||||
lineCount++
|
||||
|
||||
// Try extracting as a write path first
|
||||
writePath := extractWritePath(line)
|
||||
if writePath != "" {
|
||||
writeCount++
|
||||
if !shouldFilterPath(writePath, home) && !seenWrite[writePath] {
|
||||
seenWrite[writePath] = true
|
||||
result.WritePaths = append(result.WritePaths, writePath)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// Try extracting as a read path
|
||||
readPath := extractReadPath(line)
|
||||
if readPath != "" {
|
||||
readCount++
|
||||
if !shouldFilterPath(readPath, home) && !seenRead[readPath] {
|
||||
seenRead[readPath] = true
|
||||
result.ReadPaths = append(result.ReadPaths, readPath)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err := scanner.Err(); err != nil {
|
||||
return nil, fmt.Errorf("error reading strace log: %w", err)
|
||||
}
|
||||
|
||||
if debug {
|
||||
fmt.Fprintf(os.Stderr, "[greywall] Parsed strace log: %d lines, %d write syscalls, %d read syscalls, %d unique write paths, %d unique read paths\n",
|
||||
lineCount, writeCount, readCount, len(result.WritePaths), len(result.ReadPaths))
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// extractReadPath parses a single strace line and returns the read path, if any.
|
||||
// Only matches openat() with O_RDONLY (no write flags).
|
||||
func extractReadPath(line string) string {
|
||||
if !strings.Contains(line, "openat(") {
|
||||
return ""
|
||||
}
|
||||
|
||||
// Skip failed syscalls
|
||||
if strings.Contains(line, "= -1 ") {
|
||||
return ""
|
||||
}
|
||||
|
||||
// Skip resumed/unfinished lines
|
||||
if strings.Contains(line, "<unfinished") || strings.Contains(line, "resumed>") {
|
||||
return ""
|
||||
}
|
||||
|
||||
// Only care about read-only opens (no write flags)
|
||||
if openatWriteFlags.MatchString(line) {
|
||||
return ""
|
||||
}
|
||||
|
||||
// Skip directory opens (O_DIRECTORY) — these are just directory traversal
|
||||
// (readdir/stat), not meaningful file reads
|
||||
if strings.Contains(line, "O_DIRECTORY") {
|
||||
return ""
|
||||
}
|
||||
|
||||
return extractATPath(line)
|
||||
}
|
||||
|
||||
// extractWritePath parses a single strace line and returns the write target path, if any.
|
||||
func extractWritePath(line string) string {
|
||||
// Skip lines that don't contain write syscalls
|
||||
if !straceSyscallRegex.MatchString(line) {
|
||||
return ""
|
||||
}
|
||||
|
||||
// Skip failed syscalls (lines ending with = -1 ENOENT or similar errors)
|
||||
if strings.Contains(line, "= -1 ") {
|
||||
return ""
|
||||
}
|
||||
|
||||
// Skip resumed/unfinished lines
|
||||
if strings.Contains(line, "<unfinished") || strings.Contains(line, "resumed>") {
|
||||
return ""
|
||||
}
|
||||
|
||||
// Extract path based on syscall type
|
||||
if strings.Contains(line, "openat(") {
|
||||
return extractOpenatPath(line)
|
||||
}
|
||||
if strings.Contains(line, "mkdirat(") {
|
||||
return extractATPath(line)
|
||||
}
|
||||
if strings.Contains(line, "unlinkat(") {
|
||||
return extractATPath(line)
|
||||
}
|
||||
if strings.Contains(line, "renameat2(") {
|
||||
return extractRenameatPath(line)
|
||||
}
|
||||
if strings.Contains(line, "creat(") {
|
||||
return extractCreatPath(line)
|
||||
}
|
||||
if strings.Contains(line, "symlinkat(") {
|
||||
return extractSymlinkTarget(line)
|
||||
}
|
||||
if strings.Contains(line, "linkat(") {
|
||||
return extractLinkatTarget(line)
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
// extractOpenatPath extracts the path from an openat() line, only if write flags are present.
|
||||
func extractOpenatPath(line string) string {
|
||||
// Only care about writes
|
||||
if !openatWriteFlags.MatchString(line) {
|
||||
return ""
|
||||
}
|
||||
return extractATPath(line)
|
||||
}
|
||||
|
||||
// extractATPath extracts the second argument (the first quoted path after
// AT_FDCWD) from AT_FDCWD-based syscalls.
// Pattern: syscall(AT_FDCWD, "/path/to/file", ...)
func extractATPath(line string) string {
	const marker = `AT_FDCWD, "`
	idx := strings.Index(line, marker)
	if idx < 0 {
		return ""
	}
	rest := line[idx+len(marker):]
	end := strings.Index(rest, `"`)
	if end < 0 {
		return ""
	}
	return rest[:end]
}
|
||||
|
||||
// extractCreatPath extracts the path from a creat() call.
// Pattern: creat("/path/to/file", mode)
func extractCreatPath(line string) string {
	_, after, ok := strings.Cut(line, `creat("`)
	if !ok {
		return ""
	}
	path, _, ok := strings.Cut(after, `"`)
	if !ok {
		return ""
	}
	return path
}
|
||||
|
||||
// extractRenameatPath extracts the destination path from renameat2().
// Pattern: renameat2(AT_FDCWD, "/old", AT_FDCWD, "/new", flags)
// The destination (second quoted path) is preferred; when the second
// AT_FDCWD argument is absent or malformed it falls back to the first
// (source) path, matching the original extractATPath fallback.
func extractRenameatPath(line string) string {
	const marker = `AT_FDCWD, "`

	firstIdx := strings.Index(line, marker)
	if firstIdx < 0 {
		return ""
	}
	afterFirst := line[firstIdx+len(marker):]
	firstEnd := strings.Index(afterFirst, `"`)
	if firstEnd < 0 {
		return ""
	}
	firstPath := afterFirst[:firstEnd]
	remainder := afterFirst[firstEnd+1:]

	secondIdx := strings.Index(remainder, marker)
	if secondIdx < 0 {
		// No destination argument: fall back to the source path.
		return firstPath
	}
	afterSecond := remainder[secondIdx+len(marker):]
	secondEnd := strings.Index(afterSecond, `"`)
	if secondEnd < 0 {
		return firstPath
	}
	return afterSecond[:secondEnd]
}
|
||||
|
||||
// extractSymlinkTarget extracts the link path (destination) from symlinkat().
// Pattern: symlinkat("/target", AT_FDCWD, "/link")
// extractATPath matches the first quoted string after AT_FDCWD, which here
// is the third argument — the link being created, not the symlink's target.
func extractSymlinkTarget(line string) string {
	// The link path is the third argument (after AT_FDCWD)
	return extractATPath(line)
}
|
||||
|
||||
// extractLinkatTarget extracts the new link path from linkat().
// Pattern: linkat(AT_FDCWD, "/old", AT_FDCWD, "/new", flags)
// The argument layout matches renameat2 (two AT_FDCWD-prefixed paths), so
// the same extraction logic yields the second ("/new") path.
func extractLinkatTarget(line string) string {
	return extractRenameatPath(line)
}
|
||||
|
||||
// shouldFilterPath reports whether a path should be excluded from Linux
// learning results: empty/relative paths, system pseudo-filesystems, /tmp,
// shared objects, greywall infrastructure files, and anything outside the
// user's home directory.
func shouldFilterPath(path, home string) bool {
	// Empty or relative paths are never useful.
	if path == "" || !strings.HasPrefix(path, "/") {
		return true
	}

	// Kernel/system pseudo-paths never hold user app data.
	for _, prefix := range []string{"/proc/", "/sys/", "/dev/", "/run/", "/var/run/", "/var/lock/"} {
		if strings.HasPrefix(path, prefix) {
			return true
		}
	}

	// The sandbox provides its own tmpfs, so /tmp activity is irrelevant.
	if path == "/tmp" || strings.HasPrefix(path, "/tmp/") {
		return true
	}

	// Shared objects (.so, .so.N) are loader noise, not app data.
	if name := filepath.Base(path); strings.HasSuffix(name, ".so") || strings.Contains(name, ".so.") {
		return true
	}

	// greywall's own infrastructure files.
	if strings.Contains(path, "greywall-") {
		return true
	}

	// Paths outside home are typically system-level.
	return home != "" && !strings.HasPrefix(path, home+"/")
}
|
||||
243
internal/sandbox/learning_linux_test.go
Normal file
243
internal/sandbox/learning_linux_test.go
Normal file
@@ -0,0 +1,243 @@
|
||||
//go:build linux
|
||||
|
||||
package sandbox
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestExtractWritePath(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
line string
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
name: "openat with O_WRONLY",
|
||||
line: `12345 openat(AT_FDCWD, "/home/user/.cache/opencode/db", O_WRONLY|O_CREAT, 0644) = 3`,
|
||||
expected: "/home/user/.cache/opencode/db",
|
||||
},
|
||||
{
|
||||
name: "openat with O_RDWR",
|
||||
line: `12345 openat(AT_FDCWD, "/home/user/.cache/opencode/data", O_RDWR|O_CREAT, 0644) = 3`,
|
||||
expected: "/home/user/.cache/opencode/data",
|
||||
},
|
||||
{
|
||||
name: "openat with O_CREAT",
|
||||
line: `12345 openat(AT_FDCWD, "/home/user/file.txt", O_CREAT|O_WRONLY, 0644) = 3`,
|
||||
expected: "/home/user/file.txt",
|
||||
},
|
||||
{
|
||||
name: "openat read-only ignored",
|
||||
line: `12345 openat(AT_FDCWD, "/home/user/readme.txt", O_RDONLY) = 3`,
|
||||
expected: "",
|
||||
},
|
||||
{
|
||||
name: "mkdirat",
|
||||
line: `12345 mkdirat(AT_FDCWD, "/home/user/.cache/opencode", 0755) = 0`,
|
||||
expected: "/home/user/.cache/opencode",
|
||||
},
|
||||
{
|
||||
name: "unlinkat",
|
||||
line: `12345 unlinkat(AT_FDCWD, "/home/user/temp.txt", 0) = 0`,
|
||||
expected: "/home/user/temp.txt",
|
||||
},
|
||||
{
|
||||
name: "creat",
|
||||
line: `12345 creat("/home/user/newfile", 0644) = 3`,
|
||||
expected: "/home/user/newfile",
|
||||
},
|
||||
{
|
||||
name: "failed syscall ignored",
|
||||
line: `12345 openat(AT_FDCWD, "/nonexistent", O_WRONLY|O_CREAT, 0644) = -1 ENOENT (No such file or directory)`,
|
||||
expected: "",
|
||||
},
|
||||
{
|
||||
name: "unfinished syscall ignored",
|
||||
line: `12345 openat(AT_FDCWD, "/home/user/file", O_WRONLY <unfinished ...>`,
|
||||
expected: "",
|
||||
},
|
||||
{
|
||||
name: "non-write syscall ignored",
|
||||
line: `12345 read(3, "data", 1024) = 5`,
|
||||
expected: "",
|
||||
},
|
||||
{
|
||||
name: "renameat2 returns destination",
|
||||
line: `12345 renameat2(AT_FDCWD, "/home/user/old.txt", AT_FDCWD, "/home/user/new.txt", 0) = 0`,
|
||||
expected: "/home/user/new.txt",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := extractWritePath(tt.line)
|
||||
if got != tt.expected {
|
||||
t.Errorf("extractWritePath(%q) = %q, want %q", tt.line, got, tt.expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestShouldFilterPath(t *testing.T) {
|
||||
home := "/home/testuser"
|
||||
tests := []struct {
|
||||
path string
|
||||
expected bool
|
||||
}{
|
||||
{"/proc/self/maps", true},
|
||||
{"/sys/kernel/mm/transparent_hugepage", true},
|
||||
{"/dev/null", true},
|
||||
{"/tmp/somefile", true},
|
||||
{"/run/user/1000/bus", true},
|
||||
{"/home/testuser/.cache/opencode/db", false},
|
||||
{"/usr/lib/libfoo.so", true}, // .so file
|
||||
{"/usr/lib/libfoo.so.1", true}, // .so.X file
|
||||
{"/tmp/greywall-strace-abc.log", true}, // greywall infrastructure
|
||||
{"relative/path", true}, // relative path
|
||||
{"", true}, // empty path
|
||||
{"/other/user/file", true}, // outside home
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.path, func(t *testing.T) {
|
||||
got := shouldFilterPath(tt.path, home)
|
||||
if got != tt.expected {
|
||||
t.Errorf("shouldFilterPath(%q, %q) = %v, want %v", tt.path, home, got, tt.expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseStraceLog(t *testing.T) {
|
||||
home, _ := os.UserHomeDir()
|
||||
|
||||
logContent := strings.Join([]string{
|
||||
`12345 openat(AT_FDCWD, "` + filepath.Join(home, ".cache/testapp/db") + `", O_WRONLY|O_CREAT, 0644) = 3`,
|
||||
`12345 openat(AT_FDCWD, "` + filepath.Join(home, ".cache/testapp/ver") + `", O_WRONLY, 0644) = 4`,
|
||||
`12345 openat(AT_FDCWD, "` + filepath.Join(home, ".config/testapp/conf.json") + `", O_RDONLY) = 5`,
|
||||
`12345 openat(AT_FDCWD, "/etc/hostname", O_RDONLY) = 6`,
|
||||
`12345 mkdirat(AT_FDCWD, "` + filepath.Join(home, ".config/testapp") + `", 0755) = 0`,
|
||||
`12345 openat(AT_FDCWD, "/tmp/somefile", O_WRONLY|O_CREAT, 0644) = 7`,
|
||||
`12345 openat(AT_FDCWD, "/proc/self/maps", O_RDONLY) = 8`,
|
||||
`12345 openat(AT_FDCWD, "` + filepath.Join(home, ".cache/testapp/db") + `", O_WRONLY, 0644) = 9`, // duplicate
|
||||
}, "\n")
|
||||
|
||||
logFile := filepath.Join(t.TempDir(), "strace.log")
|
||||
if err := os.WriteFile(logFile, []byte(logContent), 0o600); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
result, err := ParseStraceLog(logFile, false)
|
||||
if err != nil {
|
||||
t.Fatalf("ParseStraceLog() error: %v", err)
|
||||
}
|
||||
|
||||
// Write paths: should have unique home paths only (no /tmp, /proc)
|
||||
for _, p := range result.WritePaths {
|
||||
if !strings.HasPrefix(p, home+"/") {
|
||||
t.Errorf("WritePaths returned path outside home: %q", p)
|
||||
}
|
||||
}
|
||||
|
||||
// Should not have duplicates in write paths
|
||||
seen := make(map[string]bool)
|
||||
for _, p := range result.WritePaths {
|
||||
if seen[p] {
|
||||
t.Errorf("WritePaths returned duplicate: %q", p)
|
||||
}
|
||||
seen[p] = true
|
||||
}
|
||||
|
||||
// Should have the expected write paths
|
||||
expectedWrites := map[string]bool{
|
||||
filepath.Join(home, ".cache/testapp/db"): false,
|
||||
filepath.Join(home, ".cache/testapp/ver"): false,
|
||||
filepath.Join(home, ".config/testapp"): false,
|
||||
}
|
||||
for _, p := range result.WritePaths {
|
||||
if _, ok := expectedWrites[p]; ok {
|
||||
expectedWrites[p] = true
|
||||
}
|
||||
}
|
||||
for p, found := range expectedWrites {
|
||||
if !found {
|
||||
t.Errorf("WritePaths missing expected path: %q, got: %v", p, result.WritePaths)
|
||||
}
|
||||
}
|
||||
|
||||
// Should have the expected read paths (only home paths, not /etc or /proc)
|
||||
expectedRead := filepath.Join(home, ".config/testapp/conf.json")
|
||||
foundRead := false
|
||||
for _, p := range result.ReadPaths {
|
||||
if p == expectedRead {
|
||||
foundRead = true
|
||||
}
|
||||
if !strings.HasPrefix(p, home+"/") {
|
||||
t.Errorf("ReadPaths returned path outside home: %q", p)
|
||||
}
|
||||
}
|
||||
if !foundRead {
|
||||
t.Errorf("ReadPaths missing expected path: %q, got: %v", expectedRead, result.ReadPaths)
|
||||
}
|
||||
}
|
||||
|
||||
func TestExtractReadPath(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
line string
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
name: "openat with O_RDONLY",
|
||||
line: `12345 openat(AT_FDCWD, "/home/user/.config/app/conf", O_RDONLY) = 3`,
|
||||
expected: "/home/user/.config/app/conf",
|
||||
},
|
||||
{
|
||||
name: "openat with write flags ignored",
|
||||
line: `12345 openat(AT_FDCWD, "/home/user/file", O_WRONLY|O_CREAT, 0644) = 3`,
|
||||
expected: "",
|
||||
},
|
||||
{
|
||||
name: "non-openat ignored",
|
||||
line: `12345 read(3, "data", 1024) = 5`,
|
||||
expected: "",
|
||||
},
|
||||
{
|
||||
name: "failed openat ignored",
|
||||
line: `12345 openat(AT_FDCWD, "/nonexistent", O_RDONLY) = -1 ENOENT (No such file or directory)`,
|
||||
expected: "",
|
||||
},
|
||||
{
|
||||
name: "directory open ignored",
|
||||
line: `12345 openat(AT_FDCWD, "/home/user", O_RDONLY|O_DIRECTORY) = 3`,
|
||||
expected: "",
|
||||
},
|
||||
{
|
||||
name: "directory open with cloexec ignored",
|
||||
line: `12345 openat(AT_FDCWD, "/home/user/.cache", O_RDONLY|O_CLOEXEC|O_DIRECTORY) = 4`,
|
||||
expected: "",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := extractReadPath(tt.line)
|
||||
if got != tt.expected {
|
||||
t.Errorf("extractReadPath(%q) = %q, want %q", tt.line, got, tt.expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCheckLearningAvailable(t *testing.T) {
|
||||
// This test just verifies the function doesn't panic.
|
||||
// The result depends on whether strace is installed on the test system.
|
||||
err := CheckLearningAvailable()
|
||||
if err != nil {
|
||||
t.Logf("strace not available (expected in some CI environments): %v", err)
|
||||
}
|
||||
}
|
||||
10
internal/sandbox/learning_stub.go
Normal file
10
internal/sandbox/learning_stub.go
Normal file
@@ -0,0 +1,10 @@
|
||||
//go:build !linux && !darwin
|
||||
|
||||
package sandbox
|
||||
|
||||
import "fmt"
|
||||
|
||||
// CheckLearningAvailable returns an error on unsupported platforms.
//
// Learning mode depends on platform tracing tools (strace on Linux,
// eslogger plus the daemon on macOS), so this stub always fails on
// every other OS.
func CheckLearningAvailable() error {
	return fmt.Errorf("learning mode is only available on Linux (requires strace) and macOS (requires eslogger + daemon)")
}
||||
460
internal/sandbox/learning_test.go
Normal file
460
internal/sandbox/learning_test.go
Normal file
@@ -0,0 +1,460 @@
|
||||
package sandbox
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestSanitizeTemplateName(t *testing.T) {
|
||||
tests := []struct {
|
||||
input string
|
||||
expected string
|
||||
}{
|
||||
{"opencode", "opencode"},
|
||||
{"my-app", "my-app"},
|
||||
{"my_app", "my_app"},
|
||||
{"my.app", "my.app"},
|
||||
{"my app", "my_app"},
|
||||
{"/usr/bin/opencode", "usr_bin_opencode"},
|
||||
{"my@app!v2", "my_app_v2"},
|
||||
{"", "unknown"},
|
||||
{"///", "unknown"},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.input, func(t *testing.T) {
|
||||
got := SanitizeTemplateName(tt.input)
|
||||
if got != tt.expected {
|
||||
t.Errorf("SanitizeTemplateName(%q) = %q, want %q", tt.input, got, tt.expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestLearnedTemplatePath(t *testing.T) {
|
||||
path := LearnedTemplatePath("opencode")
|
||||
if !strings.HasSuffix(path, "/learned/opencode.json") {
|
||||
t.Errorf("LearnedTemplatePath(\"opencode\") = %q, expected suffix /learned/opencode.json", path)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFindApplicationDirectory(t *testing.T) {
|
||||
home := "/home/testuser"
|
||||
tests := []struct {
|
||||
path string
|
||||
expected string
|
||||
}{
|
||||
{"/home/testuser/.cache/opencode/db/main.sqlite", "/home/testuser/.cache/opencode"},
|
||||
{"/home/testuser/.cache/opencode/version", "/home/testuser/.cache/opencode"},
|
||||
{"/home/testuser/.config/opencode/settings.json", "/home/testuser/.config/opencode"},
|
||||
{"/home/testuser/.local/share/myapp/data", "/home/testuser/.local/share/myapp"},
|
||||
{"/home/testuser/.local/state/myapp/log", "/home/testuser/.local/state/myapp"},
|
||||
// Not under a well-known parent
|
||||
{"/home/testuser/documents/file.txt", ""},
|
||||
{"/home/testuser/.cache", ""},
|
||||
// Different home
|
||||
{"/other/user/.cache/app/file", ""},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.path, func(t *testing.T) {
|
||||
got := findApplicationDirectory(tt.path, home)
|
||||
if got != tt.expected {
|
||||
t.Errorf("findApplicationDirectory(%q, %q) = %q, want %q", tt.path, home, got, tt.expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCollapsePaths(t *testing.T) {
|
||||
// Temporarily override home for testing
|
||||
t.Setenv("HOME", "/home/testuser")
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
paths []string
|
||||
contains []string // paths that should be in the result
|
||||
notContains []string // paths that must NOT be in the result
|
||||
}{
|
||||
{
|
||||
name: "multiple paths under same app dir",
|
||||
paths: []string{
|
||||
"/home/testuser/.cache/opencode/db/main.sqlite",
|
||||
"/home/testuser/.cache/opencode/version",
|
||||
},
|
||||
contains: []string{"/home/testuser/.cache/opencode"},
|
||||
},
|
||||
{
|
||||
name: "empty input",
|
||||
paths: nil,
|
||||
contains: nil,
|
||||
},
|
||||
{
|
||||
name: "single path uses parent dir",
|
||||
paths: []string{
|
||||
"/home/testuser/.cache/opencode/version",
|
||||
},
|
||||
contains: []string{"/home/testuser/.cache/opencode"},
|
||||
},
|
||||
{
|
||||
name: "paths from different app dirs",
|
||||
paths: []string{
|
||||
"/home/testuser/.cache/opencode/db",
|
||||
"/home/testuser/.cache/opencode/version",
|
||||
"/home/testuser/.config/opencode/settings.json",
|
||||
},
|
||||
contains: []string{
|
||||
"/home/testuser/.cache/opencode",
|
||||
"/home/testuser/.config/opencode",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "files directly under home stay as exact paths",
|
||||
paths: []string{
|
||||
"/home/testuser/.gitignore",
|
||||
"/home/testuser/.npmrc",
|
||||
},
|
||||
contains: []string{
|
||||
"/home/testuser/.gitignore",
|
||||
"/home/testuser/.npmrc",
|
||||
},
|
||||
notContains: []string{"/home/testuser"},
|
||||
},
|
||||
{
|
||||
name: "mix of home files and app dir paths",
|
||||
paths: []string{
|
||||
"/home/testuser/.gitignore",
|
||||
"/home/testuser/.cache/opencode/db/main.sqlite",
|
||||
"/home/testuser/.cache/opencode/version",
|
||||
"/home/testuser/.npmrc",
|
||||
},
|
||||
contains: []string{
|
||||
"/home/testuser/.gitignore",
|
||||
"/home/testuser/.npmrc",
|
||||
"/home/testuser/.cache/opencode",
|
||||
},
|
||||
notContains: []string{"/home/testuser"},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := CollapsePaths(tt.paths)
|
||||
if tt.contains == nil {
|
||||
if got != nil {
|
||||
t.Errorf("CollapsePaths() = %v, want nil", got)
|
||||
}
|
||||
return
|
||||
}
|
||||
for _, want := range tt.contains {
|
||||
found := false
|
||||
for _, g := range got {
|
||||
if g == want {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
t.Errorf("CollapsePaths() = %v, missing expected path %q", got, want)
|
||||
}
|
||||
}
|
||||
for _, bad := range tt.notContains {
|
||||
for _, g := range got {
|
||||
if g == bad {
|
||||
t.Errorf("CollapsePaths() = %v, should NOT contain %q", got, bad)
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsDefaultWritablePath(t *testing.T) {
|
||||
tests := []struct {
|
||||
path string
|
||||
expected bool
|
||||
}{
|
||||
{"/dev/null", true},
|
||||
{"/dev/stdout", true},
|
||||
{"/tmp/somefile", false}, // /tmp is tmpfs inside sandbox, not host /tmp
|
||||
{"/home/user/file", false},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.path, func(t *testing.T) {
|
||||
got := isDefaultWritablePath(tt.path)
|
||||
if got != tt.expected {
|
||||
t.Errorf("isDefaultWritablePath(%q) = %v, want %v", tt.path, got, tt.expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsSensitivePath(t *testing.T) {
|
||||
home := "/home/testuser"
|
||||
tests := []struct {
|
||||
path string
|
||||
expected bool
|
||||
}{
|
||||
{"/home/testuser/.bashrc", true},
|
||||
{"/home/testuser/.gitconfig", true},
|
||||
{"/home/testuser/.ssh/id_rsa", true},
|
||||
{"/home/testuser/.ssh/known_hosts", true},
|
||||
{"/home/testuser/.gnupg/secring.gpg", true},
|
||||
{"/home/testuser/.env", true},
|
||||
{"/home/testuser/project/.env.local", true},
|
||||
{"/home/testuser/.cache/opencode/db", false},
|
||||
{"/home/testuser/documents/readme.txt", false},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.path, func(t *testing.T) {
|
||||
got := isSensitivePath(tt.path, home)
|
||||
if got != tt.expected {
|
||||
t.Errorf("isSensitivePath(%q, %q) = %v, want %v", tt.path, home, got, tt.expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestDeduplicateSubPaths(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
paths []string
|
||||
expected []string
|
||||
}{
|
||||
{
|
||||
name: "removes sub-paths",
|
||||
paths: []string{"/home/user/.cache", "/home/user/.cache/opencode"},
|
||||
expected: []string{"/home/user/.cache"},
|
||||
},
|
||||
{
|
||||
name: "keeps independent paths",
|
||||
paths: []string{"/home/user/.cache/opencode", "/home/user/.config/opencode"},
|
||||
expected: []string{"/home/user/.cache/opencode", "/home/user/.config/opencode"},
|
||||
},
|
||||
{
|
||||
name: "empty",
|
||||
paths: nil,
|
||||
expected: nil,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := deduplicateSubPaths(tt.paths)
|
||||
if len(got) != len(tt.expected) {
|
||||
t.Errorf("deduplicateSubPaths(%v) = %v, want %v", tt.paths, got, tt.expected)
|
||||
return
|
||||
}
|
||||
for i := range got {
|
||||
if got[i] != tt.expected[i] {
|
||||
t.Errorf("deduplicateSubPaths(%v)[%d] = %q, want %q", tt.paths, i, got[i], tt.expected[i])
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetDangerousFilePatterns(t *testing.T) {
|
||||
patterns := getDangerousFilePatterns()
|
||||
if len(patterns) == 0 {
|
||||
t.Error("getDangerousFilePatterns() returned empty list")
|
||||
}
|
||||
// Check some expected patterns
|
||||
found := false
|
||||
for _, p := range patterns {
|
||||
if p == "~/.bashrc" {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
t.Error("getDangerousFilePatterns() missing ~/.bashrc")
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetSensitiveReadPatterns(t *testing.T) {
|
||||
patterns := getSensitiveReadPatterns()
|
||||
if len(patterns) == 0 {
|
||||
t.Error("getSensitiveReadPatterns() returned empty list")
|
||||
}
|
||||
found := false
|
||||
for _, p := range patterns {
|
||||
if p == "~/.ssh/id_*" {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
t.Error("getSensitiveReadPatterns() missing ~/.ssh/id_*")
|
||||
}
|
||||
}
|
||||
|
||||
func TestToTildePath(t *testing.T) {
|
||||
tests := []struct {
|
||||
path string
|
||||
home string
|
||||
expected string
|
||||
}{
|
||||
{"/home/user/.cache/opencode", "/home/user", "~/.cache/opencode"},
|
||||
{"/other/path", "/home/user", "/other/path"},
|
||||
{"/home/user/.cache/opencode", "", "/home/user/.cache/opencode"},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.path, func(t *testing.T) {
|
||||
got := toTildePath(tt.path, tt.home)
|
||||
if got != tt.expected {
|
||||
t.Errorf("toTildePath(%q, %q) = %q, want %q", tt.path, tt.home, got, tt.expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestListLearnedTemplates(t *testing.T) {
|
||||
// Use a temp dir to isolate from real user config
|
||||
tmpDir := t.TempDir()
|
||||
t.Setenv("XDG_CONFIG_HOME", tmpDir)
|
||||
|
||||
// Initially empty
|
||||
templates, err := ListLearnedTemplates()
|
||||
if err != nil {
|
||||
t.Fatalf("ListLearnedTemplates() error: %v", err)
|
||||
}
|
||||
if len(templates) != 0 {
|
||||
t.Errorf("expected empty list, got %v", templates)
|
||||
}
|
||||
|
||||
// Create some templates
|
||||
dir := LearnedTemplateDir()
|
||||
if err := os.MkdirAll(dir, 0o750); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := os.WriteFile(filepath.Join(dir, "opencode.json"), []byte("{}"), 0o600); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := os.WriteFile(filepath.Join(dir, "myapp.json"), []byte("{}"), 0o600); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := os.WriteFile(filepath.Join(dir, "notjson.txt"), []byte(""), 0o600); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
templates, err = ListLearnedTemplates()
|
||||
if err != nil {
|
||||
t.Fatalf("ListLearnedTemplates() error: %v", err)
|
||||
}
|
||||
if len(templates) != 2 {
|
||||
t.Errorf("expected 2 templates, got %d: %v", len(templates), templates)
|
||||
}
|
||||
|
||||
names := make(map[string]bool)
|
||||
for _, tmpl := range templates {
|
||||
names[tmpl.Name] = true
|
||||
}
|
||||
if !names["opencode"] {
|
||||
t.Error("missing template 'opencode'")
|
||||
}
|
||||
if !names["myapp"] {
|
||||
t.Error("missing template 'myapp'")
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuildTemplate(t *testing.T) {
|
||||
allowRead := []string{"~/external-data"}
|
||||
allowWrite := []string{".", "~/.cache/opencode", "~/.config/opencode"}
|
||||
result := buildTemplate("opencode", allowRead, allowWrite)
|
||||
|
||||
// Check header comments
|
||||
if !strings.Contains(result, `Learned template for "opencode"`) {
|
||||
t.Error("template missing header comment with command name")
|
||||
}
|
||||
if !strings.Contains(result, "greywall --learning -- opencode") {
|
||||
t.Error("template missing generation command")
|
||||
}
|
||||
if !strings.Contains(result, "Review and adjust paths as needed") {
|
||||
t.Error("template missing review comment")
|
||||
}
|
||||
|
||||
// Check content
|
||||
if !strings.Contains(result, `"allowRead"`) {
|
||||
t.Error("template missing allowRead field")
|
||||
}
|
||||
if !strings.Contains(result, `"~/external-data"`) {
|
||||
t.Error("template missing expected allowRead path")
|
||||
}
|
||||
if !strings.Contains(result, `"allowWrite"`) {
|
||||
t.Error("template missing allowWrite field")
|
||||
}
|
||||
if !strings.Contains(result, `"~/.cache/opencode"`) {
|
||||
t.Error("template missing expected allowWrite path")
|
||||
}
|
||||
if !strings.Contains(result, `"denyWrite"`) {
|
||||
t.Error("template missing denyWrite field")
|
||||
}
|
||||
if !strings.Contains(result, `"denyRead"`) {
|
||||
t.Error("template missing denyRead field")
|
||||
}
|
||||
// Check .env patterns are included in denyRead
|
||||
if !strings.Contains(result, `".env"`) {
|
||||
t.Error("template missing .env in denyRead")
|
||||
}
|
||||
if !strings.Contains(result, `".env.*"`) {
|
||||
t.Error("template missing .env.* in denyRead")
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuildTemplateNoAllowRead(t *testing.T) {
|
||||
result := buildTemplate("simple-cmd", nil, []string{"."})
|
||||
|
||||
// When allowRead is nil, it should be omitted from JSON
|
||||
if strings.Contains(result, `"allowRead"`) {
|
||||
t.Error("template should omit allowRead when nil")
|
||||
}
|
||||
}
|
||||
|
||||
func TestGenerateLearnedTemplate(t *testing.T) {
|
||||
// Create a temp dir for templates
|
||||
tmpDir := t.TempDir()
|
||||
t.Setenv("XDG_CONFIG_HOME", tmpDir)
|
||||
|
||||
home, _ := os.UserHomeDir()
|
||||
|
||||
// Build a TraceResult directly (platform-independent test)
|
||||
result := &TraceResult{
|
||||
WritePaths: []string{
|
||||
filepath.Join(home, ".cache/testapp/db.sqlite"),
|
||||
filepath.Join(home, ".cache/testapp/version"),
|
||||
filepath.Join(home, ".config/testapp"),
|
||||
},
|
||||
ReadPaths: []string{
|
||||
filepath.Join(home, ".config/testapp/conf.json"),
|
||||
},
|
||||
}
|
||||
|
||||
templatePath, err := GenerateLearnedTemplate(result, "testapp", false)
|
||||
if err != nil {
|
||||
t.Fatalf("GenerateLearnedTemplate() error: %v", err)
|
||||
}
|
||||
|
||||
if templatePath == "" {
|
||||
t.Fatal("GenerateLearnedTemplate() returned empty path")
|
||||
}
|
||||
|
||||
// Read and verify template
|
||||
data, err := os.ReadFile(templatePath) //nolint:gosec // reading test-generated template file
|
||||
if err != nil {
|
||||
t.Fatalf("failed to read template: %v", err)
|
||||
}
|
||||
|
||||
content := string(data)
|
||||
if !strings.Contains(content, "testapp") {
|
||||
t.Error("template doesn't contain command name")
|
||||
}
|
||||
if !strings.Contains(content, "allowWrite") {
|
||||
t.Error("template doesn't contain allowWrite")
|
||||
}
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
@@ -39,7 +39,7 @@ func (m *EBPFMonitor) Start() error {
|
||||
features := DetectLinuxFeatures()
|
||||
if !features.HasEBPF {
|
||||
if m.debug {
|
||||
fmt.Fprintf(os.Stderr, "[fence:ebpf] eBPF monitoring not available (need CAP_BPF or root)\n")
|
||||
fmt.Fprintf(os.Stderr, "[greywall:ebpf] eBPF monitoring not available (need CAP_BPF or root)\n")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -51,14 +51,14 @@ func (m *EBPFMonitor) Start() error {
|
||||
// Try multiple eBPF tracing approaches
|
||||
if err := m.tryBpftrace(ctx); err != nil {
|
||||
if m.debug {
|
||||
fmt.Fprintf(os.Stderr, "[fence:ebpf] bpftrace not available: %v\n", err)
|
||||
fmt.Fprintf(os.Stderr, "[greywall:ebpf] bpftrace not available: %v\n", err)
|
||||
}
|
||||
// Fall back to other methods
|
||||
go m.traceWithPerfEvents()
|
||||
}
|
||||
|
||||
if m.debug {
|
||||
fmt.Fprintf(os.Stderr, "[fence:ebpf] Started eBPF monitoring for PID %d\n", m.pid)
|
||||
fmt.Fprintf(os.Stderr, "[greywall:ebpf] Started eBPF monitoring for PID %d\n", m.pid)
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -101,7 +101,7 @@ func (m *EBPFMonitor) tryBpftrace(ctx context.Context) error {
|
||||
script := m.generateBpftraceScript()
|
||||
|
||||
// Write script to temp file
|
||||
tmpFile, err := os.CreateTemp("", "fence-ebpf-*.bt")
|
||||
tmpFile, err := os.CreateTemp("", "greywall-ebpf-*.bt")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create temp file: %w", err)
|
||||
}
|
||||
@@ -136,7 +136,7 @@ func (m *EBPFMonitor) tryBpftrace(ctx context.Context) error {
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text()
|
||||
if m.debug {
|
||||
fmt.Fprintf(os.Stderr, "[fence:ebpf:trace] %s\n", line)
|
||||
fmt.Fprintf(os.Stderr, "[greywall:ebpf:trace] %s\n", line)
|
||||
}
|
||||
if violation := m.parseBpftraceOutput(line); violation != "" {
|
||||
fmt.Fprintf(os.Stderr, "%s\n", violation)
|
||||
@@ -150,7 +150,7 @@ func (m *EBPFMonitor) tryBpftrace(ctx context.Context) error {
|
||||
scanner := bufio.NewScanner(stderr)
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text()
|
||||
fmt.Fprintf(os.Stderr, "[fence:ebpf:err] %s\n", line)
|
||||
fmt.Fprintf(os.Stderr, "[greywall:ebpf:err] %s\n", line)
|
||||
}
|
||||
}()
|
||||
}
|
||||
@@ -173,7 +173,7 @@ func (m *EBPFMonitor) generateBpftraceScript() string {
|
||||
script := fmt.Sprintf(`
|
||||
BEGIN
|
||||
{
|
||||
printf("fence:ebpf monitoring started for sandbox PID %%d (filtering pid >= %%d)\n", %d, %d);
|
||||
printf("greywall:ebpf monitoring started for sandbox PID %%d (filtering pid >= %%d)\n", %d, %d);
|
||||
}
|
||||
|
||||
// Monitor filesystem errors (EPERM=-1, EACCES=-13, EROFS=-30)
|
||||
@@ -227,7 +227,7 @@ func (m *EBPFMonitor) parseBpftraceOutput(line string) string {
|
||||
errorName := getErrnoName(ret)
|
||||
timestamp := time.Now().Format("15:04:05")
|
||||
|
||||
return fmt.Sprintf("[fence:ebpf] %s ✗ %s: %s (%s, pid=%d)",
|
||||
return fmt.Sprintf("[greywall:ebpf] %s ✗ %s: %s (%s, pid=%d)",
|
||||
timestamp, syscall, errorName, comm, pid)
|
||||
}
|
||||
|
||||
@@ -239,7 +239,7 @@ func (m *EBPFMonitor) traceWithPerfEvents() {
|
||||
tracePipe := "/sys/kernel/debug/tracing/trace_pipe"
|
||||
if _, err := os.Stat(tracePipe); err != nil {
|
||||
if m.debug {
|
||||
fmt.Fprintf(os.Stderr, "[fence:ebpf] trace_pipe not available\n")
|
||||
fmt.Fprintf(os.Stderr, "[greywall:ebpf] trace_pipe not available\n")
|
||||
}
|
||||
return
|
||||
}
|
||||
@@ -247,7 +247,7 @@ func (m *EBPFMonitor) traceWithPerfEvents() {
|
||||
f, err := os.Open(tracePipe)
|
||||
if err != nil {
|
||||
if m.debug {
|
||||
fmt.Fprintf(os.Stderr, "[fence:ebpf] Failed to open trace_pipe: %v\n", err)
|
||||
fmt.Fprintf(os.Stderr, "[greywall:ebpf] Failed to open trace_pipe: %v\n", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
@@ -317,10 +317,10 @@ func (v *ViolationEvent) FormatViolation() string {
|
||||
errName := getErrnoName(-v.Errno)
|
||||
|
||||
if v.Path != "" {
|
||||
return fmt.Sprintf("[fence:ebpf] %s ✗ %s: %s (%s, %s:%d)",
|
||||
return fmt.Sprintf("[greywall:ebpf] %s ✗ %s: %s (%s, %s:%d)",
|
||||
timestamp, v.Operation, v.Path, errName, v.Comm, v.PID)
|
||||
}
|
||||
return fmt.Sprintf("[fence:ebpf] %s ✗ %s: %s (%s:%d)",
|
||||
return fmt.Sprintf("[greywall:ebpf] %s ✗ %s: %s (%s:%d)",
|
||||
timestamp, v.Operation, errName, v.Comm, v.PID)
|
||||
}
|
||||
|
||||
|
||||
@@ -35,6 +35,11 @@ type LinuxFeatures struct {
|
||||
// This can be false in containerized environments (Docker, CI) without CAP_NET_ADMIN
|
||||
CanUnshareNet bool
|
||||
|
||||
// Transparent proxy support
|
||||
HasIpCommand bool // ip (iproute2) available
|
||||
HasDevNetTun bool // /dev/net/tun exists
|
||||
HasTun2Socks bool // tun2socks embedded binary available
|
||||
|
||||
// Kernel version
|
||||
KernelMajor int
|
||||
KernelMinor int
|
||||
@@ -74,6 +79,12 @@ func (f *LinuxFeatures) detect() {
|
||||
|
||||
// Check if we can create network namespaces
|
||||
f.detectNetworkNamespace()
|
||||
|
||||
// Check transparent proxy support
|
||||
f.HasIpCommand = commandExists("ip")
|
||||
_, err := os.Stat("/dev/net/tun")
|
||||
f.HasDevNetTun = err == nil
|
||||
f.HasTun2Socks = true // embedded binary, always available
|
||||
}
|
||||
|
||||
func (f *LinuxFeatures) parseKernelVersion() {
|
||||
@@ -255,11 +266,117 @@ func (f *LinuxFeatures) CanUseLandlock() bool {
|
||||
return f.HasLandlock && f.LandlockABI >= 1
|
||||
}
|
||||
|
||||
// CanUseTransparentProxy returns true if transparent proxying via tun2socks is possible.
|
||||
func (f *LinuxFeatures) CanUseTransparentProxy() bool {
|
||||
return f.HasIpCommand && f.HasDevNetTun && f.CanUnshareNet
|
||||
}
|
||||
|
||||
// MinimumViable returns true if the minimum required features are available.
|
||||
func (f *LinuxFeatures) MinimumViable() bool {
|
||||
return f.HasBwrap && f.HasSocat
|
||||
}
|
||||
|
||||
// PrintDependencyStatus prints dependency status with install suggestions for Linux.
//
// The report covers, in order: platform and kernel version, the required
// dependencies (bubblewrap, socat) with an install hint when any are
// missing, the security-feature summary, transparent-proxy availability
// (listing the exact missing prerequisites), and a final status line.
func PrintDependencyStatus() {
	features := DetectLinuxFeatures()

	fmt.Printf("\n Platform: linux (kernel %d.%d)\n", features.KernelMajor, features.KernelMinor)

	fmt.Printf("\n Dependencies (required):\n")

	// Track whether both hard requirements (bwrap, socat) are present.
	allGood := true
	if features.HasBwrap {
		fmt.Printf(" ✓ bubblewrap (bwrap)\n")
	} else {
		fmt.Printf(" ✗ bubblewrap (bwrap) — REQUIRED\n")
		allGood = false
	}
	if features.HasSocat {
		fmt.Printf(" ✓ socat\n")
	} else {
		fmt.Printf(" ✗ socat — REQUIRED\n")
		allGood = false
	}

	// Offer a distro-appropriate install command for whatever is missing.
	if !allGood {
		fmt.Printf("\n Install missing dependencies:\n")
		fmt.Printf(" %s\n", suggestInstallCmd(features))
	}

	fmt.Printf("\n Security features: %s\n", features.Summary())

	if features.CanUseTransparentProxy() {
		fmt.Printf(" Transparent proxy: available\n")
	} else {
		// Name the specific missing prerequisites so the user knows what to fix.
		parts := []string{}
		if !features.HasIpCommand {
			parts = append(parts, "iproute2")
		}
		if !features.HasDevNetTun {
			parts = append(parts, "/dev/net/tun")
		}
		if !features.CanUnshareNet {
			parts = append(parts, "network namespace")
		}
		if len(parts) > 0 {
			fmt.Printf(" Transparent proxy: unavailable (missing: %s)\n", strings.Join(parts, ", "))
		} else {
			fmt.Printf(" Transparent proxy: unavailable\n")
		}

		// On some distros AppArmor blocks unprivileged user namespaces,
		// which in turn blocks bwrap --unshare-net; surface the sysctl fix.
		if !features.CanUnshareNet && features.HasBwrap {
			if val := readSysctl("kernel/apparmor_restrict_unprivileged_userns"); val == "1" {
				fmt.Printf("\n Note: AppArmor is restricting unprivileged user namespaces.\n")
				fmt.Printf(" This prevents bwrap --unshare-net (needed for transparent proxy).\n")
				fmt.Printf(" To fix: sudo sysctl -w kernel.apparmor_restrict_unprivileged_userns=0\n")
				fmt.Printf(" Persist: echo 'kernel.apparmor_restrict_unprivileged_userns=0' | sudo tee /etc/sysctl.d/99-greywall-userns.conf\n")
			}
		}
	}

	if allGood {
		fmt.Printf("\n Status: ready\n")
	} else {
		fmt.Printf("\n Status: missing required dependencies\n")
	}
}
|
||||
func suggestInstallCmd(features *LinuxFeatures) string {
|
||||
var missing []string
|
||||
if !features.HasBwrap {
|
||||
missing = append(missing, "bubblewrap")
|
||||
}
|
||||
if !features.HasSocat {
|
||||
missing = append(missing, "socat")
|
||||
}
|
||||
pkgs := strings.Join(missing, " ")
|
||||
|
||||
switch {
|
||||
case commandExists("apt-get"):
|
||||
return fmt.Sprintf("sudo apt install %s", pkgs)
|
||||
case commandExists("dnf"):
|
||||
return fmt.Sprintf("sudo dnf install %s", pkgs)
|
||||
case commandExists("yum"):
|
||||
return fmt.Sprintf("sudo yum install %s", pkgs)
|
||||
case commandExists("pacman"):
|
||||
return fmt.Sprintf("sudo pacman -S %s", pkgs)
|
||||
case commandExists("apk"):
|
||||
return fmt.Sprintf("sudo apk add %s", pkgs)
|
||||
case commandExists("zypper"):
|
||||
return fmt.Sprintf("sudo zypper install %s", pkgs)
|
||||
default:
|
||||
return fmt.Sprintf("install %s using your package manager", pkgs)
|
||||
}
|
||||
}
|
||||
|
||||
func readSysctl(name string) string {
|
||||
data, err := os.ReadFile("/proc/sys/" + name) //nolint:gosec // reading sysctl values - trusted kernel path
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
return strings.TrimSpace(string(data))
|
||||
}
|
||||
|
||||
func commandExists(name string) bool {
|
||||
_, err := exec.LookPath(name)
|
||||
return err == nil
|
||||
|
||||
@@ -2,6 +2,12 @@
|
||||
|
||||
package sandbox
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os/exec"
|
||||
"runtime"
|
||||
)
|
||||
|
||||
// LinuxFeatures describes available Linux sandboxing features.
|
||||
// This is a stub for non-Linux platforms.
|
||||
type LinuxFeatures struct {
|
||||
@@ -15,6 +21,9 @@ type LinuxFeatures struct {
|
||||
HasCapBPF bool
|
||||
HasCapRoot bool
|
||||
CanUnshareNet bool
|
||||
HasIpCommand bool
|
||||
HasDevNetTun bool
|
||||
HasTun2Socks bool
|
||||
KernelMajor int
|
||||
KernelMinor int
|
||||
}
|
||||
@@ -39,7 +48,30 @@ func (f *LinuxFeatures) CanUseLandlock() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// CanUseTransparentProxy returns false on non-Linux platforms.
|
||||
func (f *LinuxFeatures) CanUseTransparentProxy() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// MinimumViable returns false on non-Linux platforms.
|
||||
func (f *LinuxFeatures) MinimumViable() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// PrintDependencyStatus prints dependency status for non-Linux platforms.
|
||||
func PrintDependencyStatus() {
|
||||
if runtime.GOOS == "darwin" {
|
||||
fmt.Printf("\n Platform: macOS\n")
|
||||
fmt.Printf("\n Dependencies (required):\n")
|
||||
if _, err := exec.LookPath("sandbox-exec"); err == nil {
|
||||
fmt.Printf(" ✓ sandbox-exec (Seatbelt)\n")
|
||||
fmt.Printf("\n Status: ready\n")
|
||||
} else {
|
||||
fmt.Printf(" ✗ sandbox-exec — REQUIRED (should be built-in on macOS)\n")
|
||||
fmt.Printf("\n Status: missing required dependencies\n")
|
||||
}
|
||||
} else {
|
||||
fmt.Printf("\n Platform: %s (unsupported)\n", runtime.GOOS)
|
||||
fmt.Printf("\n Status: this platform is not supported\n")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -10,7 +10,7 @@ import (
|
||||
"strings"
|
||||
"unsafe"
|
||||
|
||||
"github.com/Use-Tusk/fence/internal/config"
|
||||
"gitea.app.monadical.io/monadical/greywall/internal/config"
|
||||
"github.com/bmatcuk/doublestar/v4"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
@@ -22,7 +22,7 @@ func ApplyLandlockFromConfig(cfg *config.Config, cwd string, socketPaths []strin
|
||||
features := DetectLinuxFeatures()
|
||||
if !features.CanUseLandlock() {
|
||||
if debug {
|
||||
fmt.Fprintf(os.Stderr, "[fence:landlock] Not available (kernel %d.%d < 5.13), skipping\n",
|
||||
fmt.Fprintf(os.Stderr, "[greywall:landlock] Not available (kernel %d.%d < 5.13), skipping\n",
|
||||
features.KernelMajor, features.KernelMinor)
|
||||
}
|
||||
return nil // Graceful fallback - Landlock not available
|
||||
@@ -31,7 +31,7 @@ func ApplyLandlockFromConfig(cfg *config.Config, cwd string, socketPaths []strin
|
||||
ruleset, err := NewLandlockRuleset(debug)
|
||||
if err != nil {
|
||||
if debug {
|
||||
fmt.Fprintf(os.Stderr, "[fence:landlock] Failed to create ruleset: %v\n", err)
|
||||
fmt.Fprintf(os.Stderr, "[greywall:landlock] Failed to create ruleset: %v\n", err)
|
||||
}
|
||||
return nil // Graceful fallback
|
||||
}
|
||||
@@ -39,7 +39,7 @@ func ApplyLandlockFromConfig(cfg *config.Config, cwd string, socketPaths []strin
|
||||
|
||||
if err := ruleset.Initialize(); err != nil {
|
||||
if debug {
|
||||
fmt.Fprintf(os.Stderr, "[fence:landlock] Failed to initialize: %v\n", err)
|
||||
fmt.Fprintf(os.Stderr, "[greywall:landlock] Failed to initialize: %v\n", err)
|
||||
}
|
||||
return nil // Graceful fallback
|
||||
}
|
||||
@@ -66,41 +66,89 @@ func ApplyLandlockFromConfig(cfg *config.Config, cwd string, socketPaths []strin
|
||||
if err := ruleset.AllowRead(p); err != nil && debug {
|
||||
// Ignore errors for paths that don't exist
|
||||
if !os.IsNotExist(err) {
|
||||
fmt.Fprintf(os.Stderr, "[fence:landlock] Warning: failed to add read path %s: %v\n", p, err)
|
||||
fmt.Fprintf(os.Stderr, "[greywall:landlock] Warning: failed to add read path %s: %v\n", p, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Current working directory - read access (may be upgraded to write below)
|
||||
if cwd != "" {
|
||||
if err := ruleset.AllowRead(cwd); err != nil && debug {
|
||||
fmt.Fprintf(os.Stderr, "[fence:landlock] Warning: failed to add cwd read path: %v\n", err)
|
||||
// If /etc/resolv.conf is a cross-mount symlink (e.g., -> /mnt/wsl/resolv.conf
|
||||
// on WSL), Landlock needs a read rule for the resolved target's parent dir,
|
||||
// otherwise following the symlink hits EACCES.
|
||||
if target, err := filepath.EvalSymlinks("/etc/resolv.conf"); err == nil && target != "/etc/resolv.conf" {
|
||||
targetDir := filepath.Dir(target)
|
||||
if err := ruleset.AllowRead(targetDir); err != nil && debug {
|
||||
fmt.Fprintf(os.Stderr, "[greywall:landlock] Warning: failed to add resolv.conf target dir %s: %v\n", targetDir, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Home directory - read access
|
||||
if home, err := os.UserHomeDir(); err == nil {
|
||||
if err := ruleset.AllowRead(home); err != nil && debug {
|
||||
fmt.Fprintf(os.Stderr, "[fence:landlock] Warning: failed to add home read path: %v\n", err)
|
||||
// Current working directory - read+write access (project directory)
|
||||
if cwd != "" {
|
||||
if err := ruleset.AllowReadWrite(cwd); err != nil && debug {
|
||||
fmt.Fprintf(os.Stderr, "[greywall:landlock] Warning: failed to add cwd read/write path: %v\n", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Home directory - read access only when not in deny-by-default mode.
|
||||
// In deny-by-default mode, only specific user tooling paths are allowed,
|
||||
// not the entire home directory. Landlock can't selectively deny files
|
||||
// within an allowed directory, so we rely on bwrap mount overlays for
|
||||
// .env file masking.
|
||||
defaultDenyRead := cfg != nil && cfg.Filesystem.IsDefaultDenyRead()
|
||||
if !defaultDenyRead {
|
||||
if home, err := os.UserHomeDir(); err == nil {
|
||||
if err := ruleset.AllowRead(home); err != nil && debug {
|
||||
fmt.Fprintf(os.Stderr, "[greywall:landlock] Warning: failed to add home read path: %v\n", err)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// In deny-by-default mode, allow specific user tooling paths
|
||||
if home, err := os.UserHomeDir(); err == nil {
|
||||
for _, p := range GetDefaultReadablePaths() {
|
||||
if strings.HasPrefix(p, home) {
|
||||
if err := ruleset.AllowRead(p); err != nil && debug {
|
||||
fmt.Fprintf(os.Stderr, "[greywall:landlock] Warning: failed to add user tooling path %s: %v\n", p, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
// Shell configs
|
||||
shellConfigs := []string{".bashrc", ".bash_profile", ".profile", ".zshrc", ".zprofile", ".zshenv", ".inputrc"}
|
||||
for _, f := range shellConfigs {
|
||||
p := filepath.Join(home, f)
|
||||
if err := ruleset.AllowRead(p); err != nil && debug {
|
||||
if !os.IsNotExist(err) {
|
||||
fmt.Fprintf(os.Stderr, "[greywall:landlock] Warning: failed to add shell config %s: %v\n", p, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
// Home caches
|
||||
homeCaches := []string{".cache", ".npm", ".cargo", ".rustup", ".local", ".config"}
|
||||
for _, d := range homeCaches {
|
||||
p := filepath.Join(home, d)
|
||||
if err := ruleset.AllowRead(p); err != nil && debug {
|
||||
if !os.IsNotExist(err) {
|
||||
fmt.Fprintf(os.Stderr, "[greywall:landlock] Warning: failed to add home cache %s: %v\n", p, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// /tmp - allow read+write (many programs need this)
|
||||
if err := ruleset.AllowReadWrite("/tmp"); err != nil && debug {
|
||||
fmt.Fprintf(os.Stderr, "[fence:landlock] Warning: failed to add /tmp write path: %v\n", err)
|
||||
fmt.Fprintf(os.Stderr, "[greywall:landlock] Warning: failed to add /tmp write path: %v\n", err)
|
||||
}
|
||||
|
||||
// /dev needs read+write for /dev/null, /dev/zero, /dev/tty, etc.
|
||||
// Landlock doesn't support rules on device files directly, so we allow the whole /dev
|
||||
if err := ruleset.AllowReadWrite("/dev"); err != nil && debug {
|
||||
fmt.Fprintf(os.Stderr, "[fence:landlock] Warning: failed to add /dev write path: %v\n", err)
|
||||
fmt.Fprintf(os.Stderr, "[greywall:landlock] Warning: failed to add /dev write path: %v\n", err)
|
||||
}
|
||||
|
||||
// Socket paths for proxy communication
|
||||
for _, p := range socketPaths {
|
||||
dir := filepath.Dir(p)
|
||||
if err := ruleset.AllowReadWrite(dir); err != nil && debug {
|
||||
fmt.Fprintf(os.Stderr, "[fence:landlock] Warning: failed to add socket path %s: %v\n", dir, err)
|
||||
fmt.Fprintf(os.Stderr, "[greywall:landlock] Warning: failed to add socket path %s: %v\n", dir, err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -109,7 +157,7 @@ func ApplyLandlockFromConfig(cfg *config.Config, cwd string, socketPaths []strin
|
||||
expandedPaths := ExpandGlobPatterns(cfg.Filesystem.AllowWrite)
|
||||
for _, p := range expandedPaths {
|
||||
if err := ruleset.AllowReadWrite(p); err != nil && debug {
|
||||
fmt.Fprintf(os.Stderr, "[fence:landlock] Warning: failed to add write path %s: %v\n", p, err)
|
||||
fmt.Fprintf(os.Stderr, "[greywall:landlock] Warning: failed to add write path %s: %v\n", p, err)
|
||||
}
|
||||
}
|
||||
// Also add non-glob paths directly
|
||||
@@ -117,7 +165,7 @@ func ApplyLandlockFromConfig(cfg *config.Config, cwd string, socketPaths []strin
|
||||
if !ContainsGlobChars(p) {
|
||||
normalized := NormalizePath(p)
|
||||
if err := ruleset.AllowReadWrite(normalized); err != nil && debug {
|
||||
fmt.Fprintf(os.Stderr, "[fence:landlock] Warning: failed to add write path %s: %v\n", normalized, err)
|
||||
fmt.Fprintf(os.Stderr, "[greywall:landlock] Warning: failed to add write path %s: %v\n", normalized, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -126,13 +174,13 @@ func ApplyLandlockFromConfig(cfg *config.Config, cwd string, socketPaths []strin
|
||||
// Apply the ruleset
|
||||
if err := ruleset.Apply(); err != nil {
|
||||
if debug {
|
||||
fmt.Fprintf(os.Stderr, "[fence:landlock] Failed to apply: %v\n", err)
|
||||
fmt.Fprintf(os.Stderr, "[greywall:landlock] Failed to apply: %v\n", err)
|
||||
}
|
||||
return nil // Graceful fallback
|
||||
}
|
||||
|
||||
if debug {
|
||||
fmt.Fprintf(os.Stderr, "[fence:landlock] Applied restrictions (ABI v%d)\n", features.LandlockABI)
|
||||
fmt.Fprintf(os.Stderr, "[greywall:landlock] Applied restrictions (ABI v%d)\n", features.LandlockABI)
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -153,7 +201,7 @@ type LandlockRuleset struct {
|
||||
func NewLandlockRuleset(debug bool) (*LandlockRuleset, error) {
|
||||
features := DetectLinuxFeatures()
|
||||
if !features.CanUseLandlock() {
|
||||
return nil, fmt.Errorf("Landlock not available (kernel %d.%d, need 5.13+)",
|
||||
return nil, fmt.Errorf("landlock not available (kernel %d.%d, need 5.13+)",
|
||||
features.KernelMajor, features.KernelMinor)
|
||||
}
|
||||
|
||||
@@ -202,7 +250,7 @@ func (l *LandlockRuleset) Initialize() error {
|
||||
l.initialized = true
|
||||
|
||||
if l.debug {
|
||||
fmt.Fprintf(os.Stderr, "[fence:landlock] Created ruleset (ABI v%d, fd=%d)\n", l.abiVersion, l.rulesetFd)
|
||||
fmt.Fprintf(os.Stderr, "[greywall:landlock] Created ruleset (ABI v%d, fd=%d)\n", l.abiVersion, l.rulesetFd)
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -308,7 +356,7 @@ func (l *LandlockRuleset) addPathRule(path string, access uint64) error {
|
||||
// Check if path exists
|
||||
if _, err := os.Stat(absPath); os.IsNotExist(err) {
|
||||
if l.debug {
|
||||
fmt.Fprintf(os.Stderr, "[fence:landlock] Skipping non-existent path: %s\n", absPath)
|
||||
fmt.Fprintf(os.Stderr, "[greywall:landlock] Skipping non-existent path: %s\n", absPath)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -317,7 +365,7 @@ func (l *LandlockRuleset) addPathRule(path string, access uint64) error {
|
||||
fd, err := unix.Open(absPath, unix.O_PATH|unix.O_CLOEXEC, 0)
|
||||
if err != nil {
|
||||
if l.debug {
|
||||
fmt.Fprintf(os.Stderr, "[fence:landlock] Failed to open path %s: %v\n", absPath, err)
|
||||
fmt.Fprintf(os.Stderr, "[greywall:landlock] Failed to open path %s: %v\n", absPath, err)
|
||||
}
|
||||
return nil // Don't fail on paths we can't access
|
||||
}
|
||||
@@ -327,7 +375,7 @@ func (l *LandlockRuleset) addPathRule(path string, access uint64) error {
|
||||
var stat unix.Stat_t
|
||||
if err := unix.Fstat(fd, &stat); err != nil {
|
||||
if l.debug {
|
||||
fmt.Fprintf(os.Stderr, "[fence:landlock] Failed to fstat path %s: %v\n", absPath, err)
|
||||
fmt.Fprintf(os.Stderr, "[greywall:landlock] Failed to fstat path %s: %v\n", absPath, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -360,7 +408,7 @@ func (l *LandlockRuleset) addPathRule(path string, access uint64) error {
|
||||
|
||||
if access == 0 {
|
||||
if l.debug {
|
||||
fmt.Fprintf(os.Stderr, "[fence:landlock] Skipping %s: no applicable access rights\n", absPath)
|
||||
fmt.Fprintf(os.Stderr, "[greywall:landlock] Skipping %s: no applicable access rights\n", absPath)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -381,7 +429,7 @@ func (l *LandlockRuleset) addPathRule(path string, access uint64) error {
|
||||
}
|
||||
|
||||
if l.debug {
|
||||
fmt.Fprintf(os.Stderr, "[fence:landlock] Added rule: %s (access=0x%x)\n", absPath, access)
|
||||
fmt.Fprintf(os.Stderr, "[greywall:landlock] Added rule: %s (access=0x%x)\n", absPath, access)
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -390,7 +438,7 @@ func (l *LandlockRuleset) addPathRule(path string, access uint64) error {
|
||||
// Apply applies the Landlock ruleset to the current process.
|
||||
func (l *LandlockRuleset) Apply() error {
|
||||
if !l.initialized {
|
||||
return fmt.Errorf("Landlock ruleset not initialized")
|
||||
return fmt.Errorf("landlock ruleset not initialized")
|
||||
}
|
||||
|
||||
// Set NO_NEW_PRIVS first (required for Landlock)
|
||||
@@ -410,7 +458,7 @@ func (l *LandlockRuleset) Apply() error {
|
||||
}
|
||||
|
||||
if l.debug {
|
||||
fmt.Fprintf(os.Stderr, "[fence:landlock] Ruleset applied to process\n")
|
||||
fmt.Fprintf(os.Stderr, "[greywall:landlock] Ruleset applied to process\n")
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
|
||||
package sandbox
|
||||
|
||||
import "github.com/Use-Tusk/fence/internal/config"
|
||||
import "gitea.app.monadical.io/monadical/greywall/internal/config"
|
||||
|
||||
// ApplyLandlockFromConfig is a no-op on non-Linux platforms.
|
||||
func ApplyLandlockFromConfig(cfg *config.Config, cwd string, socketPaths []string, debug bool) error {
|
||||
|
||||
@@ -60,12 +60,12 @@ func (s *SeccompFilter) GenerateBPFFilter() (string, error) {
|
||||
}
|
||||
|
||||
// Create a temporary directory for the filter
|
||||
tmpDir := filepath.Join(os.TempDir(), "fence-seccomp")
|
||||
tmpDir := filepath.Join(os.TempDir(), "greywall-seccomp")
|
||||
if err := os.MkdirAll(tmpDir, 0o700); err != nil {
|
||||
return "", fmt.Errorf("failed to create seccomp dir: %w", err)
|
||||
}
|
||||
|
||||
filterPath := filepath.Join(tmpDir, fmt.Sprintf("fence-seccomp-%d.bpf", os.Getpid()))
|
||||
filterPath := filepath.Join(tmpDir, fmt.Sprintf("greywall-seccomp-%d.bpf", os.Getpid()))
|
||||
|
||||
// Generate the filter using the seccomp library or raw BPF
|
||||
// For now, we'll use bwrap's built-in seccomp support via --seccomp
|
||||
@@ -77,7 +77,7 @@ func (s *SeccompFilter) GenerateBPFFilter() (string, error) {
|
||||
}
|
||||
|
||||
if s.debug {
|
||||
fmt.Fprintf(os.Stderr, "[fence:seccomp] Generated BPF filter at %s\n", filterPath)
|
||||
fmt.Fprintf(os.Stderr, "[greywall:seccomp] Generated BPF filter at %s\n", filterPath)
|
||||
}
|
||||
|
||||
return filterPath, nil
|
||||
|
||||
@@ -5,13 +5,20 @@ package sandbox
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/Use-Tusk/fence/internal/config"
|
||||
"gitea.app.monadical.io/monadical/greywall/internal/config"
|
||||
)
|
||||
|
||||
// LinuxBridge is a stub for non-Linux platforms.
|
||||
type LinuxBridge struct {
|
||||
HTTPSocketPath string
|
||||
SOCKSSocketPath string
|
||||
// ProxyBridge is a stub for non-Linux platforms.
|
||||
type ProxyBridge struct {
|
||||
SocketPath string
|
||||
ProxyHost string
|
||||
ProxyPort string
|
||||
}
|
||||
|
||||
// DnsBridge is a stub for non-Linux platforms.
|
||||
type DnsBridge struct {
|
||||
SocketPath string
|
||||
DnsAddr string
|
||||
}
|
||||
|
||||
// ReverseBridge is a stub for non-Linux platforms.
|
||||
@@ -22,20 +29,30 @@ type ReverseBridge struct {
|
||||
|
||||
// LinuxSandboxOptions is a stub for non-Linux platforms.
|
||||
type LinuxSandboxOptions struct {
|
||||
UseLandlock bool
|
||||
UseSeccomp bool
|
||||
UseEBPF bool
|
||||
Monitor bool
|
||||
Debug bool
|
||||
UseLandlock bool
|
||||
UseSeccomp bool
|
||||
UseEBPF bool
|
||||
Monitor bool
|
||||
Debug bool
|
||||
Learning bool
|
||||
StraceLogPath string
|
||||
}
|
||||
|
||||
// NewLinuxBridge returns an error on non-Linux platforms.
|
||||
func NewLinuxBridge(httpProxyPort, socksProxyPort int, debug bool) (*LinuxBridge, error) {
|
||||
return nil, fmt.Errorf("Linux bridge not available on this platform")
|
||||
// NewProxyBridge returns an error on non-Linux platforms.
|
||||
func NewProxyBridge(proxyURL string, debug bool) (*ProxyBridge, error) {
|
||||
return nil, fmt.Errorf("proxy bridge not available on this platform")
|
||||
}
|
||||
|
||||
// Cleanup is a no-op on non-Linux platforms.
|
||||
func (b *LinuxBridge) Cleanup() {}
|
||||
func (b *ProxyBridge) Cleanup() {}
|
||||
|
||||
// NewDnsBridge returns an error on non-Linux platforms.
|
||||
func NewDnsBridge(dnsAddr string, debug bool) (*DnsBridge, error) {
|
||||
return nil, fmt.Errorf("DNS bridge not available on this platform")
|
||||
}
|
||||
|
||||
// Cleanup is a no-op on non-Linux platforms.
|
||||
func (b *DnsBridge) Cleanup() {}
|
||||
|
||||
// NewReverseBridge returns an error on non-Linux platforms.
|
||||
func NewReverseBridge(ports []int, debug bool) (*ReverseBridge, error) {
|
||||
@@ -46,13 +63,13 @@ func NewReverseBridge(ports []int, debug bool) (*ReverseBridge, error) {
|
||||
func (b *ReverseBridge) Cleanup() {}
|
||||
|
||||
// WrapCommandLinux returns an error on non-Linux platforms.
|
||||
func WrapCommandLinux(cfg *config.Config, command string, bridge *LinuxBridge, reverseBridge *ReverseBridge, debug bool) (string, error) {
|
||||
return "", fmt.Errorf("Linux sandbox not available on this platform")
|
||||
func WrapCommandLinux(cfg *config.Config, command string, proxyBridge *ProxyBridge, dnsBridge *DnsBridge, reverseBridge *ReverseBridge, tun2socksPath string, debug bool) (string, error) {
|
||||
return "", fmt.Errorf("linux sandbox not available on this platform")
|
||||
}
|
||||
|
||||
// WrapCommandLinuxWithOptions returns an error on non-Linux platforms.
|
||||
func WrapCommandLinuxWithOptions(cfg *config.Config, command string, bridge *LinuxBridge, reverseBridge *ReverseBridge, opts LinuxSandboxOptions) (string, error) {
|
||||
return "", fmt.Errorf("Linux sandbox not available on this platform")
|
||||
func WrapCommandLinuxWithOptions(cfg *config.Config, command string, proxyBridge *ProxyBridge, dnsBridge *DnsBridge, reverseBridge *ReverseBridge, tun2socksPath string, opts LinuxSandboxOptions) (string, error) {
|
||||
return "", fmt.Errorf("linux sandbox not available on this platform")
|
||||
}
|
||||
|
||||
// StartLinuxMonitor returns nil on non-Linux platforms.
|
||||
|
||||
@@ -3,158 +3,37 @@ package sandbox
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/Use-Tusk/fence/internal/config"
|
||||
"gitea.app.monadical.io/monadical/greywall/internal/config"
|
||||
)
|
||||
|
||||
// TestLinux_WildcardAllowedDomainsSkipsUnshareNet verifies that when allowedDomains
|
||||
// contains "*", the Linux sandbox does NOT use --unshare-net, allowing direct
|
||||
// network connections for applications that don't respect HTTP_PROXY.
|
||||
func TestLinux_WildcardAllowedDomainsSkipsUnshareNet(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
allowedDomains []string
|
||||
wantUnshareNet bool
|
||||
}{
|
||||
{
|
||||
name: "no domains - uses unshare-net",
|
||||
allowedDomains: []string{},
|
||||
wantUnshareNet: true,
|
||||
},
|
||||
{
|
||||
name: "specific domain - uses unshare-net",
|
||||
allowedDomains: []string{"api.openai.com"},
|
||||
wantUnshareNet: true,
|
||||
},
|
||||
{
|
||||
name: "wildcard domain - skips unshare-net",
|
||||
allowedDomains: []string{"*"},
|
||||
wantUnshareNet: false,
|
||||
},
|
||||
{
|
||||
name: "wildcard with specific domains - skips unshare-net",
|
||||
allowedDomains: []string{"api.openai.com", "*"},
|
||||
wantUnshareNet: false,
|
||||
},
|
||||
{
|
||||
name: "wildcard subdomain pattern - uses unshare-net",
|
||||
allowedDomains: []string{"*.openai.com"},
|
||||
wantUnshareNet: true,
|
||||
// TestLinux_NoProxyBlocksNetwork verifies that when no ProxyURL is set,
|
||||
// the Linux sandbox uses --unshare-net to block all network access.
|
||||
func TestLinux_NoProxyBlocksNetwork(t *testing.T) {
|
||||
cfg := &config.Config{
|
||||
Network: config.NetworkConfig{},
|
||||
Filesystem: config.FilesystemConfig{
|
||||
AllowWrite: []string{"/tmp/test"},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
cfg := &config.Config{
|
||||
Network: config.NetworkConfig{
|
||||
AllowedDomains: tt.allowedDomains,
|
||||
},
|
||||
Filesystem: config.FilesystemConfig{
|
||||
AllowWrite: []string{"/tmp/test"},
|
||||
},
|
||||
}
|
||||
|
||||
// Check the wildcard detection logic directly
|
||||
hasWildcard := hasWildcardAllowedDomain(cfg)
|
||||
|
||||
if tt.wantUnshareNet && hasWildcard {
|
||||
t.Errorf("expected hasWildcard=false for domains %v, got true", tt.allowedDomains)
|
||||
}
|
||||
if !tt.wantUnshareNet && !hasWildcard {
|
||||
t.Errorf("expected hasWildcard=true for domains %v, got false", tt.allowedDomains)
|
||||
}
|
||||
})
|
||||
// With no proxy, network should be blocked
|
||||
if cfg.Network.ProxyURL != "" {
|
||||
t.Error("expected empty ProxyURL for no-network config")
|
||||
}
|
||||
}
|
||||
|
||||
// hasWildcardAllowedDomain checks if the config contains a "*" in allowedDomains.
|
||||
// This replicates the logic used in both linux.go and macos.go.
|
||||
func hasWildcardAllowedDomain(cfg *config.Config) bool {
|
||||
if cfg == nil {
|
||||
return false
|
||||
}
|
||||
for _, d := range cfg.Network.AllowedDomains {
|
||||
if d == "*" {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// TestWildcardDetectionLogic tests the wildcard detection helper.
|
||||
// This logic is shared between macOS and Linux sandbox implementations.
|
||||
func TestWildcardDetectionLogic(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
cfg *config.Config
|
||||
expectWildcard bool
|
||||
}{
|
||||
{
|
||||
name: "nil config",
|
||||
cfg: nil,
|
||||
expectWildcard: false,
|
||||
// TestLinux_ProxyURLSet verifies that a proxy URL is properly set in config.
|
||||
func TestLinux_ProxyURLSet(t *testing.T) {
|
||||
cfg := &config.Config{
|
||||
Network: config.NetworkConfig{
|
||||
ProxyURL: "socks5://localhost:1080",
|
||||
},
|
||||
{
|
||||
name: "empty allowed domains",
|
||||
cfg: &config.Config{
|
||||
Network: config.NetworkConfig{
|
||||
AllowedDomains: []string{},
|
||||
},
|
||||
},
|
||||
expectWildcard: false,
|
||||
},
|
||||
{
|
||||
name: "specific domains only",
|
||||
cfg: &config.Config{
|
||||
Network: config.NetworkConfig{
|
||||
AllowedDomains: []string{"example.com", "api.openai.com"},
|
||||
},
|
||||
},
|
||||
expectWildcard: false,
|
||||
},
|
||||
{
|
||||
name: "exact star wildcard",
|
||||
cfg: &config.Config{
|
||||
Network: config.NetworkConfig{
|
||||
AllowedDomains: []string{"*"},
|
||||
},
|
||||
},
|
||||
expectWildcard: true,
|
||||
},
|
||||
{
|
||||
name: "star wildcard among others",
|
||||
cfg: &config.Config{
|
||||
Network: config.NetworkConfig{
|
||||
AllowedDomains: []string{"example.com", "*", "api.openai.com"},
|
||||
},
|
||||
},
|
||||
expectWildcard: true,
|
||||
},
|
||||
{
|
||||
name: "prefix wildcard is not star",
|
||||
cfg: &config.Config{
|
||||
Network: config.NetworkConfig{
|
||||
AllowedDomains: []string{"*.example.com"},
|
||||
},
|
||||
},
|
||||
expectWildcard: false,
|
||||
},
|
||||
{
|
||||
name: "star in domain name is not wildcard",
|
||||
cfg: &config.Config{
|
||||
Network: config.NetworkConfig{
|
||||
AllowedDomains: []string{"test*domain.com"},
|
||||
},
|
||||
},
|
||||
expectWildcard: false,
|
||||
Filesystem: config.FilesystemConfig{
|
||||
AllowWrite: []string{"/tmp/test"},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := hasWildcardAllowedDomain(tt.cfg)
|
||||
if got != tt.expectWildcard {
|
||||
t.Errorf("hasWildcardAllowedDomain() = %v, want %v", got, tt.expectWildcard)
|
||||
}
|
||||
})
|
||||
if cfg.Network.ProxyURL != "socks5://localhost:1080" {
|
||||
t.Errorf("expected ProxyURL socks5://localhost:1080, got %s", cfg.Network.ProxyURL)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,14 +4,14 @@ import (
|
||||
"crypto/rand"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"slices"
|
||||
"strings"
|
||||
|
||||
"github.com/Use-Tusk/fence/internal/config"
|
||||
"gitea.app.monadical.io/monadical/greywall/internal/config"
|
||||
)
|
||||
|
||||
// sessionSuffix is a unique identifier for this process session.
|
||||
@@ -29,13 +29,15 @@ func generateSessionSuffix() string {
|
||||
type MacOSSandboxParams struct {
|
||||
Command string
|
||||
NeedsNetworkRestriction bool
|
||||
HTTPProxyPort int
|
||||
SOCKSProxyPort int
|
||||
ProxyURL string // External proxy URL (for env vars)
|
||||
ProxyHost string // Proxy host (for sandbox profile network rules)
|
||||
ProxyPort string // Proxy port (for sandbox profile network rules)
|
||||
AllowUnixSockets []string
|
||||
AllowAllUnixSockets bool
|
||||
AllowLocalBinding bool
|
||||
AllowLocalOutbound bool
|
||||
DefaultDenyRead bool
|
||||
Cwd string // Current working directory (for deny-by-default CWD allowlisting)
|
||||
ReadAllowPaths []string
|
||||
ReadDenyPaths []string
|
||||
WriteAllowPaths []string
|
||||
@@ -43,6 +45,8 @@ type MacOSSandboxParams struct {
|
||||
AllowPty bool
|
||||
AllowGitConfig bool
|
||||
Shell string
|
||||
DaemonMode bool // When true, pf handles network routing; Seatbelt allows network-outbound
|
||||
DaemonSocketPath string // Daemon socket to deny access to from sandboxed process
|
||||
}
|
||||
|
||||
// GlobToRegex converts a glob pattern to a regex for macOS sandbox profiles.
|
||||
@@ -145,13 +149,13 @@ func getTmpdirParent() []string {
|
||||
}
|
||||
|
||||
// generateReadRules generates filesystem read rules for the sandbox profile.
|
||||
func generateReadRules(defaultDenyRead bool, allowPaths, denyPaths []string, logTag string) []string {
|
||||
func generateReadRules(defaultDenyRead bool, cwd string, allowPaths, denyPaths []string, logTag string) []string {
|
||||
var rules []string
|
||||
|
||||
if defaultDenyRead {
|
||||
// When defaultDenyRead is enabled:
|
||||
// 1. Allow file-read-metadata globally (needed for directory traversal, stat, etc.)
|
||||
// 2. Allow file-read-data only for system paths + user-specified allowRead paths
|
||||
// 2. Allow file-read-data only for system paths + CWD + user-specified allowRead paths
|
||||
// This lets programs see what files exist but not read their contents.
|
||||
|
||||
// Allow metadata operations globally (stat, readdir, etc.) and root dir (for path resolution)
|
||||
@@ -166,6 +170,44 @@ func generateReadRules(defaultDenyRead bool, allowPaths, denyPaths []string, log
|
||||
)
|
||||
}
|
||||
|
||||
// Allow reading CWD (full recursive read access)
|
||||
if cwd != "" {
|
||||
rules = append(rules,
|
||||
"(allow file-read-data",
|
||||
fmt.Sprintf(" (subpath %s))", escapePath(cwd)),
|
||||
)
|
||||
|
||||
// Allow ancestor directory traversal (literal only, so programs can resolve CWD path)
|
||||
for _, ancestor := range getAncestorDirectories(cwd) {
|
||||
rules = append(rules,
|
||||
fmt.Sprintf("(allow file-read-data (literal %s))", escapePath(ancestor)),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// Allow home shell configs and tool caches (read-only)
|
||||
home, _ := os.UserHomeDir()
|
||||
if home != "" {
|
||||
// Shell config files (literal access)
|
||||
shellConfigs := []string{".bashrc", ".bash_profile", ".profile", ".zshrc", ".zprofile", ".zshenv", ".inputrc"}
|
||||
for _, f := range shellConfigs {
|
||||
p := filepath.Join(home, f)
|
||||
rules = append(rules,
|
||||
fmt.Sprintf("(allow file-read-data (literal %s))", escapePath(p)),
|
||||
)
|
||||
}
|
||||
|
||||
// Home tool caches (subpath access for package managers/configs)
|
||||
homeCaches := []string{".cache", ".npm", ".cargo", ".rustup", ".local", ".config", ".nvm", ".pyenv", ".rbenv", ".asdf"}
|
||||
for _, d := range homeCaches {
|
||||
p := filepath.Join(home, d)
|
||||
rules = append(rules,
|
||||
"(allow file-read-data",
|
||||
fmt.Sprintf(" (subpath %s))", escapePath(p)),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// Allow reading data from user-specified paths
|
||||
for _, pathPattern := range allowPaths {
|
||||
normalized := NormalizePath(pathPattern)
|
||||
@@ -183,6 +225,24 @@ func generateReadRules(defaultDenyRead bool, allowPaths, denyPaths []string, log
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// Deny sensitive files within CWD (Seatbelt evaluates deny before allow)
|
||||
if cwd != "" {
|
||||
for _, f := range SensitiveProjectFiles {
|
||||
p := filepath.Join(cwd, f)
|
||||
rules = append(rules,
|
||||
"(deny file-read*",
|
||||
fmt.Sprintf(" (literal %s)", escapePath(p)),
|
||||
fmt.Sprintf(" (with message %q))", logTag),
|
||||
)
|
||||
}
|
||||
// Also deny .env.* pattern via regex
|
||||
rules = append(rules,
|
||||
"(deny file-read*",
|
||||
fmt.Sprintf(" (regex %s)", escapePath("^"+regexp.QuoteMeta(cwd)+"/\\.env\\..*$")),
|
||||
fmt.Sprintf(" (with message %q))", logTag),
|
||||
)
|
||||
}
|
||||
} else {
|
||||
// Allow all reads by default
|
||||
rules = append(rules, "(allow file-read*)")
|
||||
@@ -219,9 +279,19 @@ func generateReadRules(defaultDenyRead bool, allowPaths, denyPaths []string, log
|
||||
}
|
||||
|
||||
// generateWriteRules generates filesystem write rules for the sandbox profile.
|
||||
func generateWriteRules(allowPaths, denyPaths []string, allowGitConfig bool, logTag string) []string {
|
||||
// When cwd is non-empty, it is automatically included in the write allow paths.
|
||||
func generateWriteRules(cwd string, allowPaths, denyPaths []string, allowGitConfig bool, logTag string) []string {
|
||||
var rules []string
|
||||
|
||||
// Auto-allow CWD for writes (project directory should be writable)
|
||||
if cwd != "" {
|
||||
rules = append(rules,
|
||||
"(allow file-write*",
|
||||
fmt.Sprintf(" (subpath %s)", escapePath(cwd)),
|
||||
fmt.Sprintf(" (with message %q))", logTag),
|
||||
)
|
||||
}
|
||||
|
||||
// Allow TMPDIR parent on macOS
|
||||
for _, tmpdirParent := range getTmpdirParent() {
|
||||
normalized := NormalizePath(tmpdirParent)
|
||||
@@ -253,8 +323,11 @@ func generateWriteRules(allowPaths, denyPaths []string, allowGitConfig bool, log
|
||||
}
|
||||
|
||||
// Combine user-specified and mandatory deny patterns
|
||||
cwd, _ := os.Getwd()
|
||||
mandatoryDeny := GetMandatoryDenyPatterns(cwd, allowGitConfig)
|
||||
mandatoryCwd := cwd
|
||||
if mandatoryCwd == "" {
|
||||
mandatoryCwd, _ = os.Getwd()
|
||||
}
|
||||
mandatoryDeny := GetMandatoryDenyPatterns(mandatoryCwd, allowGitConfig)
|
||||
allDenyPaths := make([]string, 0, len(denyPaths)+len(mandatoryDeny))
|
||||
allDenyPaths = append(allDenyPaths, denyPaths...)
|
||||
allDenyPaths = append(allDenyPaths, mandatoryDeny...)
|
||||
@@ -351,8 +424,8 @@ func GenerateSandboxProfile(params MacOSSandboxParams) string {
|
||||
|
||||
// Header
|
||||
profile.WriteString("(version 1)\n")
|
||||
profile.WriteString(fmt.Sprintf("(deny default (with message %q))\n\n", logTag))
|
||||
profile.WriteString(fmt.Sprintf("; LogTag: %s\n\n", logTag))
|
||||
fmt.Fprintf(&profile, "(deny default (with message %q))\n\n", logTag)
|
||||
fmt.Fprintf(&profile, "; LogTag: %s\n\n", logTag)
|
||||
|
||||
// Essential permissions - based on Chrome sandbox policy
|
||||
profile.WriteString(`; Essential permissions - based on Chrome sandbox policy
|
||||
@@ -378,7 +451,13 @@ func GenerateSandboxProfile(params MacOSSandboxParams) string {
|
||||
(global-name "com.apple.system.logger")
|
||||
(global-name "com.apple.system.notification_center")
|
||||
(global-name "com.apple.trustd.agent")
|
||||
(global-name "com.apple.system.opendirectoryd.libinfo")
|
||||
`)
|
||||
// macOS DNS resolution goes through mDNSResponder via Mach IPC — blocking
|
||||
// opendirectoryd.libinfo or configd does NOT cause a fallback to direct UDP
|
||||
// DNS. getaddrinfo() simply fails with EAI_NONAME. So we must allow these
|
||||
// services in all modes. In daemon mode, DNS for proxy-aware apps (curl, git)
|
||||
// is handled via ALL_PROXY=socks5h:// env var instead.
|
||||
profile.WriteString(` (global-name "com.apple.system.opendirectoryd.libinfo")
|
||||
(global-name "com.apple.system.opendirectoryd.membership")
|
||||
(global-name "com.apple.bsd.dirhelper")
|
||||
(global-name "com.apple.securityd.xpc")
|
||||
@@ -483,6 +562,7 @@ func GenerateSandboxProfile(params MacOSSandboxParams) string {
|
||||
(allow file-ioctl (literal "/dev/urandom"))
|
||||
(allow file-ioctl (literal "/dev/dtracehelper"))
|
||||
(allow file-ioctl (literal "/dev/tty"))
|
||||
(allow file-ioctl (regex #"^/dev/ttys"))
|
||||
|
||||
(allow file-ioctl file-read-data file-write-data
|
||||
(require-all
|
||||
@@ -491,13 +571,34 @@ func GenerateSandboxProfile(params MacOSSandboxParams) string {
|
||||
)
|
||||
)
|
||||
|
||||
; Inherited terminal access (TUI apps need read/write on the actual PTY device)
|
||||
(allow file-read-data file-write-data (regex #"^/dev/ttys"))
|
||||
|
||||
`)
|
||||
|
||||
// Network rules
|
||||
profile.WriteString("; Network\n")
|
||||
if !params.NeedsNetworkRestriction {
|
||||
switch {
|
||||
case params.DaemonMode:
|
||||
// In daemon mode, pf handles network routing: all traffic from the
|
||||
// _greywall user is routed through utun → tun2socks → proxy.
|
||||
// Seatbelt must allow network-outbound so packets reach pf.
|
||||
// The proxy allowlist is enforced by the external SOCKS5 proxy.
|
||||
profile.WriteString("(allow network-outbound)\n")
|
||||
// Allow local binding for servers if configured.
|
||||
if params.AllowLocalBinding {
|
||||
profile.WriteString(`(allow network-bind (local ip "localhost:*"))
|
||||
(allow network-inbound (local ip "localhost:*"))
|
||||
`)
|
||||
}
|
||||
// Explicitly deny access to the daemon socket to prevent the
|
||||
// sandboxed process from manipulating daemon sessions.
|
||||
if params.DaemonSocketPath != "" {
|
||||
fmt.Fprintf(&profile, "(deny network-outbound (remote unix-socket (path-literal %s)))\n", escapePath(params.DaemonSocketPath))
|
||||
}
|
||||
case !params.NeedsNetworkRestriction:
|
||||
profile.WriteString("(allow network*)\n")
|
||||
} else {
|
||||
default:
|
||||
if params.AllowLocalBinding {
|
||||
// Allow binding and inbound connections on localhost (for servers)
|
||||
profile.WriteString(`(allow network-bind (local ip "localhost:*"))
|
||||
@@ -515,52 +616,37 @@ func GenerateSandboxProfile(params MacOSSandboxParams) string {
|
||||
} else if len(params.AllowUnixSockets) > 0 {
|
||||
for _, socketPath := range params.AllowUnixSockets {
|
||||
normalized := NormalizePath(socketPath)
|
||||
profile.WriteString(fmt.Sprintf("(allow network* (subpath %s))\n", escapePath(normalized)))
|
||||
fmt.Fprintf(&profile, "(allow network* (subpath %s))\n", escapePath(normalized))
|
||||
}
|
||||
}
|
||||
|
||||
if params.HTTPProxyPort > 0 {
|
||||
profile.WriteString(fmt.Sprintf(`(allow network-bind (local ip "localhost:%d"))
|
||||
(allow network-inbound (local ip "localhost:%d"))
|
||||
(allow network-outbound (remote ip "localhost:%d"))
|
||||
`, params.HTTPProxyPort, params.HTTPProxyPort, params.HTTPProxyPort))
|
||||
}
|
||||
|
||||
if params.SOCKSProxyPort > 0 {
|
||||
profile.WriteString(fmt.Sprintf(`(allow network-bind (local ip "localhost:%d"))
|
||||
(allow network-inbound (local ip "localhost:%d"))
|
||||
(allow network-outbound (remote ip "localhost:%d"))
|
||||
`, params.SOCKSProxyPort, params.SOCKSProxyPort, params.SOCKSProxyPort))
|
||||
// Allow outbound to the external proxy host:port
|
||||
if params.ProxyHost != "" && params.ProxyPort != "" {
|
||||
fmt.Fprintf(&profile, "(allow network-outbound (remote ip \"%s:%s\"))\n", params.ProxyHost, params.ProxyPort)
|
||||
}
|
||||
}
|
||||
profile.WriteString("\n")
|
||||
|
||||
// Read rules
|
||||
profile.WriteString("; File read\n")
|
||||
for _, rule := range generateReadRules(params.DefaultDenyRead, params.ReadAllowPaths, params.ReadDenyPaths, logTag) {
|
||||
for _, rule := range generateReadRules(params.DefaultDenyRead, params.Cwd, params.ReadAllowPaths, params.ReadDenyPaths, logTag) {
|
||||
profile.WriteString(rule + "\n")
|
||||
}
|
||||
profile.WriteString("\n")
|
||||
|
||||
// Write rules
|
||||
profile.WriteString("; File write\n")
|
||||
for _, rule := range generateWriteRules(params.WriteAllowPaths, params.WriteDenyPaths, params.AllowGitConfig, logTag) {
|
||||
for _, rule := range generateWriteRules(params.Cwd, params.WriteAllowPaths, params.WriteDenyPaths, params.AllowGitConfig, logTag) {
|
||||
profile.WriteString(rule + "\n")
|
||||
}
|
||||
|
||||
// PTY support
|
||||
// PTY allocation support (creating new pseudo-terminals)
|
||||
if params.AllowPty {
|
||||
profile.WriteString(`
|
||||
; Pseudo-terminal (pty) support
|
||||
; Pseudo-terminal allocation (pty) support
|
||||
(allow pseudo-tty)
|
||||
(allow file-ioctl
|
||||
(literal "/dev/ptmx")
|
||||
(regex #"^/dev/ttys")
|
||||
)
|
||||
(allow file-read* file-write*
|
||||
(literal "/dev/ptmx")
|
||||
(regex #"^/dev/ttys")
|
||||
)
|
||||
(allow file-ioctl (literal "/dev/ptmx"))
|
||||
(allow file-read* file-write* (literal "/dev/ptmx"))
|
||||
`)
|
||||
}
|
||||
|
||||
@@ -568,14 +654,10 @@ func GenerateSandboxProfile(params MacOSSandboxParams) string {
|
||||
}
|
||||
|
||||
// WrapCommandMacOS wraps a command with macOS sandbox restrictions.
|
||||
func WrapCommandMacOS(cfg *config.Config, command string, httpPort, socksPort int, exposedPorts []int, debug bool) (string, error) {
|
||||
// Check if allowedDomains contains "*" (wildcard = allow all direct network)
|
||||
// In this mode, we still run the proxy for apps that respect HTTP_PROXY,
|
||||
// but allow direct connections for apps that don't (like cursor-agent, opencode).
|
||||
// deniedDomains will only be enforced for apps that use the proxy.
|
||||
hasWildcardAllow := slices.Contains(cfg.Network.AllowedDomains, "*")
|
||||
|
||||
needsNetwork := len(cfg.Network.AllowedDomains) > 0 || len(cfg.Network.DeniedDomains) > 0
|
||||
// When daemonSession is non-nil, the command runs as the _greywall user
|
||||
// with network-outbound allowed (pf routes traffic through utun → proxy).
|
||||
func WrapCommandMacOS(cfg *config.Config, command string, exposedPorts []int, daemonSession *DaemonSession, debug bool) (string, error) {
|
||||
cwd, _ := os.Getwd()
|
||||
|
||||
// Build allow paths: default + configured
|
||||
allowPaths := append(GetDefaultWritePaths(), cfg.Filesystem.AllowWrite...)
|
||||
@@ -591,38 +673,54 @@ func WrapCommandMacOS(cfg *config.Config, command string, httpPort, socksPort in
|
||||
allowLocalOutbound = *cfg.Network.AllowLocalOutbound
|
||||
}
|
||||
|
||||
// If wildcard allow, don't restrict network at sandbox level (allow direct connections).
|
||||
// Otherwise, restrict to localhost/proxy only (strict mode).
|
||||
needsNetworkRestriction := !hasWildcardAllow && (needsNetwork || len(cfg.Network.AllowedDomains) == 0)
|
||||
|
||||
if debug && hasWildcardAllow {
|
||||
fmt.Fprintf(os.Stderr, "[fence:macos] Wildcard allowedDomains detected - allowing direct network connections\n")
|
||||
fmt.Fprintf(os.Stderr, "[fence:macos] Note: deniedDomains only enforced for apps that respect HTTP_PROXY\n")
|
||||
// Parse proxy URL for network rules
|
||||
var proxyHost, proxyPort string
|
||||
if cfg.Network.ProxyURL != "" {
|
||||
if u, err := url.Parse(cfg.Network.ProxyURL); err == nil {
|
||||
proxyHost = u.Hostname()
|
||||
proxyPort = u.Port()
|
||||
}
|
||||
}
|
||||
|
||||
// Determine if we're using daemon-mode (transparent proxying via pf + utun)
|
||||
daemonMode := daemonSession != nil
|
||||
|
||||
// Restrict network unless proxy is configured to an external host
|
||||
// If no proxy: block all outbound. If proxy: allow outbound only to proxy.
|
||||
// In daemon mode, network restriction is handled by pf, not Seatbelt.
|
||||
needsNetworkRestriction := !daemonMode
|
||||
|
||||
params := MacOSSandboxParams{
|
||||
Command: command,
|
||||
NeedsNetworkRestriction: needsNetworkRestriction,
|
||||
HTTPProxyPort: httpPort,
|
||||
SOCKSProxyPort: socksPort,
|
||||
ProxyURL: cfg.Network.ProxyURL,
|
||||
ProxyHost: proxyHost,
|
||||
ProxyPort: proxyPort,
|
||||
AllowUnixSockets: cfg.Network.AllowUnixSockets,
|
||||
AllowAllUnixSockets: cfg.Network.AllowAllUnixSockets,
|
||||
AllowLocalBinding: allowLocalBinding,
|
||||
AllowLocalOutbound: allowLocalOutbound,
|
||||
DefaultDenyRead: cfg.Filesystem.DefaultDenyRead,
|
||||
DefaultDenyRead: cfg.Filesystem.IsDefaultDenyRead(),
|
||||
Cwd: cwd,
|
||||
ReadAllowPaths: cfg.Filesystem.AllowRead,
|
||||
ReadDenyPaths: cfg.Filesystem.DenyRead,
|
||||
WriteAllowPaths: allowPaths,
|
||||
WriteDenyPaths: cfg.Filesystem.DenyWrite,
|
||||
AllowPty: cfg.AllowPty,
|
||||
AllowGitConfig: cfg.Filesystem.AllowGitConfig,
|
||||
DaemonMode: daemonMode,
|
||||
DaemonSocketPath: "/var/run/greywall.sock",
|
||||
}
|
||||
|
||||
if debug && len(exposedPorts) > 0 {
|
||||
fmt.Fprintf(os.Stderr, "[fence:macos] Enabling local binding for exposed ports: %v\n", exposedPorts)
|
||||
fmt.Fprintf(os.Stderr, "[greywall:macos] Enabling local binding for exposed ports: %v\n", exposedPorts)
|
||||
}
|
||||
if debug && allowLocalBinding && !allowLocalOutbound {
|
||||
fmt.Fprintf(os.Stderr, "[fence:macos] Blocking localhost outbound (AllowLocalOutbound=false)\n")
|
||||
fmt.Fprintf(os.Stderr, "[greywall:macos] Blocking localhost outbound (AllowLocalOutbound=false)\n")
|
||||
}
|
||||
if debug && daemonMode {
|
||||
fmt.Fprintf(os.Stderr, "[greywall:macos] Daemon mode: transparent proxying via pf + utun (group=%s, device=%s)\n",
|
||||
daemonSession.SandboxGroup, daemonSession.TunDevice)
|
||||
}
|
||||
|
||||
profile := GenerateSandboxProfile(params)
|
||||
@@ -637,14 +735,70 @@ func WrapCommandMacOS(cfg *config.Config, command string, httpPort, socksPort in
|
||||
return "", fmt.Errorf("shell %q not found: %w", shell, err)
|
||||
}
|
||||
|
||||
proxyEnvs := GenerateProxyEnvVars(httpPort, socksPort)
|
||||
|
||||
// Build the command
|
||||
// env VAR1=val1 VAR2=val2 sandbox-exec -p 'profile' shell -c 'command'
|
||||
var parts []string
|
||||
parts = append(parts, "env")
|
||||
parts = append(parts, proxyEnvs...)
|
||||
parts = append(parts, "sandbox-exec", "-p", profile, shellPath, "-c", command)
|
||||
|
||||
if daemonMode {
|
||||
// In daemon mode: run as the real user but with EGID=_greywall via sudo.
|
||||
// pf routes all TCP from group _greywall through utun → tun2socks → proxy.
|
||||
// Using -u #<uid> preserves the user's identity (home dir, SSH keys, etc.)
|
||||
// while -g _greywall sets the effective GID for pf matching.
|
||||
//
|
||||
// DNS on macOS goes through mDNSResponder (Mach IPC), which runs outside
|
||||
// the _greywall group, so pf can't intercept DNS. Instead, we set
|
||||
// ALL_PROXY=socks5h:// so proxy-aware apps (curl, git, etc.) resolve DNS
|
||||
// through the SOCKS5 proxy. The "h" suffix means "resolve hostname at proxy".
|
||||
//
|
||||
// Set ALL_PROXY and HTTP_PROXY/HTTPS_PROXY with socks5h:// so both
|
||||
// SOCKS5-aware apps (curl, git) and HTTP-proxy-aware apps (opencode,
|
||||
// Node.js tools) resolve DNS through the proxy. The "h" suffix means
|
||||
// "resolve hostname at proxy side". Note: apps that read HTTP_PROXY
|
||||
// but don't support SOCKS5 protocol (e.g., Bun) may fail to connect.
|
||||
//
|
||||
// sudo resets the environment, so we use `env` after sudo to re-inject
|
||||
// terminal vars (TERM, COLORTERM, etc.) needed for TUI apps.
|
||||
uid := fmt.Sprintf("#%d", os.Getuid())
|
||||
sandboxEnvs := GenerateProxyEnvVars("")
|
||||
// Convert socks5:// → socks5h:// for hostname resolution through proxy.
|
||||
socks5hURL := strings.Replace(cfg.Network.ProxyURL, "socks5://", "socks5h://", 1)
|
||||
if socks5hURL != "" {
|
||||
// ALL_PROXY uses socks5h:// (DNS resolved at proxy side) for
|
||||
// SOCKS5-aware apps (curl, git).
|
||||
// HTTP_PROXY/HTTPS_PROXY use the configured HTTP CONNECT proxy
|
||||
// for apps that only understand HTTP proxies (opencode, Node.js
|
||||
// tools, etc.). The CONNECT proxy resolves DNS server-side.
|
||||
httpProxyURL := cfg.Network.HTTPProxyURL
|
||||
// Inject credentials from the SOCKS5 proxy URL into the HTTP proxy
|
||||
// URL if the HTTP proxy URL doesn't already have credentials.
|
||||
if httpProxyURL != "" {
|
||||
if hu, err := url.Parse(httpProxyURL); err == nil && hu.User == nil {
|
||||
if su, err := url.Parse(socks5hURL); err == nil && su.User != nil {
|
||||
hu.User = su.User
|
||||
httpProxyURL = hu.String()
|
||||
}
|
||||
}
|
||||
}
|
||||
sandboxEnvs = append(sandboxEnvs,
|
||||
"ALL_PROXY="+socks5hURL, "all_proxy="+socks5hURL,
|
||||
)
|
||||
if httpProxyURL != "" {
|
||||
sandboxEnvs = append(sandboxEnvs,
|
||||
"HTTP_PROXY="+httpProxyURL, "http_proxy="+httpProxyURL,
|
||||
"HTTPS_PROXY="+httpProxyURL, "https_proxy="+httpProxyURL,
|
||||
)
|
||||
}
|
||||
}
|
||||
termEnvs := getTerminalEnvVars()
|
||||
parts = append(parts, "sudo", "-u", uid, "-g", daemonSession.SandboxGroup, "env")
|
||||
parts = append(parts, sandboxEnvs...)
|
||||
parts = append(parts, termEnvs...)
|
||||
parts = append(parts, "sandbox-exec", "-p", profile, shellPath, "-c", command)
|
||||
} else {
|
||||
// Non-daemon mode: use proxy env vars for best-effort proxying.
|
||||
proxyEnvs := GenerateProxyEnvVars(cfg.Network.ProxyURL)
|
||||
parts = append(parts, "env")
|
||||
parts = append(parts, proxyEnvs...)
|
||||
parts = append(parts, "sandbox-exec", "-p", profile, shellPath, "-c", command)
|
||||
}
|
||||
|
||||
return ShellQuote(parts), nil
|
||||
}
|
||||
|
||||
@@ -1,50 +1,41 @@
|
||||
package sandbox
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/Use-Tusk/fence/internal/config"
|
||||
"gitea.app.monadical.io/monadical/greywall/internal/config"
|
||||
)
|
||||
|
||||
// TestMacOS_WildcardAllowedDomainsRelaxesNetwork verifies that when allowedDomains
|
||||
// contains "*", the macOS sandbox profile allows direct network connections.
|
||||
func TestMacOS_WildcardAllowedDomainsRelaxesNetwork(t *testing.T) {
|
||||
// TestMacOS_NetworkRestrictionWithProxy verifies that when a proxy URL is set,
|
||||
// the macOS sandbox profile allows outbound to the proxy host:port.
|
||||
func TestMacOS_NetworkRestrictionWithProxy(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
allowedDomains []string
|
||||
wantNetworkRestricted bool
|
||||
wantAllowNetworkOutbound bool
|
||||
name string
|
||||
proxyURL string
|
||||
wantProxy bool
|
||||
proxyHost string
|
||||
proxyPort string
|
||||
}{
|
||||
{
|
||||
name: "no domains - network restricted",
|
||||
allowedDomains: []string{},
|
||||
wantNetworkRestricted: true,
|
||||
wantAllowNetworkOutbound: false,
|
||||
name: "no proxy - network blocked",
|
||||
proxyURL: "",
|
||||
wantProxy: false,
|
||||
},
|
||||
{
|
||||
name: "specific domain - network restricted",
|
||||
allowedDomains: []string{"api.openai.com"},
|
||||
wantNetworkRestricted: true,
|
||||
wantAllowNetworkOutbound: false,
|
||||
name: "socks5 proxy - outbound allowed to proxy",
|
||||
proxyURL: "socks5://proxy.example.com:1080",
|
||||
wantProxy: true,
|
||||
proxyHost: "proxy.example.com",
|
||||
proxyPort: "1080",
|
||||
},
|
||||
{
|
||||
name: "wildcard domain - network unrestricted",
|
||||
allowedDomains: []string{"*"},
|
||||
wantNetworkRestricted: false,
|
||||
wantAllowNetworkOutbound: true,
|
||||
},
|
||||
{
|
||||
name: "wildcard with specific domains - network unrestricted",
|
||||
allowedDomains: []string{"api.openai.com", "*"},
|
||||
wantNetworkRestricted: false,
|
||||
wantAllowNetworkOutbound: true,
|
||||
},
|
||||
{
|
||||
name: "wildcard subdomain pattern - network restricted",
|
||||
allowedDomains: []string{"*.openai.com"},
|
||||
wantNetworkRestricted: true,
|
||||
wantAllowNetworkOutbound: false,
|
||||
name: "socks5h proxy - outbound allowed to proxy",
|
||||
proxyURL: "socks5h://localhost:1080",
|
||||
wantProxy: true,
|
||||
proxyHost: "localhost",
|
||||
proxyPort: "1080",
|
||||
},
|
||||
}
|
||||
|
||||
@@ -52,34 +43,33 @@ func TestMacOS_WildcardAllowedDomainsRelaxesNetwork(t *testing.T) {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
cfg := &config.Config{
|
||||
Network: config.NetworkConfig{
|
||||
AllowedDomains: tt.allowedDomains,
|
||||
ProxyURL: tt.proxyURL,
|
||||
},
|
||||
Filesystem: config.FilesystemConfig{
|
||||
AllowWrite: []string{"/tmp/test"},
|
||||
},
|
||||
}
|
||||
|
||||
// Generate the sandbox profile parameters
|
||||
params := buildMacOSParamsForTest(cfg)
|
||||
|
||||
if params.NeedsNetworkRestriction != tt.wantNetworkRestricted {
|
||||
t.Errorf("NeedsNetworkRestriction = %v, want %v",
|
||||
params.NeedsNetworkRestriction, tt.wantNetworkRestricted)
|
||||
if tt.wantProxy {
|
||||
if params.ProxyHost != tt.proxyHost {
|
||||
t.Errorf("expected ProxyHost %q, got %q", tt.proxyHost, params.ProxyHost)
|
||||
}
|
||||
if params.ProxyPort != tt.proxyPort {
|
||||
t.Errorf("expected ProxyPort %q, got %q", tt.proxyPort, params.ProxyPort)
|
||||
}
|
||||
|
||||
profile := GenerateSandboxProfile(params)
|
||||
expectedRule := `(allow network-outbound (remote ip "` + tt.proxyHost + ":" + tt.proxyPort + `"))`
|
||||
if !strings.Contains(profile, expectedRule) {
|
||||
t.Errorf("profile should contain proxy outbound rule %q", expectedRule)
|
||||
}
|
||||
}
|
||||
|
||||
// Generate the actual profile and check its contents
|
||||
profile := GenerateSandboxProfile(params)
|
||||
|
||||
// When network is unrestricted, profile should allow network* (all network ops)
|
||||
if tt.wantAllowNetworkOutbound {
|
||||
if !strings.Contains(profile, "(allow network*)") {
|
||||
t.Errorf("expected unrestricted network profile to contain '(allow network*)', got:\n%s", profile)
|
||||
}
|
||||
} else {
|
||||
// When network is restricted, profile should NOT have blanket allow
|
||||
if strings.Contains(profile, "(allow network*)") {
|
||||
t.Errorf("expected restricted network profile to NOT contain blanket '(allow network*)'")
|
||||
}
|
||||
// Network should always be restricted (proxy or not)
|
||||
if !params.NeedsNetworkRestriction {
|
||||
t.Error("NeedsNetworkRestriction should always be true")
|
||||
}
|
||||
})
|
||||
}
|
||||
@@ -88,15 +78,6 @@ func TestMacOS_WildcardAllowedDomainsRelaxesNetwork(t *testing.T) {
|
||||
// buildMacOSParamsForTest is a helper to build MacOSSandboxParams from config,
|
||||
// replicating the logic in WrapCommandMacOS for testing.
|
||||
func buildMacOSParamsForTest(cfg *config.Config) MacOSSandboxParams {
|
||||
hasWildcardAllow := false
|
||||
for _, d := range cfg.Network.AllowedDomains {
|
||||
if d == "*" {
|
||||
hasWildcardAllow = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
needsNetwork := len(cfg.Network.AllowedDomains) > 0 || len(cfg.Network.DeniedDomains) > 0
|
||||
allowPaths := append(GetDefaultWritePaths(), cfg.Filesystem.AllowWrite...)
|
||||
allowLocalBinding := cfg.Network.AllowLocalBinding
|
||||
allowLocalOutbound := allowLocalBinding
|
||||
@@ -104,18 +85,32 @@ func buildMacOSParamsForTest(cfg *config.Config) MacOSSandboxParams {
|
||||
allowLocalOutbound = *cfg.Network.AllowLocalOutbound
|
||||
}
|
||||
|
||||
needsNetworkRestriction := !hasWildcardAllow && (needsNetwork || len(cfg.Network.AllowedDomains) == 0)
|
||||
var proxyHost, proxyPort string
|
||||
if cfg.Network.ProxyURL != "" {
|
||||
// Simple parsing for tests
|
||||
parts := strings.SplitN(cfg.Network.ProxyURL, "://", 2)
|
||||
if len(parts) == 2 {
|
||||
hostPort := parts[1]
|
||||
colonIdx := strings.LastIndex(hostPort, ":")
|
||||
if colonIdx >= 0 {
|
||||
proxyHost = hostPort[:colonIdx]
|
||||
proxyPort = hostPort[colonIdx+1:]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return MacOSSandboxParams{
|
||||
Command: "echo test",
|
||||
NeedsNetworkRestriction: needsNetworkRestriction,
|
||||
HTTPProxyPort: 8080,
|
||||
SOCKSProxyPort: 1080,
|
||||
NeedsNetworkRestriction: true,
|
||||
ProxyURL: cfg.Network.ProxyURL,
|
||||
ProxyHost: proxyHost,
|
||||
ProxyPort: proxyPort,
|
||||
AllowUnixSockets: cfg.Network.AllowUnixSockets,
|
||||
AllowAllUnixSockets: cfg.Network.AllowAllUnixSockets,
|
||||
AllowLocalBinding: allowLocalBinding,
|
||||
AllowLocalOutbound: allowLocalOutbound,
|
||||
DefaultDenyRead: cfg.Filesystem.DefaultDenyRead,
|
||||
DefaultDenyRead: cfg.Filesystem.IsDefaultDenyRead(),
|
||||
Cwd: "/tmp/test-project",
|
||||
ReadAllowPaths: cfg.Filesystem.AllowRead,
|
||||
ReadDenyPaths: cfg.Filesystem.DenyRead,
|
||||
WriteAllowPaths: allowPaths,
|
||||
@@ -158,8 +153,6 @@ func TestMacOS_ProfileNetworkSection(t *testing.T) {
|
||||
params := MacOSSandboxParams{
|
||||
Command: "echo test",
|
||||
NeedsNetworkRestriction: tt.restricted,
|
||||
HTTPProxyPort: 8080,
|
||||
SOCKSProxyPort: 1080,
|
||||
}
|
||||
|
||||
profile := GenerateSandboxProfile(params)
|
||||
@@ -184,38 +177,46 @@ func TestMacOS_DefaultDenyRead(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
defaultDenyRead bool
|
||||
cwd string
|
||||
allowRead []string
|
||||
wantContainsBlanketAllow bool
|
||||
wantContainsMetadataAllow bool
|
||||
wantContainsSystemAllows bool
|
||||
wantContainsUserAllowRead bool
|
||||
wantContainsCwdAllow bool
|
||||
}{
|
||||
{
|
||||
name: "default mode - blanket allow read",
|
||||
name: "legacy mode - blanket allow read",
|
||||
defaultDenyRead: false,
|
||||
cwd: "/home/user/project",
|
||||
allowRead: nil,
|
||||
wantContainsBlanketAllow: true,
|
||||
wantContainsMetadataAllow: false, // No separate metadata allow needed
|
||||
wantContainsSystemAllows: false, // No need for explicit system allows
|
||||
wantContainsMetadataAllow: false,
|
||||
wantContainsSystemAllows: false,
|
||||
wantContainsUserAllowRead: false,
|
||||
wantContainsCwdAllow: false,
|
||||
},
|
||||
{
|
||||
name: "defaultDenyRead enabled - metadata allow, system data allows",
|
||||
name: "defaultDenyRead enabled - metadata allow, system data allows, CWD allow",
|
||||
defaultDenyRead: true,
|
||||
cwd: "/home/user/project",
|
||||
allowRead: nil,
|
||||
wantContainsBlanketAllow: false,
|
||||
wantContainsMetadataAllow: true, // Should have file-read-metadata for traversal
|
||||
wantContainsSystemAllows: true, // Should have explicit system path allows
|
||||
wantContainsMetadataAllow: true,
|
||||
wantContainsSystemAllows: true,
|
||||
wantContainsUserAllowRead: false,
|
||||
wantContainsCwdAllow: true,
|
||||
},
|
||||
{
|
||||
name: "defaultDenyRead with allowRead paths",
|
||||
defaultDenyRead: true,
|
||||
allowRead: []string{"/home/user/project"},
|
||||
cwd: "/home/user/project",
|
||||
allowRead: []string{"/home/user/other"},
|
||||
wantContainsBlanketAllow: false,
|
||||
wantContainsMetadataAllow: true,
|
||||
wantContainsSystemAllows: true,
|
||||
wantContainsUserAllowRead: true,
|
||||
wantContainsCwdAllow: true,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -223,35 +224,36 @@ func TestMacOS_DefaultDenyRead(t *testing.T) {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
params := MacOSSandboxParams{
|
||||
Command: "echo test",
|
||||
HTTPProxyPort: 8080,
|
||||
SOCKSProxyPort: 1080,
|
||||
DefaultDenyRead: tt.defaultDenyRead,
|
||||
Cwd: tt.cwd,
|
||||
ReadAllowPaths: tt.allowRead,
|
||||
}
|
||||
|
||||
profile := GenerateSandboxProfile(params)
|
||||
|
||||
// Check for blanket "(allow file-read*)" without path restrictions
|
||||
// This appears at the start of read rules section in default mode
|
||||
hasBlanketAllow := strings.Contains(profile, "(allow file-read*)\n")
|
||||
if hasBlanketAllow != tt.wantContainsBlanketAllow {
|
||||
t.Errorf("blanket file-read allow = %v, want %v", hasBlanketAllow, tt.wantContainsBlanketAllow)
|
||||
}
|
||||
|
||||
// Check for file-read-metadata allow (for directory traversal in defaultDenyRead mode)
|
||||
hasMetadataAllow := strings.Contains(profile, "(allow file-read-metadata)")
|
||||
if hasMetadataAllow != tt.wantContainsMetadataAllow {
|
||||
t.Errorf("file-read-metadata allow = %v, want %v", hasMetadataAllow, tt.wantContainsMetadataAllow)
|
||||
}
|
||||
|
||||
// Check for system path allows (e.g., /usr, /bin) - should use file-read-data in strict mode
|
||||
hasSystemAllows := strings.Contains(profile, `(subpath "/usr")`) ||
|
||||
strings.Contains(profile, `(subpath "/bin")`)
|
||||
if hasSystemAllows != tt.wantContainsSystemAllows {
|
||||
t.Errorf("system path allows = %v, want %v\nProfile:\n%s", hasSystemAllows, tt.wantContainsSystemAllows, profile)
|
||||
}
|
||||
|
||||
// Check for user-specified allowRead paths
|
||||
if tt.wantContainsCwdAllow && tt.cwd != "" {
|
||||
hasCwdAllow := strings.Contains(profile, fmt.Sprintf(`(subpath %q)`, tt.cwd))
|
||||
if !hasCwdAllow {
|
||||
t.Errorf("CWD path %q not found in profile", tt.cwd)
|
||||
}
|
||||
}
|
||||
|
||||
if tt.wantContainsUserAllowRead && len(tt.allowRead) > 0 {
|
||||
hasUserAllow := strings.Contains(profile, tt.allowRead[0])
|
||||
if !hasUserAllow {
|
||||
@@ -290,14 +292,14 @@ func TestExpandMacOSTmpPaths(t *testing.T) {
|
||||
want: []string{".", "~/.cache"},
|
||||
},
|
||||
{
|
||||
name: "mirrors /tmp/fence to /private/tmp/fence",
|
||||
input: []string{".", "/tmp/fence"},
|
||||
want: []string{".", "/tmp/fence", "/private/tmp/fence"},
|
||||
name: "mirrors /tmp/greywall to /private/tmp/greywall",
|
||||
input: []string{".", "/tmp/greywall"},
|
||||
want: []string{".", "/tmp/greywall", "/private/tmp/greywall"},
|
||||
},
|
||||
{
|
||||
name: "mirrors /private/tmp/fence to /tmp/fence",
|
||||
input: []string{".", "/private/tmp/fence"},
|
||||
want: []string{".", "/private/tmp/fence", "/tmp/fence"},
|
||||
name: "mirrors /private/tmp/greywall to /tmp/greywall",
|
||||
input: []string{".", "/private/tmp/greywall"},
|
||||
want: []string{".", "/private/tmp/greywall", "/tmp/greywall"},
|
||||
},
|
||||
{
|
||||
name: "mirrors nested subdirectory",
|
||||
@@ -306,8 +308,8 @@ func TestExpandMacOSTmpPaths(t *testing.T) {
|
||||
},
|
||||
{
|
||||
name: "no duplicate when mirror already present",
|
||||
input: []string{".", "/tmp/fence", "/private/tmp/fence"},
|
||||
want: []string{".", "/tmp/fence", "/private/tmp/fence"},
|
||||
input: []string{".", "/tmp/greywall", "/private/tmp/greywall"},
|
||||
want: []string{".", "/tmp/greywall", "/private/tmp/greywall"},
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user