Sandboxed commands previously ran as `sudo -u _greywall`, breaking user identity (home dir, SSH keys, git config). Now uses `sudo -u #<uid> -g _greywall` so the process keeps the real user's identity while pf matches on EGID for traffic routing.

Key changes:
- pf rules use `group <GID>` instead of `user _greywall`
- GID resolved dynamically at daemon startup (not hardcoded, since macOS system groups like com.apple.access_ssh may claim preferred IDs)
- Sudoers rule installed at /etc/sudoers.d/greywall (validated with visudo)
- Invoking user added to _greywall group via dscl (not dseditgroup, which clobbers group attributes)
- tun2socks device discovery scans both stdout and stderr (fixes 10s timeout caused by STACK message going to stdout)
- Always-on daemon logging for session create/destroy events
247 lines · 6.4 KiB · Go
//go:build darwin || linux
|
|
|
|
package daemon
|
|
|
|
import (
	"errors"
	"fmt"
	"io"
	"net"
	"net/url"
	"os"
	"sync"
	"sync/atomic"
	"time"
)
const (
|
|
defaultMaxConns = 256
|
|
connIdleTimeout = 5 * time.Minute
|
|
upstreamDialTimout = 10 * time.Second
|
|
)
|
|
|
|
// Relay is a pure Go TCP relay that forwards connections from local listeners
|
|
// to an upstream SOCKS5 proxy address. It does NOT implement the SOCKS5 protocol;
|
|
// it blindly forwards bytes between the local connection and the upstream proxy.
|
|
type Relay struct {
|
|
listeners []net.Listener // both IPv4 and IPv6 listeners
|
|
targetAddr string // external SOCKS5 proxy host:port
|
|
port int // assigned port
|
|
wg sync.WaitGroup
|
|
done chan struct{}
|
|
debug bool
|
|
maxConns int // max concurrent connections (default 256)
|
|
activeConns atomic.Int32 // current active connections
|
|
}
|
|
|
|
// NewRelay parses a proxy URL to extract host:port and binds listeners on both
|
|
// 127.0.0.1 and [::1] using the same port. The port is dynamically assigned
|
|
// from the first (IPv4) bind. If the IPv6 bind fails, the relay continues
|
|
// with IPv4 only. Binding both addresses prevents IPv6 port squatting attacks.
|
|
func NewRelay(proxyURL string, debug bool) (*Relay, error) {
|
|
u, err := url.Parse(proxyURL)
|
|
if err != nil {
|
|
return nil, fmt.Errorf("invalid proxy URL %q: %w", proxyURL, err)
|
|
}
|
|
|
|
host := u.Hostname()
|
|
port := u.Port()
|
|
if host == "" || port == "" {
|
|
return nil, fmt.Errorf("proxy URL must include host and port: %q", proxyURL)
|
|
}
|
|
targetAddr := net.JoinHostPort(host, port)
|
|
|
|
// Bind IPv4 first to get a dynamically assigned port.
|
|
ipv4Listener, err := net.Listen("tcp4", "127.0.0.1:0")
|
|
if err != nil {
|
|
return nil, fmt.Errorf("failed to bind IPv4 listener: %w", err)
|
|
}
|
|
|
|
assignedPort := ipv4Listener.Addr().(*net.TCPAddr).Port
|
|
listeners := []net.Listener{ipv4Listener}
|
|
|
|
// Bind IPv6 on the same port. If it fails, log and continue with IPv4 only.
|
|
ipv6Addr := fmt.Sprintf("[::1]:%d", assignedPort)
|
|
ipv6Listener, err := net.Listen("tcp6", ipv6Addr)
|
|
if err != nil {
|
|
if debug {
|
|
fmt.Fprintf(os.Stderr, "[greywall:relay] IPv6 bind on %s failed, continuing IPv4 only: %v\n", ipv6Addr, err)
|
|
}
|
|
} else {
|
|
listeners = append(listeners, ipv6Listener)
|
|
}
|
|
|
|
if debug {
|
|
fmt.Fprintf(os.Stderr, "[greywall:relay] Bound %d listener(s) on port %d -> %s\n", len(listeners), assignedPort, targetAddr)
|
|
}
|
|
|
|
return &Relay{
|
|
listeners: listeners,
|
|
targetAddr: targetAddr,
|
|
port: assignedPort,
|
|
done: make(chan struct{}),
|
|
debug: debug,
|
|
maxConns: defaultMaxConns,
|
|
}, nil
|
|
}
|
|
|
|
// Port returns the local port the relay is listening on.
|
|
func (r *Relay) Port() int {
|
|
return r.port
|
|
}
|
|
|
|
// Start begins accepting connections on all listeners. Each accepted connection
|
|
// is handled in its own goroutine with bidirectional forwarding to the upstream
|
|
// proxy address. Start returns immediately; use Stop to shut down.
|
|
func (r *Relay) Start() error {
|
|
for _, ln := range r.listeners {
|
|
r.wg.Add(1)
|
|
go r.acceptLoop(ln)
|
|
}
|
|
return nil
|
|
}
|
|
|
|
// Stop gracefully shuts down the relay by closing all listeners and waiting
|
|
// for in-flight connections to finish.
|
|
func (r *Relay) Stop() {
|
|
close(r.done)
|
|
for _, ln := range r.listeners {
|
|
_ = ln.Close()
|
|
}
|
|
r.wg.Wait()
|
|
}
|
|
|
|
// acceptLoop runs the accept loop for a single listener.
|
|
func (r *Relay) acceptLoop(ln net.Listener) {
|
|
defer r.wg.Done()
|
|
|
|
for {
|
|
conn, err := ln.Accept()
|
|
if err != nil {
|
|
select {
|
|
case <-r.done:
|
|
return
|
|
default:
|
|
}
|
|
// Transient accept error; continue.
|
|
if r.debug {
|
|
fmt.Fprintf(os.Stderr, "[greywall:relay] Accept error: %v\n", err)
|
|
}
|
|
continue
|
|
}
|
|
|
|
r.wg.Add(1)
|
|
go r.handleConn(conn)
|
|
}
|
|
}
|
|
|
|
// handleConn handles a single accepted connection by dialing the upstream
|
|
// proxy and performing bidirectional byte forwarding.
|
|
func (r *Relay) handleConn(local net.Conn) {
|
|
defer r.wg.Done()
|
|
|
|
remoteAddr := local.RemoteAddr().String()
|
|
|
|
// Enforce max concurrent connections.
|
|
current := r.activeConns.Add(1)
|
|
if int(current) > r.maxConns {
|
|
r.activeConns.Add(-1)
|
|
if r.debug {
|
|
fmt.Fprintf(os.Stderr, "[greywall:relay] Connection from %s rejected: max connections (%d) reached\n", remoteAddr, r.maxConns)
|
|
}
|
|
_ = local.Close()
|
|
return
|
|
}
|
|
defer r.activeConns.Add(-1)
|
|
|
|
if r.debug {
|
|
fmt.Fprintf(os.Stderr, "[greywall:relay] Connection accepted from %s\n", remoteAddr)
|
|
}
|
|
|
|
// Dial the upstream proxy.
|
|
upstream, err := net.DialTimeout("tcp", r.targetAddr, upstreamDialTimout)
|
|
if err != nil {
|
|
fmt.Fprintf(os.Stderr, "[greywall:relay] WARNING: upstream connect to %s failed: %v\n", r.targetAddr, err)
|
|
_ = local.Close()
|
|
return
|
|
}
|
|
|
|
if r.debug {
|
|
fmt.Fprintf(os.Stderr, "[greywall:relay] Upstream connected: %s -> %s\n", remoteAddr, r.targetAddr)
|
|
}
|
|
|
|
// Bidirectional copy with proper TCP half-close.
|
|
var (
|
|
localToUpstream int64
|
|
upstreamToLocal int64
|
|
copyWg sync.WaitGroup
|
|
)
|
|
|
|
copyWg.Add(2)
|
|
|
|
// local -> upstream
|
|
go func() {
|
|
defer copyWg.Done()
|
|
localToUpstream = r.copyWithHalfClose(upstream, local)
|
|
}()
|
|
|
|
// upstream -> local
|
|
go func() {
|
|
defer copyWg.Done()
|
|
upstreamToLocal = r.copyWithHalfClose(local, upstream)
|
|
}()
|
|
|
|
copyWg.Wait()
|
|
_ = local.Close()
|
|
_ = upstream.Close()
|
|
|
|
if r.debug {
|
|
fmt.Fprintf(os.Stderr, "[greywall:relay] Connection closed %s (sent=%d recv=%d)\n", remoteAddr, localToUpstream, upstreamToLocal)
|
|
}
|
|
}
|
|
|
|
// copyWithHalfClose copies data from src to dst, setting an idle timeout on
|
|
// each read. When the source reaches EOF, it signals a TCP half-close on dst
|
|
// via CloseWrite (if available) rather than a full Close.
|
|
func (r *Relay) copyWithHalfClose(dst, src net.Conn) int64 {
|
|
buf := make([]byte, 32*1024)
|
|
var written int64
|
|
|
|
for {
|
|
// Reset idle timeout before each read.
|
|
if err := src.SetReadDeadline(time.Now().Add(connIdleTimeout)); err != nil {
|
|
break
|
|
}
|
|
|
|
nr, readErr := src.Read(buf)
|
|
if nr > 0 {
|
|
// Reset write deadline for each write.
|
|
if err := dst.SetWriteDeadline(time.Now().Add(connIdleTimeout)); err != nil {
|
|
break
|
|
}
|
|
nw, writeErr := dst.Write(buf[:nr])
|
|
written += int64(nw)
|
|
if writeErr != nil {
|
|
break
|
|
}
|
|
if nw != nr {
|
|
break
|
|
}
|
|
}
|
|
if readErr != nil {
|
|
// Source hit EOF or error: signal half-close on destination.
|
|
if tcpDst, ok := dst.(*net.TCPConn); ok {
|
|
_ = tcpDst.CloseWrite()
|
|
}
|
|
if readErr != io.EOF {
|
|
// Unexpected error; connection may have timed out.
|
|
if r.debug {
|
|
fmt.Fprintf(os.Stderr, "[greywall:relay] Copy error: %v\n", readErr)
|
|
}
|
|
}
|
|
break
|
|
}
|
|
}
|
|
|
|
return written
|
|
}
|