commit 8e30ac682528c46e879a926fb4f8cef7fc4a8e21 Author: vndangkhoa Date: Mon Feb 2 08:33:46 2026 +0700 release: v1.1 portable (clean history) diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..1d9d004 --- /dev/null +++ b/.gitignore @@ -0,0 +1,30 @@ +# Logs +logs +*.log +npm-debug.log* +yarn-debug.log* +yarn-error.log* +pnpm-debug.log* +lerna-debug.log* + +node_modules +dist +dist-ssr +*.local + +# Editor directories and files +.vscode/* +!.vscode/extensions.json +.idea +.DS_Store +*.suo +*.ntvs* +*.njsproj +*.sln +*.sw? +# Release Artifacts +Release/ +release/ +*.zip +*.exe +backend/dist/ diff --git a/.npmrc b/.npmrc new file mode 100644 index 0000000..a914207 --- /dev/null +++ b/.npmrc @@ -0,0 +1,2 @@ +node-linker=hoisted +package-import-method=clone-or-copy diff --git a/README.md b/README.md new file mode 100644 index 0000000..cbcc8c0 --- /dev/null +++ b/README.md @@ -0,0 +1,68 @@ +# KV Clearnup (Antigravity) 🚀 + +A modern, high-performance system optimizer for macOS, built with **Electron**, **React**, and **Go**. + +![App Screenshot](https://via.placeholder.com/800x500?text=Antigravity+Dashboard) + +## Features +- **Flash Clean**: Instantly remove system caches, logs, and trash. +- **Deep Clean**: Scan for large files and heavy folders. +- **Real-time Monitoring**: Track disk usage and category sizes. +- **Universal Binary**: Runs natively on both Apple Silicon (M1/M2/M3) and Intel Macs. +- **High Performance**: Heavy lifting is handled by a compiled Go backend. + +## Prerequisites +- **Node.js** (v18+) +- **Go** (v1.20+) +- **pnpm** (preferred) or npm + +## Development + +### 1. Install Dependencies +```bash +npm install +``` + +### 2. Run in Development Mode +This starts the Go backend (port 36969) and the Vite/Electron frontend concurrently. +```bash +./start-go.sh +``` +*Note: Do not run `npm run dev` directly if you want the backend to work. 
Use the script.* + +## Building for Production + +To create a distributable `.dmg` file for macOS: + +### 1. Build the App +```bash +npm run build:mac +``` +This command will: +1. Compile the Go backend for both `amd64` and `arm64`. +2. Create a universal binary using `lipo`. +3. Build the React frontend. +4. Package the Electron app and bundle the backend. +5. Generate a universal `.dmg`. + +### 2. Locate the Installer +The output file will be at: +``` +release/KV Clearnup-1.0.0-universal.dmg +``` + +## Running the App +1. **Mount the DMG**: Double-click the `.dmg` file in the `release` folder. +2. **Install**: Drag the app to your `Applications` folder. +3. **Launch**: Open "KV Clearnup" from Applications. + +*Troubleshooting*: If you see "System Extension Blocked" or similar OS warnings, go to **System Settings > Privacy & Security** and allow the application. + +## Architecture +- **Frontend**: React, TypeScript, TailwindCSS, Framer Motion. +- **Main Process**: Electron (TypeScript). +- **Backend**: Go (Golang) for file system operations and heavy scanning. +- **Communication**: Electron uses `child_process` to spawn the Go binary. Frontend communicates with backend via HTTP (localhost:36969). 
+ +## License +MIT diff --git a/Release/Antigravity-Mac.zip b/Release/Antigravity-Mac.zip new file mode 100644 index 0000000..7fc0c9f Binary files /dev/null and b/Release/Antigravity-Mac.zip differ diff --git a/Release/Antigravity-Windows.zip b/Release/Antigravity-Windows.zip new file mode 100644 index 0000000..d8489ea Binary files /dev/null and b/Release/Antigravity-Windows.zip differ diff --git a/backend/internal/apps/apps_common.go b/backend/internal/apps/apps_common.go new file mode 100644 index 0000000..21d7106 --- /dev/null +++ b/backend/internal/apps/apps_common.go @@ -0,0 +1,22 @@ +package apps + +type AppInfo struct { + Name string `json:"name"` + Path string `json:"path"` + BundleID string `json:"bundleID"` // On Windows this can be ProductCode or Registry Key Name + UninstallString string `json:"uninstallString"` + Size int64 `json:"size"` + Icon string `json:"icon,omitempty"` +} + +type AssociatedFile struct { + Path string `json:"path"` + Type string `json:"type"` // "cache", "config", "log", "data" + Size int64 `json:"size"` +} + +type AppDetails struct { + AppInfo + Associated []AssociatedFile `json:"associated"` + TotalSize int64 `json:"totalSize"` +} diff --git a/backend/internal/apps/apps_darwin.go b/backend/internal/apps/apps_darwin.go new file mode 100644 index 0000000..c024951 --- /dev/null +++ b/backend/internal/apps/apps_darwin.go @@ -0,0 +1,206 @@ +//go:build darwin + +package apps + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" + "sync" +) + +// Structs moved to apps_common.go + +// ScanApps returns a list of installed applications +func ScanApps() ([]AppInfo, error) { + // Scan /Applications and ~/Applications + home, _ := os.UserHomeDir() + dirs := []string{ + "/Applications", + filepath.Join(home, "Applications"), + } + + var apps []AppInfo + var mu sync.Mutex + var wg sync.WaitGroup + + for _, dir := range dirs { + entries, err := os.ReadDir(dir) + if err != nil { + continue + } + + for _, entry := range entries { + 
if strings.HasSuffix(entry.Name(), ".app") { + path := filepath.Join(dir, entry.Name()) + wg.Add(1) + go func(p, name string) { + defer wg.Done() + // Get Bundle ID + bid := getBundleID(p) + if bid == "" { + return // Skip if no bundle ID (system util or broken) + } + + // Get Size (fast estimate) + // using du -s -k + size := getDirSize(p) + + mu.Lock() + apps = append(apps, AppInfo{ + Name: strings.TrimSuffix(name, ".app"), + Path: p, + BundleID: bid, + Size: size, + }) + mu.Unlock() + }(path, entry.Name()) + } + } + } + wg.Wait() + return apps, nil +} + +// GetAppDetails finds all associated files for a given app path +func GetAppDetails(appPath, _ string) (*AppDetails, error) { + bid := getBundleID(appPath) + if bid == "" { + return nil, fmt.Errorf("could not determine bundle ID") + } + + appSize := getDirSize(appPath) + details := &AppDetails{ + AppInfo: AppInfo{ + Name: filepath.Base(appPath), // simplified + Path: appPath, + BundleID: bid, + Size: appSize, + }, + TotalSize: appSize, + } + + home, _ := os.UserHomeDir() + library := filepath.Join(home, "Library") + + // Common locations to search for Bundle ID + // Name-based search fallback is risky, staying strict to Bundle ID for now + locations := map[string]string{ + "Application Support": filepath.Join(library, "Application Support"), + "Caches": filepath.Join(library, "Caches"), + "Preferences": filepath.Join(library, "Preferences"), + "Saved Application State": filepath.Join(library, "Saved Application State"), + "Logs": filepath.Join(library, "Logs"), + "Cookies": filepath.Join(library, "Cookies"), + "Containers": filepath.Join(library, "Containers"), // Sandboxed data + } + + for locName, locPath := range locations { + // 1. 
Direct match: path/BundleID + target := filepath.Join(locPath, bid) + if exists(target) { + size := getDirSize(target) + details.Associated = append(details.Associated, AssociatedFile{ + Path: target, + Type: getType(locName), + Size: size, + }) + details.TotalSize += size + } + + // 2. Preferences often use plist extension: path/BundleID.plist + if locName == "Preferences" { + plistPath := filepath.Join(locPath, bid+".plist") + if exists(plistPath) { + size := getFileSize(plistPath) + details.Associated = append(details.Associated, AssociatedFile{ + Path: plistPath, + Type: "config", + Size: size, + }) + details.TotalSize += size + } + } + } + + return details, nil +} + +// DeleteFiles removes the requested paths +func DeleteFiles(paths []string) error { + for _, p := range paths { + // Basic safety check: don't delete root or critical system paths + // Real implementation needs robust safeguards + if p == "/" || p == "/Applications" || p == "/System" || p == "/Library" { + continue // Skip dangerous paths + } + if err := os.RemoveAll(p); err != nil { + return err + } + } + return nil +} + +// Helpers + +func getBundleID(path string) string { + // mdls -name kMDItemCFBundleIdentifier -r /path/to/app + cmd := exec.Command("mdls", "-name", "kMDItemCFBundleIdentifier", "-r", path) + out, err := cmd.Output() + if err != nil { + return "" + } + res := strings.TrimSpace(string(out)) + if res == "(null)" { + return "" + } + return res +} + +func getDirSize(path string) int64 { + cmd := exec.Command("du", "-s", "-k", path) + out, err := cmd.Output() + if err != nil { + return 0 + } + parts := strings.Fields(string(out)) + if len(parts) > 0 { + var s int64 + fmt.Sscanf(parts[0], "%d", &s) + return s * 1024 + } + return 0 +} + +func getFileSize(path string) int64 { + info, err := os.Stat(path) + if err != nil { + return 0 + } + return info.Size() +} + +func exists(path string) bool { + _, err := os.Stat(path) + return err == nil +} + +func getType(locName string) string { + 
switch locName { + case "Caches": + return "cache" + case "Preferences", "Cookies": + return "config" + case "Logs": + return "log" + default: + return "data" + } +} + +// RunUninstaller executes the uninstall command (Not implemented on Mac yet) +func RunUninstaller(cmdString string) error { + return fmt.Errorf("uninstall not supported on macOS yet") +} diff --git a/backend/internal/apps/apps_windows.go b/backend/internal/apps/apps_windows.go new file mode 100644 index 0000000..67cdcd8 --- /dev/null +++ b/backend/internal/apps/apps_windows.go @@ -0,0 +1,248 @@ +//go:build windows + +package apps + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" + "syscall" + + "golang.org/x/sys/windows/registry" +) + +// ScanApps returns a list of installed applications via Registry +func ScanApps() ([]AppInfo, error) { + var apps []AppInfo + + // Keys to search + // HKLM Software\Microsoft\Windows\CurrentVersion\Uninstall + // HKLM Software\WOW6432Node\Microsoft\Windows\CurrentVersion\Uninstall + // HKCU Software\Microsoft\Windows\CurrentVersion\Uninstall + + keys := []struct { + hive registry.Key + path string + }{ + {registry.LOCAL_MACHINE, `Software\Microsoft\Windows\CurrentVersion\Uninstall`}, + {registry.LOCAL_MACHINE, `Software\WOW6432Node\Microsoft\Windows\CurrentVersion\Uninstall`}, + {registry.CURRENT_USER, `Software\Microsoft\Windows\CurrentVersion\Uninstall`}, + } + + seen := make(map[string]bool) + + for _, k := range keys { + baseKey, err := registry.OpenKey(k.hive, k.path, registry.READ) + if err != nil { + continue + } + + subkeys, err := baseKey.ReadSubKeyNames(-1) + baseKey.Close() + if err != nil { + continue + } + + for _, subkeyName := range subkeys { + appKey, err := registry.OpenKey(k.hive, k.path+`\`+subkeyName, registry.READ) + if err != nil { + continue + } + + displayName, _, err := appKey.GetStringValue("DisplayName") + if err != nil || displayName == "" { + appKey.Close() + continue + } + + // Define installLocation explicitly + 
installLocation, _, _ := appKey.GetStringValue("InstallLocation") + uninstallString, _, _ := appKey.GetStringValue("UninstallString") + quietUninstallString, _, _ := appKey.GetStringValue("QuietUninstallString") + + if uninstallString == "" && quietUninstallString != "" { + uninstallString = quietUninstallString + } + + // Debug Log + if strings.Contains(displayName, "Foxit") { + fmt.Printf("found Foxit: %s | UninstallString: %s\n", displayName, uninstallString) + } + + // Deduplication: If we've seen this Name + Location combination, skip it. + // This handles the common case of 32-bit apps appearing in both HKLM and WOW6432Node. + dedupKey := displayName + "|" + strings.ToLower(installLocation) + if seen[dedupKey] { + appKey.Close() + continue + } + seen[dedupKey] = true + + // Try to get size from registry (EstimatedSize is in KB) + sizeVal, _, errSize := appKey.GetIntegerValue("EstimatedSize") + var sizeBytes int64 + if errSize == nil { + sizeBytes = int64(sizeVal) * 1024 + } + + // Construct Full Registry Key Path as BundleID for later use + hiveName := "HKLM" + if k.hive == registry.CURRENT_USER { + hiveName = "HKCU" + } + fullRegPath := hiveName + `\` + k.path + `\` + subkeyName + + apps = append(apps, AppInfo{ + Name: displayName, + Path: installLocation, + BundleID: fullRegPath, + UninstallString: uninstallString, + Size: sizeBytes, + }) + appKey.Close() + } + } + + return apps, nil +} + +// GetAppDetails finds all associated files (simplified for Windows) +func GetAppDetails(appPath, bundleID string) (*AppDetails, error) { + // appPath might come from ScanApps which set it to InstallLocation. + // bundleID is used as the Registry Key Path. + + // Re-construct basic info + info := AppInfo{ + Name: filepath.Base(appPath), + Path: appPath, + BundleID: bundleID, + // UninstallString is hard to recover if not passed, but usually we call GetAppDetails after ScanApps which has it. 
+ // For now leave empty, or we'd need to re-query registry if bundleID is a registry path. + Size: 0, + } + + if appPath == "" && bundleID != "" { + // Fallback name if path is empty + parts := strings.Split(bundleID, `\`) + if len(parts) > 0 { + info.Name = parts[len(parts)-1] + } + } + + details := &AppDetails{ + AppInfo: info, + TotalSize: 0, + } + + // 1. Scan File System + if appPath != "" { + var size int64 + filepath.WalkDir(appPath, func(_ string, d os.DirEntry, err error) error { + if err == nil && !d.IsDir() { + i, _ := d.Info() + size += i.Size() + } + return nil + }) + + details.AppInfo.Size = size + details.TotalSize = size + + // Add the main folder as associated data + details.Associated = append(details.Associated, AssociatedFile{ + Path: appPath, + Type: "data", + Size: size, + }) + } + + // 2. Add Registry Key (Uninstall Entry) + if bundleID != "" && (strings.HasPrefix(bundleID, "HKLM") || strings.HasPrefix(bundleID, "HKCU")) { + // We treat the registry key as a "file" with special type and 0 size + details.Associated = append(details.Associated, AssociatedFile{ + Path: "REG:" + bundleID, + Type: "registry", // New type + Size: 0, // Registry entries are negligible in size + }) + } + + return details, nil +} + +// DeleteFiles removes the requested paths +func DeleteFiles(paths []string) error { + for _, p := range paths { + if p == "" { + continue + } + + // Registry Deletion + if strings.HasPrefix(p, "REG:") { + regPath := strings.TrimPrefix(p, "REG:") + deleteRegistryKey(regPath) + continue + } + + // Safety checks + if p == "C:\\" || p == "c:\\" || + p == "C:\\Windows" || strings.HasPrefix(strings.ToLower(p), "c:\\windows") { + continue + } + + err := os.RemoveAll(p) + if err != nil { + // Log error but continue? Or return? 
+ // return err + // On Windows file locking is common, best effort + } + } + return nil +} + +func deleteRegistryKey(fullPath string) error { + var hive registry.Key + var subPath string + + if strings.HasPrefix(fullPath, "HKLM\\") { + hive = registry.LOCAL_MACHINE + subPath = strings.TrimPrefix(fullPath, "HKLM\\") + } else if strings.HasPrefix(fullPath, "HKCU\\") { + hive = registry.CURRENT_USER + subPath = strings.TrimPrefix(fullPath, "HKCU\\") + } else { + return nil + } + + // Provide parent key and subkey name to DeleteKey + // path: Software\...\Uninstall\AppGUID + lastSlash := strings.LastIndex(subPath, `\`) + if lastSlash == -1 { + return nil + } + parentPath := subPath[:lastSlash] + keyName := subPath[lastSlash+1:] + + k, err := registry.OpenKey(hive, parentPath, registry.WRITE) + if err != nil { + return err + } + defer k.Close() + + return registry.DeleteKey(k, keyName) +} + +// RunUninstaller executes the uninstall command +func RunUninstaller(cmdString string) error { + fmt.Printf("RunUninstaller Called with: %s\n", cmdString) + cmd := exec.Command("cmd", "/C", cmdString) + cmd.SysProcAttr = &syscall.SysProcAttr{HideWindow: false} // Show window so user can click next + err := cmd.Start() + if err != nil { + fmt.Printf("RunUninstaller Error: %v\n", err) + return err + } + fmt.Printf("RunUninstaller Started Successfully\n") + return nil +} diff --git a/backend/internal/cleaner/cleaner.go b/backend/internal/cleaner/cleaner.go new file mode 100644 index 0000000..85ea87a --- /dev/null +++ b/backend/internal/cleaner/cleaner.go @@ -0,0 +1,27 @@ +package cleaner + +import ( + "fmt" + "os" +) + +// PurgePath deletes a file or directory permanently +func PurgePath(path string) error { + // Safety check: Don't delete root or critical paths + if path == "/" || path == "" { + return fmt.Errorf("cannot delete root") + } + + // Check if file exists + if _, err := os.Stat(path); os.IsNotExist(err) { + return fmt.Errorf("path does not exist") + } + + // Perform 
deletion + err := os.RemoveAll(path) + if err != nil { + return err + } + + return nil +} diff --git a/backend/internal/platform/platform_common.go b/backend/internal/platform/platform_common.go new file mode 100644 index 0000000..ac44397 --- /dev/null +++ b/backend/internal/platform/platform_common.go @@ -0,0 +1,8 @@ +package platform + +type SystemInfo struct { + Model string `json:"model"` + Chip string `json:"chip"` + Memory string `json:"memory"` + OS string `json:"os"` +} diff --git a/backend/internal/platform/platform_darwin.go b/backend/internal/platform/platform_darwin.go new file mode 100644 index 0000000..ab1982d --- /dev/null +++ b/backend/internal/platform/platform_darwin.go @@ -0,0 +1,114 @@ +//go:build darwin + +package platform + +import ( + "encoding/json" + "fmt" + "os" + "os/exec" + "path/filepath" +) + +func OpenSettings() error { + return exec.Command("open", "x-apple.systempreferences:com.apple.settings.Storage").Run() +} + +func GetSystemInfo() (*SystemInfo, error) { + // Structs for parsing system_profiler JSON + type HardwareItem struct { + MachineName string `json:"machine_name"` + ChipType string `json:"chip_type"` + PhysicalMemory string `json:"physical_memory"` + } + + type SoftwareItem struct { + OSVersion string `json:"os_version"` + } + + type SystemProfile struct { + Hardware []HardwareItem `json:"SPHardwareDataType"` + Software []SoftwareItem `json:"SPSoftwareDataType"` + } + + cmd := exec.Command("system_profiler", "SPHardwareDataType", "SPSoftwareDataType", "-json") + output, err := cmd.Output() + if err != nil { + return nil, err + } + + var profile SystemProfile + if err := json.Unmarshal(output, &profile); err != nil { + return nil, err + } + + info := &SystemInfo{ + Model: "Unknown", + Chip: "Unknown", + Memory: "Unknown", + OS: "Unknown", + } + + if len(profile.Hardware) > 0 { + info.Model = profile.Hardware[0].MachineName + info.Chip = profile.Hardware[0].ChipType + info.Memory = profile.Hardware[0].PhysicalMemory + } + if 
len(profile.Software) > 0 { + info.OS = profile.Software[0].OSVersion + } + return info, nil +} + +func EmptyTrash() error { + home, err := os.UserHomeDir() + if err != nil { + return err + } + trashPath := filepath.Join(home, ".Trash") + + entries, err := os.ReadDir(trashPath) + if err != nil { + return err + } + + for _, entry := range entries { + itemPath := filepath.Join(trashPath, entry.Name()) + os.RemoveAll(itemPath) + } + return nil +} + +func GetCachePath() (string, error) { + home, err := os.UserHomeDir() + if err != nil { + return "", err + } + return filepath.Join(home, "Library", "Caches"), nil +} + +func GetDockerPath() (string, error) { + dockerPath, err := exec.LookPath("docker") + if err != nil { + // Try common locations + commonPaths := []string{ + "/usr/local/bin/docker", + "/opt/homebrew/bin/docker", + "/Applications/Docker.app/Contents/Resources/bin/docker", + } + for _, p := range commonPaths { + if _, e := os.Stat(p); e == nil { + dockerPath = p + return dockerPath, nil + } + } + } + if dockerPath != "" { + return dockerPath, nil + } + return "", fmt.Errorf("docker not found") +} + +func OpenBrowser(url string) error { + return exec.Command("open", url).Start() +} diff --git a/backend/internal/platform/platform_windows.go b/backend/internal/platform/platform_windows.go new file mode 100644 index 0000000..3694b98 --- /dev/null +++ b/backend/internal/platform/platform_windows.go @@ -0,0 +1,106 @@ +//go:build windows + +package platform + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" +) + +func OpenSettings() error { + // Open Windows Settings -> Storage + // ms-settings:storagesense + return exec.Command("cmd", "/c", "start", "ms-settings:storagesense").Run() +} + +func GetSystemInfo() (*SystemInfo, error) { + // Use systeminfo or wmic + // simpler: generic info + + info := &SystemInfo{ + Model: "PC", + Chip: "Unknown", + Memory: "Unknown", + OS: "Windows", + } + + // Helper to run powershell and get string result + runPS 
:= func(cmd string) string { + out, err := exec.Command("powershell", "-NoProfile", "-Command", cmd).Output() + if err != nil { + return "" + } + return strings.TrimSpace(string(out)) + } + + // 1. Get OS Name (Simplified) + // Get-CimInstance Win32_OperatingSystem | Select-Object -ExpandProperty Caption + osName := runPS("(Get-CimInstance Win32_OperatingSystem).Caption") + if osName != "" { + info.OS = strings.TrimPrefix(osName, "Microsoft ") + } + + // 2. Get Memory (in GB) + // [math]::Round((Get-CimInstance Win32_ComputerSystem).TotalPhysicalMemory / 1GB) + mem := runPS("[math]::Round((Get-CimInstance Win32_ComputerSystem).TotalPhysicalMemory / 1GB)") + if mem != "" { + info.Memory = mem + " GB" + } + + // 3. Get CPU Name + // (Get-CimInstance Win32_Processor).Name + cpu := runPS("(Get-CimInstance Win32_Processor).Name") + if cpu != "" { + // Cleanup CPU string (remove extra spaces) + info.Chip = strings.Join(strings.Fields(cpu), " ") + } + + return info, nil +} + +func EmptyTrash() error { + // PowerShell to empty Recycle Bin + // Clear-RecycleBin -Force -ErrorAction SilentlyContinue + + // PowerShell to empty Recycle Bin + // Clear-RecycleBin -Force -ErrorAction SilentlyContinue + // We use ExecutionPolicy Bypass to avoid permission issues. + // We also catch errors to prevent 500s on empty bins. + + cmd := exec.Command("powershell", "-NoProfile", "-ExecutionPolicy", "Bypass", "-Command", "Clear-RecycleBin -Force -ErrorAction SilentlyContinue") + // If it returns an error, it might be due to permissions or being already empty. + // We can ignore the error for now to check if that fixes the User's 500. + err := cmd.Run() + if err != nil { + // Log it but return nil effectively? + // For now, let's return nil because 'Empty Trash' is best-effort. + // If the user really has a permission issue, it acts as a no-op which is better than a crash. 
+ fmt.Printf("EmptyTrash warning: %v\n", err) + return nil + } + return nil +} + +func GetCachePath() (string, error) { + home, err := os.UserHomeDir() + if err != nil { + return "", err + } + return filepath.Join(home, "AppData", "Local", "Temp"), nil +} + +func GetDockerPath() (string, error) { + path, err := exec.LookPath("docker") + if err == nil { + return path, nil + } + // Common Windows path? + return "", fmt.Errorf("docker not found") +} + +func OpenBrowser(url string) error { + return exec.Command("rundll32", "url.dll,FileProtocolHandler", url).Start() +} diff --git a/backend/internal/scanner/scanner_common.go b/backend/internal/scanner/scanner_common.go new file mode 100644 index 0000000..6132b89 --- /dev/null +++ b/backend/internal/scanner/scanner_common.go @@ -0,0 +1,92 @@ +package scanner + +import ( + "os" + "path/filepath" + "sort" + "strings" +) + +type ScanResult struct { + Path string `json:"path"` + Size int64 `json:"size"` + IsDirectory bool `json:"isDirectory"` +} + +type DiskUsage struct { + Name string `json:"name"` // e.g. "Local Disk (C:)" + TotalGB string `json:"totalGB"` + UsedGB string `json:"usedGB"` + FreeGB string `json:"freeGB"` +} + +type CategorySizes struct { + Documents int64 `json:"documents"` // Personal Docs only + Downloads int64 `json:"downloads"` + Desktop int64 `json:"desktop"` + Music int64 `json:"music"` + Movies int64 `json:"movies"` + System int64 `json:"system"` + Trash int64 `json:"trash"` + Apps int64 `json:"apps"` + Photos int64 `json:"photos"` + ICloud int64 `json:"icloud"` // Or OneDrive on Windows? 
+ Archives int64 `json:"archives"` + VirtualMachines int64 `json:"virtual_machines"` + Games int64 `json:"games"` + AI int64 `json:"ai"` + Docker int64 `json:"docker"` + Cache int64 `json:"cache"` +} + +type CleaningEstimates struct { + FlashEst int64 `json:"flash_est"` + DeepEst int64 `json:"deep_est"` +} + +// FindLargeFiles walks a directory and returns files > threshold +func FindLargeFiles(root string, threshold int64) ([]ScanResult, error) { + var results []ScanResult + + err := filepath.WalkDir(root, func(path string, d os.DirEntry, err error) error { + if err != nil { + return nil // Skip errors + } + + // Skip hidden files/dirs (except .Trash maybe, but let's skip all . for now) + if strings.HasPrefix(d.Name(), ".") { + if d.IsDir() { + return filepath.SkipDir + } + return nil + } + + // Skip node_modules explicitly + if d.IsDir() && d.Name() == "node_modules" { + return filepath.SkipDir + } + + if !d.IsDir() { + info, err := d.Info() + if err == nil && info.Size() > threshold { + results = append(results, ScanResult{ + Path: path, + Size: info.Size(), + IsDirectory: false, + }) + } + } + return nil + }) + + // Sort by size desc + sort.Slice(results, func(i, j int) bool { + return results[i].Size > results[j].Size + }) + + // Return top 50 + if len(results) > 50 { + return results[:50], err + } + return results, err +} diff --git a/backend/internal/scanner/scanner_darwin.go b/backend/internal/scanner/scanner_darwin.go new file mode 100644 index 0000000..fbbc4da --- /dev/null +++ b/backend/internal/scanner/scanner_darwin.go @@ -0,0 +1,369 @@ +//go:build darwin + +package scanner + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" + "sort" + "strconv" + "strings" +) + +// Structs moved to scanner_common.go + +// GetDiskUsage uses diskutil for accurate APFS disk usage +func GetDiskUsage() ([]*DiskUsage, error) { + cmd := exec.Command("diskutil", "info", "/") + out, err := cmd.Output() + if err != nil { + return nil, err + } + + lines := 
strings.Split(string(out), "\n") + + var containerTotal, containerFree int64 + + for _, line := range lines { + line = strings.TrimSpace(line) + + // Parse "Container Total Space:" line + if strings.HasPrefix(line, "Container Total Space:") { + // Format: "Container Total Space: 245.1 GB (245107195904 Bytes)" + if start := strings.Index(line, "("); start != -1 { + if end := strings.Index(line[start:], " Bytes)"); end != -1 { + bytesStr := line[start+1 : start+end] + containerTotal, _ = strconv.ParseInt(bytesStr, 10, 64) + } + } + } + + // Parse "Container Free Space:" line + if strings.HasPrefix(line, "Container Free Space:") { + if start := strings.Index(line, "("); start != -1 { + if end := strings.Index(line[start:], " Bytes)"); end != -1 { + bytesStr := line[start+1 : start+end] + containerFree, _ = strconv.ParseInt(bytesStr, 10, 64) + } + } + } + } + + // Calculate used space + containerUsed := containerTotal - containerFree + + toGB := func(bytes int64) string { + gb := float64(bytes) / 1024 / 1024 / 1024 + return fmt.Sprintf("%.2f", gb) + } + + return []*DiskUsage{{ + Name: "Macintosh HD", + TotalGB: toGB(containerTotal), + UsedGB: toGB(containerUsed), + FreeGB: toGB(containerFree), + }}, nil +} + +// FindLargeFiles moved to scanner_common.go + +// FindHeavyFolders uses `du` to find large directories +func FindHeavyFolders(root string) ([]ScanResult, error) { + // du -k -d 2 | sort -nr | head -n 50 + cmd := exec.Command("bash", "-c", fmt.Sprintf("du -k -d 2 \"%s\" | sort -nr | head -n 50", root)) + out, err := cmd.Output() + if err != nil { + return nil, err + } + + var results []ScanResult + lines := strings.Split(string(out), "\n") + for _, line := range lines { + line = strings.TrimSpace(line) + if line == "" { + continue + } + + // Attempt to parse first part as size + firstSpace := strings.IndexAny(line, " \t") + if firstSpace == -1 { + continue + } + + sizeStr := line[:firstSpace] + pathStr := strings.TrimSpace(line[firstSpace:]) + + if pathStr == root 
{ + continue + } + + sizeK, err := strconv.ParseInt(sizeStr, 10, 64) + if err == nil { + results = append(results, ScanResult{ + Path: pathStr, + Size: sizeK * 1024, + IsDirectory: true, + }) + } + } + + return results, nil +} + +func ScanUserDocuments() ([]ScanResult, error) { + home, err := os.UserHomeDir() + if err != nil { + return nil, err + } + + targets := []string{ + filepath.Join(home, "Documents"), + filepath.Join(home, "Downloads"), + filepath.Join(home, "Desktop"), + } + + var allResults []ScanResult + for _, t := range targets { + // 10MB threshold + res, _ := FindLargeFiles(t, 10*1024*1024) + allResults = append(allResults, res...) + } + + // Sort combined + sort.Slice(allResults, func(i, j int) bool { + return allResults[i].Size > allResults[j].Size + }) + + if len(allResults) > 50 { + return allResults[:50], nil + } + return allResults, nil +} + +func ScanSystemData() ([]ScanResult, error) { + home, err := os.UserHomeDir() + if err != nil { + return nil, err + } + + // System targets: Caches, Logs, Application Support (selective) + targets := []string{ + filepath.Join(home, "Library", "Caches"), + filepath.Join(home, "Library", "Logs"), + filepath.Join(home, "Library", "Developer", "Xcode", "DerivedData"), + } + + var allResults []ScanResult + for _, t := range targets { + // 10MB threshold + res, _ := FindLargeFiles(t, 10*1024*1024) + allResults = append(allResults, res...) 
+ } + + sort.Slice(allResults, func(i, j int) bool { + return allResults[i].Size > allResults[j].Size + }) + + if len(allResults) > 50 { + return allResults[:50], nil + } + return allResults, nil +} + +func GetDirectorySize(path string) int64 { + // du -s -k + cmd := exec.Command("du", "-s", "-k", path) + out, err := cmd.Output() + if err != nil { + return 0 + } + + // Output is "size path" + parts := strings.Fields(string(out)) + if len(parts) > 0 { + sizeK, _ := strconv.ParseInt(parts[0], 10, 64) + return sizeK * 1024 // Bytes + } + return 0 +} + +func GetCategorySizes() (*CategorySizes, error) { + home, err := os.UserHomeDir() + if err != nil { + return nil, err + } + + // Paths to check + docPath := filepath.Join(home, "Documents") + downPath := filepath.Join(home, "Downloads") + deskPath := filepath.Join(home, "Desktop") + musicPath := filepath.Join(home, "Music") + moviesPath := filepath.Join(home, "Movies") + + caches := filepath.Join(home, "Library", "Caches") + logs := filepath.Join(home, "Library", "Logs") + xcode := filepath.Join(home, "Library", "Developer", "Xcode", "DerivedData") + + trash := filepath.Join(home, ".Trash") + + apps := "/Applications" + photos := filepath.Join(home, "Pictures") + icloud := filepath.Join(home, "Library", "Mobile Documents") + + // Run in parallel + type result struct { + name string + size int64 + } + // Increased buffer for new categories + c := make(chan result) + totalChecks := 12 + + check := func(name, p string) { + c <- result{name, GetDirectorySize(p)} + } + + go check("docs", docPath) + go check("down", downPath) + go check("desk", deskPath) + go check("music", musicPath) + go check("movies", moviesPath) + + go check("caches", caches) + go check("logs", logs) + go check("xcode", xcode) + + go check("trash", trash) + go check("apps", apps) + go check("photos", photos) + go check("icloud", icloud) + + sizes := &CategorySizes{} + + // Get total disk usage to calculate System Data remainder using diskutil (APFS 
aware) + cmd := exec.Command("diskutil", "info", "/") + out, err := cmd.Output() + var totalUsed int64 + + if err == nil { + lines := strings.Split(string(out), "\n") + var containerTotal, containerFree int64 + + for _, line := range lines { + line = strings.TrimSpace(line) + if strings.HasPrefix(line, "Container Total Space:") { + if start := strings.Index(line, "("); start != -1 { + if end := strings.Index(line[start:], " Bytes)"); end != -1 { + bytesStr := line[start+1 : start+end] + containerTotal, _ = strconv.ParseInt(bytesStr, 10, 64) + } + } + } + if strings.HasPrefix(line, "Container Free Space:") { + if start := strings.Index(line, "("); start != -1 { + if end := strings.Index(line[start:], " Bytes)"); end != -1 { + bytesStr := line[start+1 : start+end] + containerFree, _ = strconv.ParseInt(bytesStr, 10, 64) + } + } + } + } + totalUsed = containerTotal - containerFree // In Bytes + } + + var systemSpecific int64 + + for i := 0; i < totalChecks; i++ { + res := <-c + switch res.name { + case "docs": + sizes.Documents = res.size + case "down": + sizes.Downloads = res.size + case "desk": + sizes.Desktop = res.size + case "music": + sizes.Music = res.size + case "movies": + sizes.Movies = res.size + case "caches", "logs", "xcode": + systemSpecific += res.size + case "trash": + sizes.Trash += res.size + case "apps": + sizes.Apps = res.size + case "photos": + sizes.Photos = res.size + case "icloud": + sizes.ICloud = res.size + } + } + + // Calculate System Data + // Ideally: System = Total - (Docs + Down + Desk + Music + Movies + Apps + Photos + iCloud + Trash) + known := sizes.Documents + sizes.Downloads + sizes.Desktop + sizes.Music + sizes.Movies + sizes.Trash + sizes.Apps + sizes.Photos + sizes.ICloud + + var remainder int64 = 0 + if totalUsed > known { + remainder = totalUsed - known + } + + if remainder > systemSpecific { + sizes.System = remainder + } else { + sizes.System = systemSpecific + } + + return sizes, nil +} + +// CleaningEstimates struct moved 
to scanner_common.go + +func GetCleaningEstimates() (*CleaningEstimates, error) { + home, err := os.UserHomeDir() + if err != nil { + return nil, err + } + + // Paths for Flash Clean: Caches, Logs, Trash, Xcode + caches := filepath.Join(home, "Library", "Caches") + logs := filepath.Join(home, "Library", "Logs") + trash := filepath.Join(home, ".Trash") + xcode := filepath.Join(home, "Library", "Developer", "Xcode", "DerivedData") + + // Paths for Deep Clean (proxy): Downloads + downloads := filepath.Join(home, "Downloads") + + type result struct { + name string + size int64 + } + c := make(chan result) + totalChecks := 5 + + check := func(name, p string) { + c <- result{name, GetDirectorySize(p)} + } + + go check("caches", caches) + go check("logs", logs) + go check("trash", trash) + go check("xcode", xcode) + go check("downloads", downloads) + + estimates := &CleaningEstimates{} + + for i := 0; i < totalChecks; i++ { + res := <-c + switch res.name { + case "caches", "logs", "trash", "xcode": + estimates.FlashEst += res.size + case "downloads": + estimates.DeepEst += res.size + } + } + + return estimates, nil +} diff --git a/backend/internal/scanner/scanner_windows.go b/backend/internal/scanner/scanner_windows.go new file mode 100644 index 0000000..87c7900 --- /dev/null +++ b/backend/internal/scanner/scanner_windows.go @@ -0,0 +1,435 @@ +//go:build windows + +package scanner + +import ( + "fmt" + "os" + "path/filepath" + "sort" + "strings" + "sync" + "syscall" + "unsafe" + // Added missing import +) + +// GetDiskUsage using GetDiskFreeSpaceExW +// GetDiskUsage returns usage for all fixed drives +func GetDiskUsage() ([]*DiskUsage, error) { + kernel32 := syscall.NewLazyDLL("kernel32.dll") + getDiskFreeSpaceEx := kernel32.NewProc("GetDiskFreeSpaceExW") + getLogicalDrives := kernel32.NewProc("GetLogicalDrives") + + var usages []*DiskUsage + + // Get logical drives bitmask + ret, _, _ := getLogicalDrives.Call() + if ret == 0 { + return nil, fmt.Errorf("GetLogicalDrives 
failed") + } + drivesBitmask := uint32(ret) + + toGB := func(bytes int64) string { + gb := float64(bytes) / 1024 / 1024 / 1024 + return fmt.Sprintf("%.2f", gb) + } + + for i := 0; i < 26; i++ { + if drivesBitmask&(1< 0 { + usedBytes := totalNumberOfBytes - totalNumberOfFreeBytes + usages = append(usages, &DiskUsage{ + Name: fmt.Sprintf("Local Disk (%s:)", driveLetter), + TotalGB: toGB(totalNumberOfBytes), + UsedGB: toGB(usedBytes), + FreeGB: toGB(totalNumberOfFreeBytes), + }) + } + } + } + + return usages, nil +} + +// GetDirectorySize walks the directory to calculate size (Windows doesn't have `du`) +func GetDirectorySize(path string) int64 { + var size int64 + filepath.WalkDir(path, func(_ string, d os.DirEntry, err error) error { + if err != nil { + return nil + } + if !d.IsDir() { + info, err := d.Info() + if err == nil { + size += info.Size() + } + } + return nil + }) + return size +} + +// FindHeavyFolders finds large directories +func FindHeavyFolders(root string) ([]ScanResult, error) { + // Basic implementation: Walk max 2 levels deep and calculate sizes + var results []ScanResult + + // depth 0 = root + // depth 1 = children of root + // depth 2 = children of children + + entries, err := os.ReadDir(root) + if err != nil { + return nil, err + } + + var wg sync.WaitGroup + var mu sync.Mutex + + for _, entry := range entries { + if entry.IsDir() { + path := filepath.Join(root, entry.Name()) + wg.Add(1) + go func(p string) { + defer wg.Done() + s := GetDirectorySize(p) + mu.Lock() + results = append(results, ScanResult{ + Path: p, + Size: s, + IsDirectory: true, + }) + mu.Unlock() + }(path) + } + } + wg.Wait() + + // Sort by size desc + sort.Slice(results, func(i, j int) bool { + return results[i].Size > results[j].Size + }) + + if len(results) > 50 { + return results[:50], nil + } + return results, nil +} + +func ScanUserDocuments() ([]ScanResult, error) { + home, err := os.UserHomeDir() + if err != nil { + return nil, err + } + + targets := []string{ + 
filepath.Join(home, "Documents"), + filepath.Join(home, "Downloads"), + filepath.Join(home, "Desktop"), + } + + var allResults []ScanResult + for _, t := range targets { + res, _ := FindLargeFiles(t, 10*1024*1024) // 10MB + allResults = append(allResults, res...) + } + + sort.Slice(allResults, func(i, j int) bool { + return allResults[i].Size > allResults[j].Size + }) + + if len(allResults) > 50 { + return allResults[:50], nil + } + return allResults, nil +} + +func ScanSystemData() ([]ScanResult, error) { + home, err := os.UserHomeDir() + if err != nil { + return nil, err + } + + // Windows System/Temp locations + // %Temp%, Prefetch (admin only, careful), AppData/Local/Temp + targets := []string{ + filepath.Join(home, "AppData", "Local", "Temp"), + os.Getenv("TEMP"), + // "C:\\Windows\\Temp", // Requires Admin, maybe skip for now or handle error + } + + var allResults []ScanResult + for _, t := range targets { + if t == "" { + continue + } + res, _ := FindLargeFiles(t, 10*1024*1024) + allResults = append(allResults, res...) 
+ } + + sort.Slice(allResults, func(i, j int) bool { + return allResults[i].Size > allResults[j].Size + }) + + if len(allResults) > 50 { + return allResults[:50], nil + } + return allResults, nil +} + +func GetCategorySizes() (*CategorySizes, error) { + home, err := os.UserHomeDir() + if err != nil { + return nil, err + } + + docPath := filepath.Join(home, "Documents") + downPath := filepath.Join(home, "Downloads") + deskPath := filepath.Join(home, "Desktop") + musicPath := filepath.Join(home, "Music") + moviesPath := filepath.Join(home, "Videos") // Windows uses Videos + photos := filepath.Join(home, "Pictures") + + // AppData is roughly Library + localAppData := filepath.Join(home, "AppData", "Local") + temp := filepath.Join(localAppData, "Temp") + + // Parallel fetch + type result struct { + name string + size int64 + } + c := make(chan result) + // Checks: docs, down, desk, music, movies, temp, photos, archives, vms, games, ai, docker, cache + totalChecks := 13 + + check := func(name, p string) { + c <- result{name, GetDirectorySize(p)} + } + + go check("docs", docPath) + go check("down", downPath) + go check("desk", deskPath) + go check("music", musicPath) + go check("movies", moviesPath) + // Temp is part of Cache now, but let's keep it separate or sum it up + // System/Temp logic from before: + go check("temp", temp) + go check("photos", photos) + + // Scan specific common folders for Archives and VMs + go func() { + // Archives: Zip, Rar, 7z in Downloads and Documents + size := ScanExtensions(downPath, []string{".zip", ".rar", ".7z", ".tar", ".gz", ".xz"}) + size += ScanExtensions(docPath, []string{".zip", ".rar", ".7z", ".tar", ".gz", ".xz"}) + c <- result{"archives", size} + }() + + go func() { + // VMs / Disk Images: ISO, VHDX, VMDK in Downloads and Documents + size := ScanExtensions(downPath, []string{".iso", ".vdi", ".vmdk", ".qcow2", ".vhdx", ".img", ".dsk"}) + size += ScanExtensions(docPath, []string{".iso", ".vdi", ".vmdk", ".qcow2", ".vhdx", 
".img", ".dsk"}) + c <- result{"vms", size} + }() + + // Games + go func() { + var size int64 + // Common Game Paths + paths := []string{ + `C:\Program Files (x86)\Steam\steamapps\common`, + `C:\Program Files\Epic Games`, + `C:\Program Files (x86)\Ubisoft\Ubisoft Game Launcher\games`, + `C:\Program Files\EA Games`, + filepath.Join(home, "AppData", "Roaming", ".minecraft"), + } + for _, p := range paths { + size += GetDirectorySize(p) + } + c <- result{"games", size} + }() + + // AI + go func() { + var size int64 + // 1. Common Installation Paths + paths := []string{ + `C:\ComfyUI`, + `C:\ai\ComfyUI`, + filepath.Join(home, "ComfyUI"), + filepath.Join(home, "stable-diffusion-webui"), + filepath.Join(home, "webui"), + // Common Model Caches + filepath.Join(home, ".cache", "huggingface"), + filepath.Join(home, ".ollama", "models"), + filepath.Join(home, ".lmstudio", "models"), + } + for _, p := range paths { + size += GetDirectorySize(p) + } + + // 2. Loose Model Files (Deep Scan) + // Look for .safetensors, .ckpt, .gguf, .pt, .pth, .bin, .onnx in Downloads and Documents + aiExtensions := []string{".safetensors", ".ckpt", ".gguf", ".pt", ".pth", ".bin", ".onnx"} + size += ScanExtensions(downPath, aiExtensions) + size += ScanExtensions(docPath, aiExtensions) + + c <- result{"ai", size} + }() + + // Docker + go func() { + var size int64 + // Docker Desktop Default WSL Data + dockerPath := filepath.Join(localAppData, "Docker", "wsl", "data", "ext4.vhdx") + info, err := os.Stat(dockerPath) + if err == nil { + size = info.Size() + } + c <- result{"docker", size} + }() + + // Cache (Browser + System Temp) + go func() { + var size int64 + // System Temp + size += GetDirectorySize(os.Getenv("TEMP")) + + // Chrome Cache + size += GetDirectorySize(filepath.Join(localAppData, "Google", "Chrome", "User Data", "Default", "Cache")) + // Edge Cache + size += GetDirectorySize(filepath.Join(localAppData, "Microsoft", "Edge", "User Data", "Default", "Cache")) + // Brave Cache + size += 
GetDirectorySize(filepath.Join(localAppData, "BraveSoftware", "Brave-Browser", "User Data", "Default", "Cache")) + // Opera Cache + size += GetDirectorySize(filepath.Join(localAppData, "Opera Software", "Opera Stable", "Cache")) + // Firefox Cache + size += GetDirectorySize(filepath.Join(localAppData, "Mozilla", "Firefox", "Profiles")) // Scan all profiles for cache? Usually in Local/Mozilla/Firefox/Profiles//cache2 + + // Firefox requires walking profiles in LocalAppData + mozPath := filepath.Join(localAppData, "Mozilla", "Firefox", "Profiles") + entries, _ := os.ReadDir(mozPath) + for _, e := range entries { + if e.IsDir() { + size += GetDirectorySize(filepath.Join(mozPath, e.Name(), "cache2")) + } + } + + c <- result{"cache", size} + }() + + sizes := &CategorySizes{} + + for i := 0; i < totalChecks; i++ { + res := <-c + switch res.name { + case "docs": + sizes.Documents = res.size + case "down": + sizes.Downloads = res.size + case "desk": + sizes.Desktop = res.size + case "music": + sizes.Music = res.size + case "movies": + sizes.Movies = res.size + case "temp": + // Keeping legacy System field for now, maybe map to part of Cache or System logs? 
+ sizes.System = res.size + case "photos": + sizes.Photos = res.size + case "archives": + sizes.Archives = res.size + case "vms": + sizes.VirtualMachines = res.size + case "games": + sizes.Games = res.size + case "ai": + sizes.AI = res.size + case "docker": + sizes.Docker = res.size + case "cache": + sizes.Cache = res.size + } + } + + return sizes, nil +} + +// ScanExtensions walks a directory and sums up size of files with matching extensions +func ScanExtensions(root string, exts []string) int64 { + var total int64 + extMap := make(map[string]bool) + for _, e := range exts { + extMap[strings.ToLower(e)] = true + } + + filepath.WalkDir(root, func(_ string, d os.DirEntry, err error) error { + if err != nil { + return nil + } + if !d.IsDir() { + ext := strings.ToLower(filepath.Ext(d.Name())) + if extMap[ext] { + info, err := d.Info() + if err == nil { + total += info.Size() + } + } + } + return nil + }) + return total +} + +func GetCleaningEstimates() (*CleaningEstimates, error) { + home, err := os.UserHomeDir() + if err != nil { + return nil, err + } + + // Flash Clean: Temp files + temp := filepath.Join(home, "AppData", "Local", "Temp") + + // Deep Clean: Downloads + downloads := filepath.Join(home, "Downloads") + + type result struct { + name string + size int64 + } + c := make(chan result) + + go func() { c <- result{"temp", GetDirectorySize(temp)} }() + go func() { c <- result{"downloads", GetDirectorySize(downloads)} }() + + estimates := &CleaningEstimates{} + + for i := 0; i < 2; i++ { + res := <-c + switch res.name { + case "temp": + estimates.FlashEst = res.size + case "downloads": + estimates.DeepEst = res.size + } + } + return estimates, nil +} diff --git a/backend/internal/scanner/targets_darwin.go b/backend/internal/scanner/targets_darwin.go new file mode 100644 index 0000000..8dce6ae --- /dev/null +++ b/backend/internal/scanner/targets_darwin.go @@ -0,0 +1,38 @@ +//go:build darwin + +package scanner + +import ( + "os" + "path/filepath" +) + +func 
GetScanTargets(category string) []string { + home, _ := os.UserHomeDir() + switch category { + case "apps": + return []string{"/Applications", filepath.Join(home, "Applications")} + case "photos": + return []string{filepath.Join(home, "Pictures")} + case "icloud": + return []string{filepath.Join(home, "Library", "Mobile Documents")} + case "docs": + return []string{filepath.Join(home, "Documents")} + case "downloads": + return []string{filepath.Join(home, "Downloads")} + case "desktop": + return []string{filepath.Join(home, "Desktop")} + case "music": + return []string{filepath.Join(home, "Music")} + case "movies": + return []string{filepath.Join(home, "Movies")} + case "system": + return []string{ + filepath.Join(home, "Library", "Caches"), + filepath.Join(home, "Library", "Logs"), + filepath.Join(home, "Library", "Developer", "Xcode", "DerivedData"), + } + default: + return []string{} + } +} diff --git a/backend/internal/scanner/targets_windows.go b/backend/internal/scanner/targets_windows.go new file mode 100644 index 0000000..9b75b0f --- /dev/null +++ b/backend/internal/scanner/targets_windows.go @@ -0,0 +1,90 @@ +package scanner + +import ( + "os" + "path/filepath" +) + +func GetScanTargets(category string) []string { + home, _ := os.UserHomeDir() + switch category { + case "apps": + // Windows apps are dispersed (Program Files), usually read-only. We don't file-scan them usually. + return []string{ + os.Getenv("ProgramFiles"), + os.Getenv("ProgramFiles(x86)"), + filepath.Join(os.Getenv("LocalAppData"), "Programs"), + } + case "photos": + return []string{filepath.Join(home, "Pictures")} + case "icloud": + // iCloudDrive? 
+ return []string{filepath.Join(home, "iCloudDrive")} + case "docs": + return []string{filepath.Join(home, "Documents")} + case "downloads": + return []string{filepath.Join(home, "Downloads")} + case "desktop": + return []string{filepath.Join(home, "Desktop")} + case "music": + return []string{filepath.Join(home, "Music")} + case "movies": + return []string{filepath.Join(home, "Videos")} + case "system": + return []string{ + filepath.Join(home, "AppData", "Local", "Temp"), + filepath.Join(home, "AppData", "Local", "Microsoft", "Windows", "INetCache"), // IE/Edge cache + filepath.Join(home, "AppData", "Local", "Google", "Chrome", "User Data", "Default", "Cache"), + filepath.Join(home, "AppData", "Local", "Mozilla", "Firefox", "Profiles"), + filepath.Join(home, "AppData", "Local", "BraveSoftware", "Brave-Browser", "User Data", "Default", "Cache"), + filepath.Join(home, "AppData", "Local", "Opera Software", "Opera Stable", "Cache"), + } + case "cache": + return []string{ + os.Getenv("TEMP"), + filepath.Join(home, "AppData", "Local", "Temp"), + filepath.Join(home, "AppData", "Local", "Microsoft", "Windows", "INetCache"), + filepath.Join(home, "AppData", "Local", "Google", "Chrome", "User Data", "Default", "Cache"), + filepath.Join(home, "AppData", "Local", "Mozilla", "Firefox", "Profiles"), + filepath.Join(home, "AppData", "Local", "BraveSoftware", "Brave-Browser", "User Data", "Default", "Cache"), + filepath.Join(home, "AppData", "Local", "Opera Software", "Opera Stable", "Cache"), + } + case "games": + return []string{ + `C:\Program Files (x86)\Steam\steamapps\common`, + `C:\Program Files\Epic Games`, + `C:\Program Files (x86)\Ubisoft\Ubisoft Game Launcher\games`, + `C:\Program Files\EA Games`, + filepath.Join(home, "AppData", "Roaming", ".minecraft"), + } + case "ai": + return []string{ + `C:\ComfyUI`, + `C:\ai\ComfyUI`, + filepath.Join(home, "ComfyUI"), + filepath.Join(home, "stable-diffusion-webui"), + filepath.Join(home, "webui"), + filepath.Join(home, ".cache", 
"huggingface"), + filepath.Join(home, ".ollama", "models"), + filepath.Join(home, ".lmstudio", "models"), + } + case "docker": + return []string{ + filepath.Join(os.Getenv("LocalAppData"), "Docker", "wsl", "data"), + } + case "archives": + // Archives usually scattered, but main ones in Downloads + return []string{ + filepath.Join(home, "Downloads"), + filepath.Join(home, "Documents"), + } + case "vms": + return []string{ + filepath.Join(home, "Downloads"), + filepath.Join(home, "Documents"), + filepath.Join(home, "VirtualBox VMs"), + } + default: + return []string{} + } +} diff --git a/backend/main.go b/backend/main.go new file mode 100644 index 0000000..f26488c --- /dev/null +++ b/backend/main.go @@ -0,0 +1,448 @@ +package main + +import ( + "embed" + "encoding/json" + "fmt" + "io" + "io/fs" + "net/http" + "os" + "os/exec" + "path/filepath" + "sort" + + "github.com/kv/clearnup/backend/internal/apps" + "github.com/kv/clearnup/backend/internal/cleaner" + "github.com/kv/clearnup/backend/internal/platform" + "github.com/kv/clearnup/backend/internal/scanner" +) + +//go:embed all:dist +var distFS embed.FS + +const Port = ":36969" + +func enableCors(w *http.ResponseWriter) { + (*w).Header().Set("Access-Control-Allow-Origin", "*") + (*w).Header().Set("Access-Control-Allow-Methods", "POST, GET, OPTIONS, PUT, DELETE") + (*w).Header().Set("Access-Control-Allow-Headers", "Accept, Content-Type, Content-Length, Accept-Encoding, X-CSRF-Token, Authorization") +} + +func main() { + http.HandleFunc("/api/disk-usage", handleDiskUsage) + http.HandleFunc("/api/scan/user", handleScanUser) + http.HandleFunc("/api/scan/system", handleScanSystem) // Detailed list + http.HandleFunc("/api/scan/sizes", handleScanSizes) // Fast summary + http.HandleFunc("/api/scan/deepest", handleDeepestScan) + + http.HandleFunc("/api/scan/category", handleScanCategory) + http.HandleFunc("/api/purge", handlePurge) + http.HandleFunc("/api/empty-trash", handleEmptyTrash) + http.HandleFunc("/api/clear-cache", 
handleClearCache) + http.HandleFunc("/api/clean-docker", handleCleanDocker) + http.HandleFunc("/api/system-info", handleSystemInfo) + http.HandleFunc("/api/estimates", handleCleaningEstimates) + + // App Uninstaller + http.HandleFunc("/api/apps", handleScanApps) + http.HandleFunc("/api/apps/details", handleAppDetails) + http.HandleFunc("/api/apps/action", handleAppAction) + http.HandleFunc("/api/apps/uninstall", handleAppUninstall) + + // Static File Serving (SPA Support) + // Check if we are running with embedded files or local dist + // Priority: Embedded (Production) -> Local dist (Dev/Preview) + + // Try to get a sub-fs for "dist" from the embedded FS + distRoot, err := fs.Sub(distFS, "dist") + if err == nil { + fmt.Println("📂 Serving embedded static files") + // Check if it's actually populated (sanity check) + if _, err := distRoot.Open("index.html"); err == nil { + fsrv := http.FileServer(http.FS(distRoot)) + http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + if filepath.Ext(r.URL.Path) == "" { + // Read index.html from embedded + index, _ := distRoot.Open("index.html") + stat, _ := index.Stat() + http.ServeContent(w, r, "index.html", stat.ModTime(), index.(io.ReadSeeker)) + return + } + fsrv.ServeHTTP(w, r) + }) + } else { + // Fallback to local ./dist if embedded is empty (e.g. 
dev mode without build) + if _, err := os.Stat("dist"); err == nil { + fmt.Println("📂 Serving static files from local ./dist") + fs := http.FileServer(http.Dir("dist")) + http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + if filepath.Ext(r.URL.Path) == "" { + http.ServeFile(w, r, "dist/index.html") + return + } + fs.ServeHTTP(w, r) + }) + } + } + } + + fmt.Printf("🚀 Antigravity Backend running on http://localhost%s\n", Port) + + // Open Browser if not in development mode + if os.Getenv("APP_ENV") != "development" { + go platform.OpenBrowser("http://localhost" + Port) + } + + if err := http.ListenAndServe(Port, nil); err != nil { + fmt.Printf("Server failed: %s\n", err) + } +} + +type ScanRequest struct { + Category string `json:"category"` // "apps", "photos", "icloud", "docs", "system" +} + +func handleScanCategory(w http.ResponseWriter, r *http.Request) { + enableCors(&w) + if r.Method == "OPTIONS" { + return + } + + var req ScanRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, "Invalid body", http.StatusBadRequest) + return + } + + targets := scanner.GetScanTargets(req.Category) + if len(targets) == 0 { + json.NewEncoder(w).Encode([]scanner.ScanResult{}) + return + } + + var allResults []scanner.ScanResult + for _, t := range targets { + if t == "" { + continue + } + res, _ := scanner.FindLargeFiles(t, 10*1024*1024) // 10MB + allResults = append(allResults, res...) 
+ } + + // Sort + sort.Slice(allResults, func(i, j int) bool { + return allResults[i].Size > allResults[j].Size + }) + if len(allResults) > 50 { + allResults = allResults[:50] + } + + json.NewEncoder(w).Encode(allResults) +} + +func handleOpenSettings(w http.ResponseWriter, r *http.Request) { + enableCors(&w) + if r.Method == "OPTIONS" { + return + } + + if err := platform.OpenSettings(); err != nil { + fmt.Printf("Failed to open settings: %v\n", err) + } + w.WriteHeader(http.StatusOK) +} + +func handleDiskUsage(w http.ResponseWriter, r *http.Request) { + enableCors(&w) + if r.Method == "OPTIONS" { + return + } + + usage, err := scanner.GetDiskUsage() + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(usage) +} + +func handleScanUser(w http.ResponseWriter, r *http.Request) { + enableCors(&w) + if r.Method == "OPTIONS" { + return + } + + files, err := scanner.ScanUserDocuments() + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + json.NewEncoder(w).Encode(files) +} + +func handleScanSizes(w http.ResponseWriter, r *http.Request) { + enableCors(&w) + if r.Method == "OPTIONS" { + return + } + + sizes, err := scanner.GetCategorySizes() + if err != nil { + // Log but return empty + fmt.Println("Size scan error:", err) + json.NewEncoder(w).Encode(map[string]int64{}) + return + } + json.NewEncoder(w).Encode(sizes) +} + +func handleScanSystem(w http.ResponseWriter, r *http.Request) { + enableCors(&w) + if r.Method == "OPTIONS" { + return + } + + files, err := scanner.ScanSystemData() + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + json.NewEncoder(w).Encode(files) +} + +func handleDeepestScan(w http.ResponseWriter, r *http.Request) { + enableCors(&w) + if r.Method == "OPTIONS" { + return + } + + // Default to Documents for now, or parse body for path + home, _ := 
os.UserHomeDir() + target := filepath.Join(home, "Documents") + + folders, err := scanner.FindHeavyFolders(target) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + json.NewEncoder(w).Encode(folders) +} + +type PurgeRequest struct { + Path string `json:"path"` +} + +func handlePurge(w http.ResponseWriter, r *http.Request) { + enableCors(&w) + if r.Method == "OPTIONS" { + return + } + + var req PurgeRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, "Invalid request body", http.StatusBadRequest) + return + } + + if err := cleaner.PurgePath(req.Path); err != nil { + http.Error(w, fmt.Sprintf("Failed to purge: %s", err), http.StatusInternalServerError) + return + } + + w.WriteHeader(http.StatusOK) + json.NewEncoder(w).Encode(map[string]bool{"success": true}) +} + +func handleEmptyTrash(w http.ResponseWriter, r *http.Request) { + enableCors(&w) + if r.Method == "OPTIONS" { + return + } + + if err := platform.EmptyTrash(); err != nil { + http.Error(w, fmt.Sprintf("Cannot empty trash: %v", err), http.StatusInternalServerError) + return + } + + json.NewEncoder(w).Encode(map[string]bool{"success": true}) +} + +func handleClearCache(w http.ResponseWriter, r *http.Request) { + enableCors(&w) + if r.Method == "OPTIONS" { + return + } + + cachePath, err := platform.GetCachePath() + if err != nil { + http.Error(w, "Cannot get cache path", http.StatusInternalServerError) + return + } + + // Get size before clearing + sizeBefore := scanner.GetDirectorySize(cachePath) + + // Clear cache directories (keep the Caches folder itself if possible, or jus remove content) + entries, err := os.ReadDir(cachePath) + if err != nil { + http.Error(w, "Cannot read cache directory", http.StatusInternalServerError) + return + } + + for _, entry := range entries { + itemPath := filepath.Join(cachePath, entry.Name()) + os.RemoveAll(itemPath) + } + + json.NewEncoder(w).Encode(map[string]int64{"cleared": sizeBefore}) +} + 
+func handleCleanDocker(w http.ResponseWriter, r *http.Request) { + enableCors(&w) + if r.Method == "OPTIONS" { + return + } + + dockerPath, err := platform.GetDockerPath() + if err != nil { + json.NewEncoder(w).Encode(map[string]interface{}{ + "cleared": 0, + "message": "Docker not found", + }) + return + } + + // Run docker system prune -af --volumes to clean images, containers, and volumes + cmd := exec.Command(dockerPath, "system", "prune", "-af", "--volumes") + output, err := cmd.CombinedOutput() + + if err != nil { + json.NewEncoder(w).Encode(map[string]interface{}{ + "cleared": 0, + "message": fmt.Sprintf("Docker cleanup failed: %s", err), + }) + return + } + + json.NewEncoder(w).Encode(map[string]interface{}{ + "cleared": 1, + "message": string(output), + }) +} + +func handleSystemInfo(w http.ResponseWriter, r *http.Request) { + enableCors(&w) + if r.Method == "OPTIONS" { + return + } + + info, err := platform.GetSystemInfo() + if err != nil { + http.Error(w, "Failed to get system info", http.StatusInternalServerError) + return + } + + json.NewEncoder(w).Encode(info) +} + +func handleCleaningEstimates(w http.ResponseWriter, r *http.Request) { + enableCors(&w) + if r.Method == "OPTIONS" { + return + } + + estimates, err := scanner.GetCleaningEstimates() + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + json.NewEncoder(w).Encode(estimates) +} + +// App Uninstaller Handlers + +func handleScanApps(w http.ResponseWriter, r *http.Request) { + enableCors(&w) + if r.Method == "OPTIONS" { + return + } + + appsList, err := apps.ScanApps() + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + json.NewEncoder(w).Encode(appsList) +} + +func handleAppDetails(w http.ResponseWriter, r *http.Request) { + enableCors(&w) + if r.Method == "OPTIONS" { + return + } + + type AppDetailsRequest struct { + Path string `json:"path"` + BundleID string `json:"bundleID"` + } + var req AppDetailsRequest + 
if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, "Invalid request body", http.StatusBadRequest) + return + } + + details, err := apps.GetAppDetails(req.Path, req.BundleID) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + json.NewEncoder(w).Encode(details) +} + +func handleAppAction(w http.ResponseWriter, r *http.Request) { + enableCors(&w) + if r.Method == "OPTIONS" { + return + } + + var req struct { + Files []string `json:"files"` + } + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, "Invalid request body", http.StatusBadRequest) + return + } + + if err := apps.DeleteFiles(req.Files); err != nil { + http.Error(w, fmt.Sprintf("Failed to delete files: %s", err), http.StatusInternalServerError) + return + } + + w.WriteHeader(http.StatusOK) + json.NewEncoder(w).Encode(map[string]bool{"success": true}) +} + +func handleAppUninstall(w http.ResponseWriter, r *http.Request) { + enableCors(&w) + if r.Method == "OPTIONS" { + return + } + + var req struct { + Cmd string `json:"cmd"` + } + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, "Invalid request body", http.StatusBadRequest) + return + } + + if err := apps.RunUninstaller(req.Cmd); err != nil { + http.Error(w, fmt.Sprintf("Failed to launch uninstaller: %s", err), http.StatusInternalServerError) + return + } + + w.WriteHeader(http.StatusOK) + json.NewEncoder(w).Encode(map[string]bool{"success": true}) +} diff --git a/backend/verify_output.txt b/backend/verify_output.txt new file mode 100644 index 0000000..44873bd Binary files /dev/null and b/backend/verify_output.txt differ diff --git a/backend/verify_windows.ps1 b/backend/verify_windows.ps1 new file mode 100644 index 0000000..86dc619 --- /dev/null +++ b/backend/verify_windows.ps1 @@ -0,0 +1,25 @@ +$p = Start-Process -FilePath ".\kv-cleanup.exe" -PassThru -NoNewWindow +Start-Sleep -Seconds 3 + +try { + Write-Host "`n=== Disk Usage ===" + 
$disk = Invoke-RestMethod -Uri "http://localhost:36969/api/disk-usage"
+    Write-Host "Total: $($disk.totalGB) GB, Free: $($disk.freeGB) GB"
+
+    Write-Host "`n=== System Info ==="
+    $sys = Invoke-RestMethod -Uri "http://localhost:36969/api/system-info"
+    Write-Host "OS: $($sys.os)"
+    Write-Host "Memory: $($sys.memory)"
+
+    Write-Host "`n=== Apps (First 3) ==="
+    $apps = Invoke-RestMethod -Uri "http://localhost:36969/api/apps"
+    $apps | Select-Object -First 3 | Format-Table Name, Path
+
+    Write-Host "`n=== Scan Downloads ==="
+    $scan = Invoke-RestMethod -Uri "http://localhost:36969/api/scan/category" -Method Post -Body '{"category": "downloads"}' -ContentType "application/json"
+    $scan | Select-Object -First 3 | Format-Table Path, Size
+} catch {
+    Write-Host "Error: $_"
+} finally {
+    Stop-Process -Id $p.Id -Force
+}
diff --git a/build-release.ps1 b/build-release.ps1
new file mode 100644
index 0000000..b9ff84a
--- /dev/null
+++ b/build-release.ps1
@@ -0,0 +1,62 @@
+# build-release.ps1
+# Builds a portable SINGLE-FILE release for Windows and Mac
+
+Write-Host "Starting Portable Release Build..." -ForegroundColor Cyan
+
+# 1. Clean previous build
+if (Test-Path "Release") { Remove-Item "Release" -Recurse -Force }
+if (Test-Path "backend\dist") { Remove-Item "backend\dist" -Recurse -Force }
+New-Item -ItemType Directory -Force -Path "Release" | Out-Null
+New-Item -ItemType Directory -Force -Path "Release\Windows" | Out-Null
+New-Item -ItemType Directory -Force -Path "Release\Mac" | Out-Null
+
+# 2. Build Frontend
+Write-Host "Building Frontend (Vite)..." -ForegroundColor Yellow
+$pkgManager = "pnpm"
+if (-not (Get-Command "pnpm" -ErrorAction SilentlyContinue)) { $pkgManager = "npm" }
+
+Invoke-Expression "$pkgManager install"
+Invoke-Expression "$pkgManager run build"
+
+if (-not (Test-Path "dist")) {
+    Write-Host "Frontend build failed: 'dist' folder not found." -ForegroundColor Red
+    exit 1
+}
+
+# 3. Move dist to backend/dist (for embedding)
+Write-Host "Moving frontend to backend for embedding..." -ForegroundColor Cyan
+Copy-Item -Path "dist" -Destination "backend\dist" -Recurse
+
+# 4. Build Backend
+Write-Host "Building Backend..." -ForegroundColor Yellow
+
+# Windows Build
+Write-Host " Windows (amd64)..." -ForegroundColor Cyan
+$env:GOOS = "windows"; $env:GOARCH = "amd64"
+go build -ldflags "-s -w -H=windowsgui" -o "Release\Windows\Antigravity.exe" backend/main.go
+
+# Mac Build (Cross-compile)
+Write-Host " macOS (amd64 & arm64)..." -ForegroundColor Cyan
+$env:GOOS = "darwin"; $env:GOARCH = "amd64"
+go build -ldflags "-s -w" -o "Release\Mac\Antigravity-Intel" backend/main.go
+
+$env:GOARCH = "arm64"
+go build -ldflags "-s -w" -o "Release\Mac\Antigravity-AppleSilicon" backend/main.go
+
+# Cleanup backend/dist
+Remove-Item "backend\dist" -Recurse -Force
+
+# 5. Success Message & Zipping
+Write-Host "Build Complete!" -ForegroundColor Green
+
+# Zip Windows
+if (Test-Path "Release\Antigravity-Windows.zip") { Remove-Item "Release\Antigravity-Windows.zip" }
+Compress-Archive -Path "Release\Windows\*" -DestinationPath "Release\Antigravity-Windows.zip" -Force
+Write-Host "Created Windows Zip: Release\Antigravity-Windows.zip" -ForegroundColor Green
+
+# Zip Mac
+if (Test-Path "Release\Antigravity-Mac.zip") { Remove-Item "Release\Antigravity-Mac.zip" }
+Compress-Archive -Path "Release\Mac\*" -DestinationPath "Release\Antigravity-Mac.zip" -Force
+Write-Host "Created Mac Zip: Release\Antigravity-Mac.zip" -ForegroundColor Green
+
+Write-Host "Artifacts are in the 'Release' folder." -ForegroundColor White
diff --git a/build/icon.png b/build/icon.png
new file mode 100644
index 0000000..98e8c92
Binary files /dev/null and b/build/icon.png differ
diff --git a/electron/features/cleaner.ts b/electron/features/cleaner.ts
new file mode 100644
index 0000000..cddc8d4
--- /dev/null
+++ b/electron/features/cleaner.ts
@@ -0,0 +1,141 @@
+import fs from 'fs/promises';
+import path from 'path';
+import os from 'os';
+
+// Trash Compactor: System Sanitation
+
+export async function clearCaches() {
+  const cacheDir = path.join(os.homedir(), 'Library/Caches');
+  // Be careful! Cleaning everything here might break apps.
+  // We should target specific known large caches or stale ones.
+  // For PRD: "Empty User Caches"
+
+  try {
+    const entries = await fs.readdir(cacheDir);
+    let freedSpace = 0;
+
+    for (const entry of entries) {
+      const fullPath = path.join(cacheDir, entry);
+      // We might want to just delete the contents, not the folder itself, or assume app recreates it.
+      // Safer: delete recursively.
+      await fs.rm(fullPath, { recursive: true, force: true });
+    }
+    return true;
+  } catch (error) {
+    console.error('Error clearing caches', error);
+    return false;
+  }
+}
+
+export async function purgePath(targetPath: string) {
+  try {
+    await fs.rm(targetPath, { recursive: true, force: true });
+    return true;
+  } catch (e) {
+    console.error(`Failed to purge ${targetPath}`, e);
+    return false;
+  }
+}
+
+export async function emptyTrash() {
+  // Uses apple script to force empty trash to avoid "in use" errors if possible,
+  // or `rm -rf ~/.Trash/*` (dangerous!).
+  // Safe way: osascript
+
+  try {
+    const { exec } = await import('child_process');
+    exec(`osascript -e 'tell application "Finder" to empty trash'`);
+    return true;
+  } catch (e) {
+    return false;
+  }
+}
+
+export async function cleanupDocker() {
+  try {
+    const { exec } = await import('child_process');
+    const util = await import('util');
+    const execAsync = util.promisify(exec);
+
+    // Prune everything: stopped containers, all images (dangling + unused), volumes, networks
+    await execAsync('docker system prune -a --volumes -f');
+    return true;
+  } catch (e) {
+    console.error('Failed to cleanup docker:', e);
+    return false;
+  }
+}
+
+export async function cleanupTmp() {
+  const tmpDir = os.tmpdir();
+  let success = true;
+
+  try {
+    const entries = await fs.readdir(tmpDir);
+    for (const entry of entries) {
+      try {
+        // Be careful not to delete hidden system files if possible, but user asked for "complete"
+        await fs.rm(path.join(tmpDir, entry), { recursive: true, force: true });
+      } catch (e) {
+        // Ignore individual file errors (locked files)
+        console.warn(`Skipped ${entry}`);
+      }
+    }
+  } catch (e) {
+    console.error('Failed to access tmp dir:', e);
+    success = false;
+  }
+
+  return success;
+}
+
+export async function cleanupXcode() {
+  try {
+    const home = os.homedir();
+    // Remove DerivedData and iOS DeviceSupport (Aggressive!)
+    const paths = [
+      path.join(home, 'Library/Developer/Xcode/DerivedData'),
+      path.join(home, 'Library/Developer/Xcode/iOS DeviceSupport'),
+      path.join(home, 'Library/Developer/Xcode/Archives'),
+      path.join(home, 'Library/Caches/com.apple.dt.Xcode')
+    ];
+
+    for (const p of paths) {
+      try {
+        await fs.rm(p, { recursive: true, force: true });
+      } catch (e) {
+        console.warn(`Failed to clean ${p}`, e);
+      }
+    }
+    return true;
+  } catch (e) {
+    console.error('Failed to cleanup Xcode:', e);
+    return false;
+  }
+}
+
+export async function cleanupTurnkey() {
+  try {
+    const home = os.homedir();
+    // Clean package manager caches
+    const paths = [
+      path.join(home, '.npm/_cacache'),
+      path.join(home, '.yarn/cache'),
+      path.join(home, 'Library/pnpm/store'), // Mac default for pnpm store if not configured otherwise
+      path.join(home, '.cache/yarn'),
+      path.join(home, '.gradle/caches')
+    ];
+
+    for (const p of paths) {
+      try {
+        await fs.rm(p, { recursive: true, force: true });
+      } catch (e) {
+        console.warn(`Failed to clean ${p}`, e);
+      }
+    }
+    return true;
+  } catch (e) {
+    console.error('Failed to cleanup package managers:', e);
+    return false;
+  }
+}
diff --git a/electron/features/enforcer.ts b/electron/features/enforcer.ts
new file mode 100644
index 0000000..c6597fd
--- /dev/null
+++ b/electron/features/enforcer.ts
@@ -0,0 +1,45 @@
+import fs from 'fs';
+import path from 'path';
+import { dialog, app } from 'electron';
+
+// The pnpm Force
+// Watches for package-lock.json or yarn.lock and warns the user.
+
+export function startEnforcement(projectPath: string) {
+  // We can use fs.watch, but strictly we might want to watch the specific directory
+  // where the user is working.
+  // For this app, maybe we watch the projects user adds?
+  // Or do we implement a global watcher? The PRD implies "actively monitor".
+  // Monitoring the entire filesystem is expensive.
+  // We'll assume we monitor specific "Active Projects" added to the app.
+
+  // Implementation note: This function would be called for each watched project.
+
+  const watcher = fs.watch(projectPath, (eventType, filename) => {
+    if (filename === 'package-lock.json' || filename === 'yarn.lock') {
+      const lockFile = path.join(projectPath, filename);
+      if (fs.existsSync(lockFile)) {
+        handleViolation(lockFile);
+      }
+    }
+  });
+
+  return watcher;
+}
+
+function handleViolation(lockFile: string) {
+  dialog.showMessageBox({
+    type: 'warning',
+    title: 'The pnpm Force',
+    message: `Illegal lockfile detected: ${path.basename(lockFile)}`,
+    detail: 'You must use pnpm! Do you want to convert now?',
+    buttons: ['Convert to pnpm', 'Delete Lockfile', 'Ignore'],
+    defaultId: 0
+  }).then(result => {
+    if (result.response === 0) {
+      // Run conversion
+    } else if (result.response === 1) {
+      fs.unlinkSync(lockFile);
+    }
+  });
+}
diff --git a/electron/features/scanner.ts b/electron/features/scanner.ts
new file mode 100644
index 0000000..91689bb
--- /dev/null
+++ b/electron/features/scanner.ts
@@ -0,0 +1,191 @@
+import fs from 'fs/promises';
+import path from 'path';
+import os from 'os';
+
+interface ScanResult {
+  path: string;
+  size: number;
+  lastAccessed: Date;
+  type: 'node_modules' | 'vendor' | 'venv';
+}
+
+export async function scanDirectory(rootDir: string, maxDepth: number = 5): Promise<ScanResult[]> {
+  const results: ScanResult[] = [];
+
+  async function traverse(currentPath: string, depth: number) {
+    if (depth > maxDepth) return;
+
+    try {
+      const entries = await fs.readdir(currentPath, { withFileTypes: true });
+
+      for (const entry of entries) {
+        const fullPath = path.join(currentPath, entry.name);
+
+        if (entry.isDirectory()) {
+          if (entry.name === 'node_modules' || entry.name === 'vendor' || entry.name === '.venv') {
+            // Found a target
+            try {
+              const stats = await fs.stat(fullPath);
+              results.push({
+                path: fullPath,
+                size: 0, // Calculating size is expensive, might do lazily or separate task
+                lastAccessed: stats.atime,
+                type: entry.name as any
+              });
+              // Don't traverse inside node_modules
+              continue;
+            } catch (e) {
+              console.error(`Error stat-ing ${fullPath}`, e);
+            }
+          } else if (!entry.name.startsWith('.')) {
+            // Recurse normal directories
+            await traverse(fullPath, depth + 1);
+          }
+        }
+      }
+    } catch (error) {
+      console.error(`Error scanning ${currentPath}`, error);
+    }
+  }
+
+  await traverse(rootDir, 0);
+  return results;
+}
+
+export async function getFolderSize(folderPath: string): Promise<number> {
+  let total = 0;
+  try {
+    const stats = await fs.stat(folderPath);
+    if (stats.isFile()) return stats.size;
+
+    const files = await fs.readdir(folderPath, { withFileTypes: true });
+    for (const file of files) {
+      total += await getFolderSize(path.join(folderPath, file.name));
+    }
+  } catch (e) {
+    // ignore errors
+  }
+  return total;
+}
+
+export interface DeepScanResult {
+  path: string;
+  size: number;
+  isDirectory: boolean;
+}
+
+export async function findLargeFiles(rootDir: string, threshold: number = 100 * 1024 * 1024): Promise<DeepScanResult[]> {
+  const results: DeepScanResult[] = [];
+
+  async function traverse(currentPath: string) {
+    try {
+      const stats = await fs.stat(currentPath);
+      if (stats.size > threshold && !stats.isDirectory()) {
+        results.push({ path: currentPath, size: stats.size, isDirectory: false });
+        return;
+      }
+
+      if (stats.isDirectory()) {
+        // SKIP node_modules to prevent self-deletion of the running app!
+        if (path.basename(currentPath) === 'node_modules') return;
+
+        const entries = await fs.readdir(currentPath, { withFileTypes: true });
+        for (const entry of entries) {
+          if (entry.name.startsWith('.') && entry.name !== '.Trash') continue;
+          await traverse(path.join(currentPath, entry.name));
+        }
+      }
+    } catch (e) { /* skip */ }
+  }
+
+  await traverse(rootDir);
+  return results.sort((a, b) => b.size - a.size);
+}
+
+export async function getDeepDiveSummary() {
+  const home = os.homedir();
+  const targets = [
+    path.join(home, 'Downloads'),
+    path.join(home, 'Documents'),
+    path.join(home, 'Desktop'),
+    path.join(home, 'Library/Application Support'),
+  ];
+
+  const results: DeepScanResult[] = [];
+  for (const t of targets) {
+    console.log(`Scanning ${t}...`);
+    const large = await findLargeFiles(t, 50 * 1024 * 1024); // 50MB+
+    console.log(`Found ${large.length} large files in ${t}`);
+    results.push(...large);
+  }
+  return results.slice(0, 20); // Top 20
+}
+
+import { exec } from 'child_process';
+import util from 'util';
+const execPromise = util.promisify(exec);
+
+export async function getDiskUsage() {
+  try {
+    // macOS/Linux: df -k / gives 1K-blocks
+    const { stdout } = await execPromise('df -k /');
+    const lines = stdout.trim().split('\n');
+    // Filesystem 1024-blocks Used Available Capacity iused ifree %iused Mounted on
+    // /dev/disk3s1s1 488245288 15266888 308805360 5% 350280 1957260560 0% /
+    if (lines.length < 2) return null;
+
+    const parts = lines[1].split(/\s+/);
+    // parts[1] is total in 1K blocks
+    // parts[2] is used
+    // parts[3] is available
+    const total = parseInt(parts[1]) * 1024;
+    const used = parseInt(parts[2]) * 1024;
+    const available = parseInt(parts[3]) * 1024;
+
+    return {
+      totalGB: (total / 1024 / 1024 / 1024).toFixed(2),
+      usedGB: (used / 1024 / 1024 / 1024).toFixed(2),
+      freeGB: (available / 1024 / 1024 / 1024).toFixed(2)
+    };
+  } catch (e) {
+    console.error("Error getting disk usage:", e);
+    return null;
+  }
+}
+
+export async function findHeavyFolders(rootDir: string): Promise<DeepScanResult[]> {
+  try {
+    console.log(`Deepest scan on: ${rootDir}`);
+    // du -k -d 2: report size in KB, max depth 2
+    // sort -nr: numeric reverse sort
+    // head -n 50: top 50
+    const { stdout } = await execPromise(`du -k -d 2 "${rootDir}" | sort -nr | head -n 50`);
+    const lines = stdout.trim().split('\n');
+
+    const results = lines.map(line => {
+      // Trim leading whitespace
+      const trimmed = line.trim();
+      // Split by first whitespace only
+      const firstSpace = trimmed.indexOf('\t'); // du output is usually tab separated or space
+      // Actually du output on mac is "size<TAB>path"
+
+      // Robust splitting for size and path
+      const match = trimmed.match(/^(\d+)\s+(.+)$/);
+      if (!match) return null;
+
+      const sizeK = parseInt(match[1]);
+      const fullPath = match[2];
+
+      return {
+        path: fullPath,
+        size: sizeK * 1024, // Convert KB to Bytes
+        isDirectory: true
+      };
+    }).filter(item => item !== null && item.path !== rootDir) as DeepScanResult[];
+
+    return results;
+  } catch (e) {
+    console.error("Deepest scan failed:", e);
+    return [];
+  }
+}
diff --git a/electron/features/updater.ts b/electron/features/updater.ts
new file mode 100644
index 0000000..5b54090
--- /dev/null
+++ b/electron/features/updater.ts
@@ -0,0 +1,43 @@
+import { exec } from 'child_process';
+import util from 'util';
+
+const execAsync = util.promisify(exec);
+
+// Stasis Field: macOS Update Control
+// Requires sudo for most operations.
+
+export async function disableAutoUpdates(password?: string) {
+  // Command to disable auto-update
+  // defaults write /Library/Preferences/com.apple.SoftwareUpdate AutomaticCheckEnabled -bool false
+  // softwareupdate --schedule off
+
+  const cmds = [
+    'sudo -S softwareupdate --schedule off',
+    'sudo -S defaults write /Library/Preferences/com.apple.SoftwareUpdate AutomaticCheckEnabled -bool false',
+    'sudo -S defaults write /Library/Preferences/com.apple.SoftwareUpdate AutomaticDownload -bool false',
+    'sudo -S defaults write /Library/Preferences/com.apple.commerce AutoUpdate -bool false'
+  ];
+
+  // Note: Handling password securely is tricky via IPC.
+  // Usually we prompt via a sudo-capable executor (like osascript with administrator privileges).
+
+  try {
+    await execWithSudo('softwareupdate --schedule off');
+    // More commands...
+    return true;
+  } catch (error) {
+    console.error('Failed to disable updates', error);
+    return false;
+  }
+}
+
+async function execWithSudo(command: string) {
+  const script = `do shell script "${command}" with administrator privileges`;
+  return execAsync(`osascript -e '${script}'`);
+}
+
+export async function ignoreUpdate(label: string) {
+  // softwareupdate --ignore