cmd/upspin-audit: remove from exp repo

Moving it to the main repository.

Update upspin/upspin#560

Change-Id: Ie778320700e64559c564af5c0c324ffbb45a3598
Reviewed-on: https://upspin-review.googlesource.com/17660
Reviewed-by: Andrew Gerrand <adg@golang.org>
diff --git a/cmd/upspin-audit/bytesize.go b/cmd/upspin-audit/bytesize.go
deleted file mode 100644
index 5a892ca..0000000
--- a/cmd/upspin-audit/bytesize.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2017 The Upspin Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package main
-
-import "fmt"
-
-// ByteSize provides a way to make numbers format in nice compact form.
-// Convert a number to ByteSize and print it using its String method to see
-// 2392685154 print as 2.23GB.
-type ByteSize float64
-
-const (
-	_           = iota // ignore first value by assigning to blank identifier
-	KB ByteSize = 1 << (10 * iota)
-	MB
-	GB
-	TB
-	PB
-	EB
-	ZB
-	YB
-)
-
-func (b ByteSize) String() string {
-	switch {
-	case b >= YB:
-		return fmt.Sprintf("%.2fYB", b/YB)
-	case b >= ZB:
-		return fmt.Sprintf("%.2fZB", b/ZB)
-	case b >= EB:
-		return fmt.Sprintf("%.2fEB", b/EB)
-	case b >= PB:
-		return fmt.Sprintf("%.2fPB", b/PB)
-	case b >= TB:
-		return fmt.Sprintf("%.2fTB", b/TB)
-	case b >= GB:
-		return fmt.Sprintf("%.2fGB", b/GB)
-	case b >= MB:
-		return fmt.Sprintf("%.2fMB", b/MB)
-	case b >= KB:
-		return fmt.Sprintf("%.2fKB", b/KB)
-	}
-	return fmt.Sprintf("%.2fB", b)
-}
diff --git a/cmd/upspin-audit/deletegarbage.go b/cmd/upspin-audit/deletegarbage.go
deleted file mode 100644
index 913bcbd..0000000
--- a/cmd/upspin-audit/deletegarbage.go
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright 2017 The Upspin Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package main
-
-import (
-	"flag"
-	"os"
-	"strings"
-
-	"upspin.io/bind"
-	"upspin.io/errors"
-	"upspin.io/upspin"
-)
-
-func (s *State) deleteGarbage(args []string) {
-	const help = `
-Audit delete-garbage deletes garbage blocks as listed by the most recent
-run of find-garbage. It operates on the store endpoint of the current user.
-
-It must be run as the same Upspin user as the store server itself,
-as only that user has permission to delete blocks.
-
-Misuse of this command may result in permanent data loss. Use with caution.
-`
-	fs := flag.NewFlagSet("delete-garbage", flag.ExitOnError)
-	dataDir := dataDirFlag(fs)
-	s.ParseFlags(fs, args, help, "audit delete-garbage")
-
-	if fs.NArg() != 0 {
-		fs.Usage()
-		os.Exit(2)
-	}
-
-	for _, fi := range s.latestFilesWithPrefix(*dataDir, garbageFilePrefix) {
-		if fi.Addr != s.Config.StoreEndpoint().NetAddr {
-			// Only delete from the store endpoint of the current user.
-			continue
-		}
-		garbage, err := s.readItems(fi.Path)
-		if err != nil {
-			s.Exit(err)
-		}
-		store, err := bind.StoreServer(s.Config, s.Config.StoreEndpoint())
-		if err != nil {
-			s.Exit(err)
-		}
-		const numWorkers = 10
-		d := deleter{
-			State: s,
-			store: store,
-			refs:  make(chan upspin.Reference),
-			stop:  make(chan bool, numWorkers),
-		}
-		for i := 0; i < numWorkers; i++ {
-			go d.worker()
-		}
-	loop:
-		for ref := range garbage {
-			if strings.HasPrefix(string(ref), rootRefPrefix) {
-				// Don't ever collect root backups.
-				continue
-			}
-			select {
-			case d.refs <- ref:
-			case <-d.stop:
-				break loop
-			}
-		}
-		close(d.refs)
-	}
-}
-
-// deleter holds the state of delete-garbage workers.
-type deleter struct {
-	State *State
-	store upspin.StoreServer
-	refs  chan upspin.Reference
-	stop  chan bool
-}
-
-// worker receives refs from refs and deletes them from store. If the store
-// return a permission error then worker sends a value to stop.
-func (d *deleter) worker() {
-	for ref := range d.refs {
-		err := d.store.Delete(ref)
-		if err != nil {
-			d.State.Fail(err)
-			// Stop the entire process if we get a permission error;
-			// we likely are running as the wrong user.
-			if errors.Is(errors.Permission, err) {
-				d.stop <- true
-				return
-			}
-		}
-	}
-}
diff --git a/cmd/upspin-audit/findgarbage.go b/cmd/upspin-audit/findgarbage.go
deleted file mode 100644
index 8ebf251..0000000
--- a/cmd/upspin-audit/findgarbage.go
+++ /dev/null
@@ -1,116 +0,0 @@
-// Copyright 2017 The Upspin Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package main
-
-import (
-	"flag"
-	"fmt"
-	"os"
-	"path/filepath"
-	"strings"
-
-	"upspin.io/upspin"
-)
-
-func (s *State) findGarbage(args []string) {
-	const help = `
-Audit find-garbage analyses the output of scan-dir and scan-store to finds
-blocks that are present in the store server but not referred to by the scanned
-directory trees.
-`
-	fs := flag.NewFlagSet("find-garbage", flag.ExitOnError)
-	dataDir := dataDirFlag(fs)
-	s.ParseFlags(fs, args, help, "audit find-garbage")
-
-	if fs.NArg() != 0 {
-		fs.Usage()
-		os.Exit(2)
-	}
-
-	if err := os.MkdirAll(*dataDir, 0700); err != nil {
-		s.Exit(err)
-	}
-
-	// Iterate through the files in dataDir and collect a set of the latest
-	// files for each dir endpoint/tree and store endpoint.
-	latest := s.latestFilesWithPrefix(*dataDir, storeFilePrefix, dirFilePrefix)
-
-	// Print a summary of the files we found.
-	nDirs, nStores := 0, 0
-	fmt.Println("Found data for these store endpoints: (scan-store output)")
-	for _, fi := range latest {
-		if fi.User == "" {
-			fmt.Printf("\t%s\t%s\n", fi.Time.Format(timeFormat), fi.Addr)
-			nStores++
-		}
-	}
-	if nStores == 0 {
-		fmt.Println("\t(none)")
-	}
-	fmt.Println("Found data for these user trees and store endpoints: (scan-dir output)")
-	for _, fi := range latest {
-		if fi.User != "" {
-			fmt.Printf("\t%s\t%s\t%s\n", fi.Time.Format(timeFormat), fi.Addr, fi.User)
-			nDirs++
-		}
-	}
-	if nDirs == 0 {
-		fmt.Println("\t(none)")
-	}
-	fmt.Println()
-
-	if nDirs == 0 || nStores == 0 {
-		s.Exitf("nothing to do; run scan-store and scan-dir first")
-	}
-
-	// Look for garbage references and summarize them.
-	for _, store := range latest {
-		if store.User != "" {
-			continue // Ignore dirs.
-		}
-		storeItems, err := s.readItems(store.Path)
-		if err != nil {
-			s.Exit(err)
-		}
-		dirsMissing := make(map[upspin.Reference]int64)
-		for ref, size := range storeItems {
-			dirsMissing[ref] = size
-		}
-		var users []string
-		for _, dir := range latest {
-			if dir.User == "" {
-				continue // Ignore stores.
-			}
-			if store.Addr != dir.Addr {
-				continue
-			}
-			if dir.Time.Before(store.Time) {
-				s.Exitf("scan-store must be performed before all scan-dir operations\n"+
-					"scan-dir output in\n\t%s\npredates scan-store output in\n\t%s",
-					filepath.Base(dir.Path), filepath.Base(store.Path))
-			}
-			users = append(users, string(dir.User))
-			dirItems, err := s.readItems(dir.Path)
-			if err != nil {
-				s.Exit(err)
-			}
-			storeMissing := make(map[upspin.Reference]int64)
-			for ref, size := range dirItems {
-				if _, ok := storeItems[ref]; !ok {
-					storeMissing[ref] = size
-				}
-				delete(dirsMissing, ref)
-			}
-			if len(storeMissing) > 0 {
-				fmt.Printf("Store %q missing %d references present in %q.\n", store.Addr, len(storeMissing), dir.User)
-			}
-		}
-		if len(dirsMissing) > 0 {
-			fmt.Printf("Store %q contains %d references not present in these trees:\n\t%s\n", store.Addr, len(dirsMissing), strings.Join(users, "\n\t"))
-			file := filepath.Join(*dataDir, fmt.Sprintf("%s%s_%d", garbageFilePrefix, store.Addr, store.Time.Unix()))
-			s.writeItems(file, itemMapToSlice(dirsMissing))
-		}
-	}
-}
diff --git a/cmd/upspin-audit/main.go b/cmd/upspin-audit/main.go
deleted file mode 100644
index 3b0f5fb..0000000
--- a/cmd/upspin-audit/main.go
+++ /dev/null
@@ -1,304 +0,0 @@
-// Copyright 2017 The Upspin Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Upspin-audit provides subcommands for auditing storage consumption.
-// It has several subcommands that should be used in a way yet to be
-// determined.
-package main
-
-// TODO:
-// - add failsafes to avoid misuse of delete-garbage
-// - add a command that is the reverse of find-garbage (find-missing?)
-// - add a tidy command to remove data from old scans
-
-import (
-	"bufio"
-	"flag"
-	"fmt"
-	"log"
-	"os"
-	"path/filepath"
-	"sort"
-	"strconv"
-	"strings"
-	"time"
-
-	"upspin.io/config"
-	"upspin.io/errors"
-	"upspin.io/flags"
-	"upspin.io/subcmd"
-	"upspin.io/transports"
-	"upspin.io/upspin"
-	"upspin.io/version"
-)
-
-const (
-	timeFormat    = "2006-01-02 15:04:05"
-	rootRefPrefix = "tree.root."
-
-	dirFilePrefix     = "dir_"
-	storeFilePrefix   = "store_"
-	garbageFilePrefix = "garbage_"
-)
-
-type State struct {
-	*subcmd.State
-}
-
-const help = `Upspin-audit provides subcommands for auditing storage consumption.
-
-The subcommands are:
-
-scan-dir
-scan-store
-	Scan the directory and store servers, creating a list of blocks
-	each uses, and report the total storage held by those blocks.
-
-find-garbage
-	Use the results of scan-dir and scan-store operations to create a list
-	of blocks that are present in a store server but not referenced
-	by the scanned directory servers.
-
-delete-garbage
-	Delete the blocks found by find-garbage from the store server.
-
-To delete the garbage references in a given store server:
-1. Run scan-store (as the store server user) to generate a list of references
-   to blocks in the store server.
-2. Run scan-dir for each Upspin tree that stores data in the store server (as
-   the Upspin users that own those trees) to generate lists of block
-   references mentioned by those trees.
-3. Run find-garbage to compile a list of references that are in the scan-store
-   output but not in the combined output of the scan-dir runs.
-4. Run delete-garbage (as the store server user) to delete the blocks in the
-   find-garbage output.
-`
-
-func main() {
-	const name = "audit"
-
-	log.SetFlags(0)
-	log.SetPrefix("upspin-audit: ")
-	flag.Usage = usage
-	flags.ParseArgsInto(flag.CommandLine, os.Args[1:], flags.Client, "version")
-
-	if flags.Version {
-		fmt.Fprint(os.Stdout, version.Version())
-		os.Exit(2)
-	}
-
-	if flag.NArg() < 1 {
-		usage()
-	}
-	s := &State{
-		State: subcmd.NewState(name),
-	}
-
-	cfg, err := config.FromFile(flags.Config)
-	if err != nil {
-		s.Exit(err)
-	}
-	transports.Init(cfg)
-	s.State.Init(cfg)
-
-	switch flag.Arg(0) {
-	case "scan-dir":
-		s.scanDirectories(flag.Args()[1:])
-	case "scan-store":
-		s.scanStore(flag.Args()[1:])
-	case "find-garbage":
-		s.findGarbage(flag.Args()[1:])
-	case "delete-garbage":
-		s.deleteGarbage(flag.Args()[1:])
-	default:
-		usage()
-	}
-
-	s.ExitNow()
-}
-
-func usage() {
-	fmt.Fprintln(os.Stderr, help)
-	fmt.Fprintln(os.Stderr, "Usage of upspin audit:")
-	fmt.Fprintln(os.Stderr, "\tupspin [globalflags] audit <command> [flags] ...")
-	fmt.Fprintln(os.Stderr, "Commands: scan-dir, scan-store, find-garbage, delete-garbage")
-	fmt.Fprintln(os.Stderr, "Global flags:")
-	flag.PrintDefaults()
-	os.Exit(2)
-}
-
-// dataDirFlag returns a string pointer bound to a new flag that specifies the data directory.
-// Done here so the definition can be common among the commands.
-func dataDirFlag(fs *flag.FlagSet) *string {
-	var dataDir string
-	fs.StringVar(&dataDir, "data", filepath.Join(os.Getenv("HOME"), "upspin", "audit"), "`directory` storing scan data")
-	return &dataDir
-}
-
-// writeItems sorts and writes a list of reference/size pairs to file.
-func (s *State) writeItems(file string, items []upspin.ListRefsItem) {
-	sort.Slice(items, func(i, j int) bool { return items[i].Ref < items[j].Ref })
-
-	f, err := os.Create(file)
-	if err != nil {
-		s.Exit(err)
-	}
-	defer func() {
-		if err := f.Close(); err != nil {
-			s.Exit(err)
-		}
-	}()
-	w := bufio.NewWriter(f)
-	for _, ri := range items {
-		if _, err := fmt.Fprintf(w, "%q %d\n", ri.Ref, ri.Size); err != nil {
-			s.Exit(err)
-		}
-	}
-	if err := w.Flush(); err != nil {
-		s.Exit(err)
-	}
-}
-
-// readItems reads a list of reference/size pairs from the given file and
-// returns them as a map. The asymmetry with writeItems, which takes a slice,
-// is to fit the most common usage pattern.
-func (s *State) readItems(file string) (map[upspin.Reference]int64, error) {
-	f, err := os.Open(file)
-	if err != nil {
-		return nil, err
-	}
-	defer f.Close()
-	sc := bufio.NewScanner(f)
-	items := make(map[upspin.Reference]int64)
-	for sc.Scan() {
-		line := sc.Text()
-		i := strings.LastIndex(line, " ")
-		if i < 0 {
-			return nil, errors.Errorf("malformed line in %q: %q", file, line)
-		}
-		quotedRef, sizeString := line[:i], line[i+1:]
-
-		ref, err := strconv.Unquote(quotedRef)
-		if err != nil {
-			return nil, errors.Errorf("malformed ref in %q: %v", file, err)
-		}
-		size, err := strconv.ParseInt(sizeString, 10, 64)
-		if err != nil {
-			return nil, errors.Errorf("malformed size in %q: %v", file, err)
-		}
-		items[upspin.Reference(ref)] = size
-	}
-	if err := sc.Err(); err != nil {
-		return nil, err
-	}
-	return items, nil
-}
-
-func itemMapToSlice(m map[upspin.Reference]int64) (items []upspin.ListRefsItem) {
-	for ref, size := range m {
-		items = append(items, upspin.ListRefsItem{Ref: ref, Size: size})
-	}
-	return
-}
-
-// fileInfo holds a description of a reference list file written by scan-store
-// or scan-dir. It is derived from the name of the file, not its contents.
-type fileInfo struct {
-	Path string
-	Addr upspin.NetAddr
-	User upspin.UserName // empty for store
-	Time time.Time
-}
-
-// latestFilesWithPrefix returns the most recently generated files in dir that
-// have that have the given prefixes.
-func (s *State) latestFilesWithPrefix(dir string, prefixes ...string) (files []fileInfo) {
-	paths, err := filepath.Glob(filepath.Join(dir, "*"))
-	if err != nil {
-		s.Exit(err)
-	}
-	type latestKey struct {
-		Addr upspin.NetAddr
-		User upspin.UserName // empty for store
-	}
-	latest := make(map[latestKey]fileInfo)
-	for _, file := range paths {
-		fi, err := filenameToFileInfo(file, prefixes...)
-		if err == errIgnoreFile {
-			continue
-		}
-		if err != nil {
-			s.Exit(err)
-		}
-		k := latestKey{
-			Addr: fi.Addr,
-			User: fi.User,
-		}
-		if cur, ok := latest[k]; ok && cur.Time.After(fi.Time) {
-			continue
-		}
-		latest[k] = fi
-	}
-	for _, fi := range latest {
-		files = append(files, fi)
-	}
-	return files
-}
-
-// errIgnoreFile is returned from filenameToFileInfo to signal that the given
-// file name is not one generated by scan-dir or scan-store. It should be handled
-// by callers to filenameToFileInfo and is not to be seen by users.
-var errIgnoreFile = errors.Str("not a file we're interested in")
-
-// filenameToFileInfo takes a file name generated by scan-dir or scan-store and
-// returns the information held by that file name as a fileInfo.
-func filenameToFileInfo(file string, prefixes ...string) (fi fileInfo, err error) {
-	fi.Path = file
-	file = filepath.Base(file)
-	s := file // We will consume this string.
-
-	// Check and trim prefix.
-	ok := false
-	for _, p := range prefixes {
-		if strings.HasPrefix(s, p) {
-			s = strings.TrimPrefix(s, p)
-			ok = true
-			break
-		}
-	}
-	if !ok {
-		err = errIgnoreFile
-		return
-	}
-
-	// Collect and trim endpoint name.
-	i := strings.Index(s, "_")
-	if i < 0 {
-		err = errors.Errorf("malformed file name %q", file)
-		return
-	}
-	fi.Addr = upspin.NetAddr(s[:i])
-	s = s[i+1:]
-
-	// For dir files, collect and trim user name.
-	if strings.HasPrefix(file, dirFilePrefix) {
-		i := strings.LastIndex(s, "_")
-		if i < 0 {
-			err = errors.Errorf("malformed file name %q: missing user name", file)
-			return
-		}
-		fi.User = upspin.UserName(s[:i])
-		s = s[i+1:]
-	}
-
-	// Collect time stamp.
-	ts, err := strconv.ParseInt(s, 10, 64)
-	if err != nil {
-		err = errors.Errorf("malformed file name %q: bad timestamp: %v", file, err)
-		return
-	}
-	fi.Time = time.Unix(ts, 0)
-
-	return
-}
diff --git a/cmd/upspin-audit/scandir.go b/cmd/upspin-audit/scandir.go
deleted file mode 100644
index 776c029..0000000
--- a/cmd/upspin-audit/scandir.go
+++ /dev/null
@@ -1,243 +0,0 @@
-// Copyright 2017 The Upspin Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package main
-
-import (
-	"bytes"
-	"flag"
-	"fmt"
-	"os"
-	"path/filepath"
-	"sync"
-	"time"
-
-	"upspin.io/path"
-	"upspin.io/upspin"
-)
-
-// This file implements the directory scan. Because the network time of flight is
-// significant to throughput, the scan is parallelized, which makes the code
-// more intricate than we'd like.
-// The code actually walks the directory tree using Glob. We could in principle
-// use Watch(-1), but snapshots are problematic for Watch. We take care to
-// avoid scanning a directory we've already seen, which Watch doesn't do on
-// the server. Our code makes it practical to scan the snapshot tree.
-
-const scanParallelism = 10 // Empirically chosen: speedup significant, not too many resources.
-
-type dirScanner struct {
-	State    *State
-	inFlight sync.WaitGroup        // Count of directories we have seen but not yet processed.
-	buffer   chan *upspin.DirEntry // Where to send directories for processing.
-	dirsToDo chan *upspin.DirEntry // Receive from here to find next directory to process.
-	done     chan *upspin.DirEntry // Send entries here once it is completely done, including children.
-}
-
-type sizeMap map[upspin.Endpoint]map[upspin.Reference]int64
-
-func (m sizeMap) addRef(ep upspin.Endpoint, ref upspin.Reference, size int64) {
-	refs := m[ep]
-	if refs == nil {
-		refs = make(map[upspin.Reference]int64)
-		m[ep] = refs
-	}
-	refs[ref] = size
-}
-
-func (s *State) scanDirectories(args []string) {
-	const help = `
-Audit scan-dir scans the directory trees of the named user roots and produces a
-list of store block references mentioned in those trees.
-
-It should be run as a user that has full read access to the named roots.
-`
-
-	fs := flag.NewFlagSet("scan-dir", flag.ExitOnError)
-	glob := fs.Bool("glob", true, "apply glob processing to the arguments")
-	dataDir := dataDirFlag(fs)
-	s.ParseFlags(fs, args, help, "audit scan-dir root ...")
-
-	if fs.NArg() == 0 || fs.Arg(0) == "help" {
-		fs.Usage()
-		os.Exit(2)
-	}
-
-	if err := os.MkdirAll(*dataDir, 0700); err != nil {
-		s.Exit(err)
-	}
-
-	var paths []upspin.PathName
-	if *glob {
-		paths = s.GlobAllUpspinPath(fs.Args())
-	} else {
-		for _, p := range fs.Args() {
-			paths = append(paths, upspin.PathName(p))
-		}
-	}
-
-	// Check that the arguments are user roots.
-	for _, p := range paths {
-		parsed, err := path.Parse(p)
-		if err != nil {
-			s.Exit(err)
-		}
-		if !parsed.IsRoot() {
-			s.Exitf("%q is not a user root", p)
-		}
-	}
-
-	now := time.Now()
-
-	sc := dirScanner{
-		State:    s,
-		buffer:   make(chan *upspin.DirEntry),
-		dirsToDo: make(chan *upspin.DirEntry),
-		done:     make(chan *upspin.DirEntry),
-	}
-
-	for i := 0; i < scanParallelism; i++ {
-		go sc.dirWorker()
-	}
-	go sc.bufferLoop()
-
-	// Prime the pump.
-	for _, p := range paths {
-		de, err := s.DirServer(p).Lookup(p)
-		if err != nil {
-			s.Exit(err)
-		}
-		sc.do(de)
-	}
-
-	// Shut down the process tree once nothing is in flight.
-	go func() {
-		sc.inFlight.Wait()
-		close(sc.buffer)
-		close(sc.done)
-	}()
-
-	// Receive and collect the data.
-	size := make(sizeMap)
-	users := make(map[upspin.UserName]sizeMap)
-	for de := range sc.done {
-		p, err := path.Parse(de.Name)
-		if err != nil {
-			s.Fail(err)
-			continue
-		}
-		userSize := users[p.User()]
-		if userSize == nil {
-			userSize = make(sizeMap)
-			users[p.User()] = userSize
-		}
-		for _, block := range de.Blocks {
-			ep := block.Location.Endpoint
-			size.addRef(ep, block.Location.Reference, block.Size)
-			userSize.addRef(ep, block.Location.Reference, block.Size)
-		}
-	}
-
-	// Print a summary.
-	total := int64(0)
-	for ep, refs := range size {
-		sum := int64(0)
-		for _, s := range refs {
-			sum += s
-		}
-		total += sum
-		fmt.Printf("%s: %d bytes (%s) (%d references)\n", ep.NetAddr, sum, ByteSize(sum), len(refs))
-	}
-	if len(size) > 1 {
-		fmt.Printf("%d bytes total (%s)\n", total, ByteSize(total))
-	}
-
-	// Write the data to files, one for each user/endpoint combo.
-	for u, size := range users {
-		for ep, refs := range size {
-			file := filepath.Join(*dataDir, fmt.Sprintf("%s%s_%s_%d", dirFilePrefix, ep.NetAddr, u, now.Unix()))
-			s.writeItems(file, itemMapToSlice(refs))
-		}
-	}
-}
-
-// do processes a DirEntry. If it's a file, we deliver it to the done channel.
-// Otherwise it's a directory and we buffer it for expansion.
-func (sc *dirScanner) do(entry *upspin.DirEntry) {
-	if !entry.IsDir() {
-		sc.done <- entry
-	} else {
-		sc.inFlight.Add(1)
-		sc.buffer <- entry
-	}
-}
-
-// bufferLoop gathers work to do and distributes it to the workers. It acts as
-// an itermediary buffering work to avoid deadlock; without this loop, workers
-// would both send to and receive from the dirsToDo channel. Once nothing is
-// pending or in flight, bufferLoop shuts down the processing network.
-func (sc *dirScanner) bufferLoop() {
-	defer close(sc.dirsToDo)
-	entriesPending := make(map[*upspin.DirEntry]bool)
-	seen := make(map[string]bool) // Eirectories we have seen, keyed by references within.
-	buffer := sc.buffer
-	var keyBuf bytes.Buffer // For creating keys for the seen map.
-	for {
-		var entry *upspin.DirEntry
-		var dirsToDo chan *upspin.DirEntry
-		if len(entriesPending) > 0 {
-			// Pick one entry at random from the map.
-			for entry = range entriesPending {
-				break
-			}
-			dirsToDo = sc.dirsToDo
-		} else if buffer == nil {
-			return
-		}
-		select {
-		case dirsToDo <- entry:
-			delete(entriesPending, entry)
-		case entry, active := <-buffer:
-			if !active {
-				buffer = nil
-				break
-			}
-			// If this directory has already been done, don't do it again.
-			// This situation arises when scanning a snapshot tree, as most of
-			// the directories are just dups of those in the main tree.
-			// We identify duplication by comparing the list of references within.
-			// TODO: Find a less expensive check.
-			keyBuf.Reset()
-			for i := range entry.Blocks {
-				b := &entry.Blocks[i]
-				fmt.Fprintf(&keyBuf, "%q %q\n", b.Location.Endpoint, b.Location.Reference)
-			}
-			key := keyBuf.String()
-			if seen[key] {
-				sc.inFlight.Done()
-			} else {
-				seen[key] = true
-				entriesPending[entry] = true
-			}
-		}
-	}
-}
-
-// dirWorker receives DirEntries for directories from the dirsToDo channel
-// and processes them, descending into their components and delivering
-// the results to the buffer channel.
-func (sc *dirScanner) dirWorker() {
-	for dir := range sc.dirsToDo {
-		des, err := sc.State.DirServer(dir.Name).Glob(upspin.AllFilesGlob(dir.Name))
-		if err != nil {
-			sc.State.Fail(err)
-		} else {
-			for _, de := range des {
-				sc.do(de)
-			}
-		}
-		sc.done <- dir
-		sc.inFlight.Done()
-	}
-}
diff --git a/cmd/upspin-audit/scanstore.go b/cmd/upspin-audit/scanstore.go
deleted file mode 100644
index 0b9d0a2..0000000
--- a/cmd/upspin-audit/scanstore.go
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright 2017 The Upspin Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package main
-
-import (
-	"encoding/json"
-	"flag"
-	"fmt"
-	"os"
-	"path/filepath"
-	"time"
-
-	"upspin.io/bind"
-	"upspin.io/upspin"
-)
-
-// This file implements the storage scan.
-
-func (s *State) scanStore(args []string) {
-	const help = `
-Audit scan-store produces a list of references to the blocks held
-by the given store server.
-By default it scans the store endpoint specified by the given config.
-
-It must be run as the same Upspin user as the store server itself,
-as only that user has permission to list references.
-`
-
-	fs := flag.NewFlagSet("scan-store", flag.ExitOnError)
-	endpointFlag := fs.String("endpoint", string(s.Config.StoreEndpoint().NetAddr), "network `address` of storage server; default is from config")
-	dataDir := dataDirFlag(fs)
-	s.ParseFlags(fs, args, help, "audit scan-store [-endpoint <storeserver address>]")
-
-	if fs.NArg() != 0 { // "audit scan-store help" is covered by this.
-		fs.Usage()
-		os.Exit(2)
-	}
-
-	if err := os.MkdirAll(*dataDir, 0700); err != nil {
-		s.Exit(err)
-	}
-
-	endpoint, err := upspin.ParseEndpoint("remote," + *endpointFlag)
-	if err != nil {
-		s.Exit(err)
-	}
-
-	now := time.Now()
-
-	store, err := bind.StoreServer(s.Config, *endpoint)
-	if err != nil {
-		s.Fail(err)
-		return
-	}
-	var (
-		token string
-		sum   int64
-		items []upspin.ListRefsItem
-	)
-	for {
-		b, _, _, err := store.Get(upspin.ListRefsMetadata + upspin.Reference(token))
-		if err != nil {
-			s.Exit(err)
-			return
-		}
-		var refs upspin.ListRefsResponse
-		err = json.Unmarshal(b, &refs)
-		if err != nil {
-			s.Exit(err)
-			return
-		}
-		for _, ri := range refs.Refs {
-			sum += ri.Size
-			items = append(items, ri)
-		}
-		token = refs.Next
-		if token == "" {
-			break
-		}
-	}
-	fmt.Printf("%s: %d bytes total (%s) in %d references\n", endpoint.NetAddr, sum, ByteSize(sum), len(items))
-	file := filepath.Join(*dataDir, fmt.Sprintf("%s%s_%d", storeFilePrefix, endpoint.NetAddr, now.Unix()))
-	s.writeItems(file, items)
-}