2023-12-23 12:50:47 +00:00
|
|
|
package cache
|
|
|
|
|
|
|
|
import (
|
|
|
|
"context"
|
|
|
|
"crypto/sha1"
|
2023-12-26 11:00:20 +00:00
|
|
|
"encoding/hex"
|
2023-12-23 12:50:47 +00:00
|
|
|
"fmt"
|
|
|
|
"io/fs"
|
|
|
|
"os"
|
2024-04-25 11:16:04 +00:00
|
|
|
"path/filepath"
|
|
|
|
"runtime"
|
2023-12-24 11:59:05 +00:00
|
|
|
"time"
|
2023-12-23 12:50:47 +00:00
|
|
|
|
2024-02-15 09:20:01 +00:00
|
|
|
"git.numtide.com/numtide/treefmt/format"
|
|
|
|
"git.numtide.com/numtide/treefmt/walk"
|
2024-01-11 20:52:22 +00:00
|
|
|
|
2024-01-03 08:08:57 +00:00
|
|
|
"github.com/charmbracelet/log"
|
|
|
|
|
2023-12-23 12:50:47 +00:00
|
|
|
"github.com/adrg/xdg"
|
|
|
|
"github.com/vmihailenco/msgpack/v5"
|
|
|
|
bolt "go.etcd.io/bbolt"
|
|
|
|
)
|
|
|
|
|
|
|
|
const (
	// pathsBucket is the name of the bolt bucket used to track file path entries.
	pathsBucket = "paths"
	// formattersBucket is the name of the bolt bucket used to track formatter executables.
	formattersBucket = "formatters"
)
|
|
|
|
|
2023-12-24 11:59:05 +00:00
|
|
|
// Entry represents a cache entry, indicating the last size and modified time for a file path.
type Entry struct {
	// Size is the file size in bytes at the time the entry was recorded.
	Size int64
	// Modified is the file's last modification time at the time the entry was recorded.
	Modified time.Time
}
|
|
|
|
|
2024-04-25 11:16:04 +00:00
|
|
|
var (
	// db is the shared bolt database handle, initialised by Open and released by Close.
	db *bolt.DB

	// ReadBatchSize controls how many paths ChangeSet processes before cycling its
	// read transaction, preventing a long-lived read tx from blocking writes.
	ReadBatchSize = 1024 * runtime.NumCPU()

	// logger is the package-scoped logger, initialised by Open with a "cache" prefix.
	logger *log.Logger
)
|
2023-12-23 12:50:47 +00:00
|
|
|
|
2023-12-24 11:59:05 +00:00
|
|
|
// Open creates an instance of bolt.DB for a given treeRoot path.
|
|
|
|
// If clean is true, Open will delete any existing data in the cache.
|
|
|
|
//
|
|
|
|
// The database will be located in `XDG_CACHE_DIR/treefmt/eval-cache/<id>.db`, where <id> is determined by hashing
|
|
|
|
// the treeRoot path. This associates a given treeRoot with a given instance of the cache.
|
2024-01-03 08:08:57 +00:00
|
|
|
func Open(treeRoot string, clean bool, formatters map[string]*format.Formatter) (err error) {
|
2024-04-25 11:16:04 +00:00
|
|
|
logger = log.WithPrefix("cache")
|
2024-01-03 08:08:57 +00:00
|
|
|
|
2023-12-23 12:50:47 +00:00
|
|
|
// determine a unique and consistent db name for the tree root
|
|
|
|
h := sha1.New()
|
|
|
|
h.Write([]byte(treeRoot))
|
|
|
|
digest := h.Sum(nil)
|
|
|
|
|
2023-12-26 11:00:20 +00:00
|
|
|
name := hex.EncodeToString(digest)
|
2023-12-23 12:50:47 +00:00
|
|
|
path, err := xdg.CacheFile(fmt.Sprintf("treefmt/eval-cache/%v.db", name))
|
2024-01-02 10:33:50 +00:00
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("%w: could not resolve local path for the cache", err)
|
|
|
|
}
|
2023-12-23 12:50:47 +00:00
|
|
|
|
|
|
|
db, err = bolt.Open(path, 0o600, nil)
|
|
|
|
if err != nil {
|
2024-01-02 10:33:50 +00:00
|
|
|
return fmt.Errorf("%w: failed to open cache", err)
|
2023-12-23 12:50:47 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
err = db.Update(func(tx *bolt.Tx) error {
|
2024-01-03 08:08:57 +00:00
|
|
|
// create bucket for tracking paths
|
|
|
|
pathsBucket, err := tx.CreateBucketIfNotExists([]byte(pathsBucket))
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("%w: failed to create paths bucket", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// create bucket for tracking formatters
|
|
|
|
formattersBucket, err := tx.CreateBucketIfNotExists([]byte(formattersBucket))
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("%w: failed to create formatters bucket", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// check for any newly configured or modified formatters
|
|
|
|
for name, formatter := range formatters {
|
|
|
|
|
|
|
|
stat, err := os.Lstat(formatter.Executable())
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("%w: failed to state formatter executable", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
entry, err := getEntry(formattersBucket, name)
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("%w: failed to retrieve entry for formatter", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
clean = clean || entry == nil || !(entry.Size == stat.Size() && entry.Modified == stat.ModTime())
|
2024-04-25 11:16:04 +00:00
|
|
|
logger.Debug(
|
2024-01-03 08:08:57 +00:00
|
|
|
"checking if formatter has changed",
|
|
|
|
"name", name,
|
|
|
|
"clean", clean,
|
|
|
|
"entry", entry,
|
|
|
|
"stat", stat,
|
|
|
|
)
|
|
|
|
|
|
|
|
// record formatters info
|
|
|
|
entry = &Entry{
|
|
|
|
Size: stat.Size(),
|
|
|
|
Modified: stat.ModTime(),
|
|
|
|
}
|
|
|
|
|
|
|
|
if err = putEntry(formattersBucket, name, entry); err != nil {
|
|
|
|
return fmt.Errorf("%w: failed to write formatter entry", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// check for any removed formatters
|
|
|
|
if err = formattersBucket.ForEach(func(key []byte, _ []byte) error {
|
|
|
|
_, ok := formatters[string(key)]
|
|
|
|
if !ok {
|
|
|
|
// remove the formatter entry from the cache
|
|
|
|
if err = formattersBucket.Delete(key); err != nil {
|
|
|
|
return fmt.Errorf("%w: failed to remove formatter entry", err)
|
|
|
|
}
|
|
|
|
// indicate a clean is required
|
|
|
|
clean = true
|
|
|
|
}
|
2023-12-23 12:50:47 +00:00
|
|
|
return nil
|
2024-01-03 08:08:57 +00:00
|
|
|
}); err != nil {
|
|
|
|
return fmt.Errorf("%w: failed to check for removed formatters", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
if clean {
|
|
|
|
// remove all path entries
|
|
|
|
c := pathsBucket.Cursor()
|
|
|
|
for k, v := c.First(); !(k == nil && v == nil); k, v = c.Next() {
|
|
|
|
if err = c.Delete(); err != nil {
|
|
|
|
return fmt.Errorf("%w: failed to remove path entry", err)
|
|
|
|
}
|
|
|
|
}
|
2023-12-23 12:50:47 +00:00
|
|
|
}
|
2024-01-03 08:08:57 +00:00
|
|
|
|
|
|
|
return nil
|
2023-12-23 12:50:47 +00:00
|
|
|
})
|
|
|
|
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2023-12-24 11:59:05 +00:00
|
|
|
// Close closes any open instance of the cache.
|
2023-12-23 12:50:47 +00:00
|
|
|
func Close() error {
|
2023-12-23 15:00:39 +00:00
|
|
|
if db == nil {
|
|
|
|
return nil
|
|
|
|
}
|
2023-12-23 12:50:47 +00:00
|
|
|
return db.Close()
|
|
|
|
}
|
|
|
|
|
2023-12-24 11:59:05 +00:00
|
|
|
// getEntry is a helper for reading cache entries from bolt.
|
|
|
|
func getEntry(bucket *bolt.Bucket, path string) (*Entry, error) {
|
2023-12-23 13:31:08 +00:00
|
|
|
b := bucket.Get([]byte(path))
|
|
|
|
if b != nil {
|
2023-12-24 11:59:05 +00:00
|
|
|
var cached Entry
|
2023-12-23 13:31:08 +00:00
|
|
|
if err := msgpack.Unmarshal(b, &cached); err != nil {
|
2024-01-02 10:33:50 +00:00
|
|
|
return nil, fmt.Errorf("%w: failed to unmarshal cache info for path '%v'", err, path)
|
2023-12-23 13:31:08 +00:00
|
|
|
}
|
|
|
|
return &cached, nil
|
|
|
|
} else {
|
|
|
|
return nil, nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2024-01-03 08:08:57 +00:00
|
|
|
// putEntry is a helper for writing cache entries into bolt.
|
|
|
|
func putEntry(bucket *bolt.Bucket, path string, entry *Entry) error {
|
|
|
|
bytes, err := msgpack.Marshal(entry)
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("%w: failed to marshal cache entry", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
if err = bucket.Put([]byte(path), bytes); err != nil {
|
|
|
|
return fmt.Errorf("%w: failed to put cache entry", err)
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2023-12-24 11:59:05 +00:00
|
|
|
// ChangeSet is used to walk a filesystem, starting at root, and outputting any new or changed paths using pathsCh.
|
|
|
|
// It determines if a path is new or has changed by comparing against cache entries.
|
2024-01-10 15:45:57 +00:00
|
|
|
func ChangeSet(ctx context.Context, walker walk.Walker, pathsCh chan<- string) error {
|
2024-04-25 11:16:04 +00:00
|
|
|
start := time.Now()
|
|
|
|
|
|
|
|
defer func() {
|
|
|
|
logger.Infof("finished generating change set in %v", time.Since(start))
|
|
|
|
}()
|
|
|
|
|
2024-01-07 18:57:51 +00:00
|
|
|
var tx *bolt.Tx
|
|
|
|
var bucket *bolt.Bucket
|
|
|
|
var processed int
|
|
|
|
|
|
|
|
defer func() {
|
|
|
|
// close any pending read tx
|
|
|
|
if tx != nil {
|
|
|
|
_ = tx.Rollback()
|
|
|
|
}
|
|
|
|
}()
|
2023-12-23 12:50:47 +00:00
|
|
|
|
2024-04-25 11:16:04 +00:00
|
|
|
// for quick removal of tree root from paths
|
|
|
|
relPathOffset := len(walker.Root()) + 1
|
|
|
|
|
2024-01-10 15:45:57 +00:00
|
|
|
return walker.Walk(ctx, func(path string, info fs.FileInfo, err error) error {
|
2024-01-11 20:52:22 +00:00
|
|
|
select {
|
|
|
|
case <-ctx.Done():
|
2024-01-07 18:57:51 +00:00
|
|
|
return ctx.Err()
|
2024-01-11 20:52:22 +00:00
|
|
|
default:
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("%w: failed to walk path", err)
|
|
|
|
} else if info.IsDir() {
|
|
|
|
// ignore directories
|
|
|
|
return nil
|
|
|
|
}
|
2024-01-07 18:57:51 +00:00
|
|
|
}
|
2023-12-23 12:50:47 +00:00
|
|
|
|
2024-01-07 18:57:51 +00:00
|
|
|
// ignore symlinks
|
|
|
|
if info.Mode()&os.ModeSymlink == os.ModeSymlink {
|
|
|
|
return nil
|
|
|
|
}
|
2023-12-23 12:50:47 +00:00
|
|
|
|
2024-01-07 18:57:51 +00:00
|
|
|
// open a new read tx if there isn't one in progress
|
|
|
|
// we have to periodically open a new read tx to prevent writes from being blocked
|
|
|
|
if tx == nil {
|
|
|
|
tx, err = db.Begin(false)
|
2023-12-23 13:31:08 +00:00
|
|
|
if err != nil {
|
2024-01-07 18:57:51 +00:00
|
|
|
return fmt.Errorf("%w: failed to open a new read tx", err)
|
2023-12-23 12:50:47 +00:00
|
|
|
}
|
2024-01-07 18:57:51 +00:00
|
|
|
bucket = tx.Bucket([]byte(pathsBucket))
|
|
|
|
}
|
2023-12-23 12:50:47 +00:00
|
|
|
|
2024-04-25 11:16:04 +00:00
|
|
|
relPath := path[relPathOffset:]
|
|
|
|
cached, err := getEntry(bucket, relPath)
|
2024-01-07 18:57:51 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2023-12-23 12:50:47 +00:00
|
|
|
|
2024-01-07 18:57:51 +00:00
|
|
|
changedOrNew := cached == nil || !(cached.Modified == info.ModTime() && cached.Size == info.Size())
|
2023-12-23 12:50:47 +00:00
|
|
|
|
2024-01-07 18:57:51 +00:00
|
|
|
if !changedOrNew {
|
|
|
|
// no change
|
2023-12-23 12:50:47 +00:00
|
|
|
return nil
|
2024-01-07 18:57:51 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// pass on the path
|
2024-01-11 20:52:22 +00:00
|
|
|
select {
|
|
|
|
case <-ctx.Done():
|
|
|
|
return ctx.Err()
|
|
|
|
default:
|
2024-04-25 11:16:04 +00:00
|
|
|
pathsCh <- relPath
|
2024-01-11 20:52:22 +00:00
|
|
|
}
|
2024-01-07 18:57:51 +00:00
|
|
|
|
|
|
|
// close the current tx if we have reached the batch size
|
|
|
|
processed += 1
|
2024-04-25 11:16:04 +00:00
|
|
|
if processed == ReadBatchSize {
|
|
|
|
err = tx.Rollback()
|
|
|
|
tx = nil
|
|
|
|
return err
|
2024-01-07 18:57:51 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
2023-12-23 12:50:47 +00:00
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2023-12-24 11:59:05 +00:00
|
|
|
// Update is used to record updated cache information for the specified list of paths.
|
2024-04-25 11:16:04 +00:00
|
|
|
func Update(treeRoot string, paths []string) (int, error) {
|
|
|
|
start := time.Now()
|
|
|
|
defer func() {
|
|
|
|
logger.Infof("finished updating %v paths in %v", len(paths), time.Since(start))
|
|
|
|
}()
|
|
|
|
|
2023-12-23 12:50:47 +00:00
|
|
|
if len(paths) == 0 {
|
2023-12-23 13:31:08 +00:00
|
|
|
return 0, nil
|
2023-12-23 12:50:47 +00:00
|
|
|
}
|
|
|
|
|
2023-12-23 13:31:08 +00:00
|
|
|
var changes int
|
|
|
|
|
|
|
|
return changes, db.Update(func(tx *bolt.Tx) error {
|
2024-01-03 08:08:57 +00:00
|
|
|
bucket := tx.Bucket([]byte(pathsBucket))
|
2023-12-23 12:50:47 +00:00
|
|
|
|
|
|
|
for _, path := range paths {
|
2023-12-24 11:59:05 +00:00
|
|
|
cached, err := getEntry(bucket, path)
|
2023-12-23 13:31:08 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2024-04-25 11:16:04 +00:00
|
|
|
pathInfo, err := os.Stat(filepath.Join(treeRoot, path))
|
2023-12-23 12:50:47 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2023-12-23 13:31:08 +00:00
|
|
|
if cached == nil || !(cached.Modified == pathInfo.ModTime() && cached.Size == pathInfo.Size()) {
|
|
|
|
changes += 1
|
|
|
|
} else {
|
|
|
|
// no change to write
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2024-01-03 08:08:57 +00:00
|
|
|
entry := Entry{
|
2023-12-23 12:50:47 +00:00
|
|
|
Size: pathInfo.Size(),
|
|
|
|
Modified: pathInfo.ModTime(),
|
|
|
|
}
|
|
|
|
|
2024-01-03 08:08:57 +00:00
|
|
|
if err = putEntry(bucket, path, &entry); err != nil {
|
|
|
|
return err
|
2023-12-23 12:50:47 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
})
|
|
|
|
}
|