mirror of https://github.com/gopasspw/gopass.git
synced 2025-12-08 19:24:54 +00:00

Add experimental ondisk storage and rcs backend (#1366)

RELEASE_NOTES=n/a

Signed-off-by: Dominik Schulz <dominik.schulz@gauner.org>

This commit is contained in:
parent 7005544d2e
commit e6beee928d
Makefile (2 changed lines)
@@ -1,5 +1,5 @@
 FIRST_GOPATH := $(firstword $(subst :, ,$(GOPATH)))
-PKGS := $(shell go list ./... | grep -v /tests | grep -v /xcpb | grep -v /openpgp)
+PKGS := $(shell go list ./... | grep -v /tests | grep -v /xcpb | grep -v /gpb)
 GOFILES_NOVENDOR := $(shell find . -name vendor -prune -o -type f -name '*.go' -not -name '*.pb.go' -print)
 GOFILES_BUILD := $(shell find . -type f -name '*.go' -not -name '*_test.go')
 PROTOFILES := $(shell find . -name vendor -prune -o -type f -name '*.proto' -print)
@@ -24,6 +24,17 @@ This is a volatile in-memory backend for tests.

**WARNING**: All data is lost when gopass stops!

### On Disk (ondisk)

This is an experimental on-disk K/V backend. It stores the encrypted data in the
filesystem in a content-addressable manner. Currently the metadata is NOT encrypted,
but metadata encryption is planned.

This might become the default storage and RCS backend in gopass 2.x.

**WARNING**: The metadata is currently not encrypted and the disk format is
still experimental. **DO NOT USE** unless you want to help with the implementation.

## RCS Backends (rcs)

These are revision control backends talking to various source control
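To illustrate the content-addressable layout described above, here is a minimal standalone sketch (not part of this commit) of how a blob's relative path is derived from the SHA256 sum of its content, mirroring the filename() helper added in store.go below:

package main

import (
	"crypto/sha256"
	"fmt"
	"path/filepath"
)

// contentPath returns a two-level, content-addressable path such as
// "ab/cdef..." for the given (already encrypted) payload.
func contentPath(buf []byte) string {
	sum := fmt.Sprintf("%x", sha256.Sum256(buf))
	return filepath.Join(sum[0:2], sum[2:])
}

func main() {
	fmt.Println(contentPath([]byte("example ciphertext")))
}

Identical content maps to the same file, which is what lets the Fsck routine below verify that each blob's path matches its checksum.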
@@ -498,6 +498,10 @@ func (s *Action) GetCommands() []*cli.Command {
				Name:  "rcs",
				Usage: "Select sync backend (git, gitcli, noop)",
			},
			&cli.StringFlag{
				Name:  "storage",
				Usage: "Select storage backend (fs, inmem, ondisk)",
			},
			&cli.BoolFlag{
				Name:  "nogit",
				Usage: "(DEPRECATED): Select noop RCS backend. Use '--rcs noop' instead",
@@ -229,16 +229,11 @@ func (s *Action) generateSetPassword(ctx context.Context, name, key, password st

	// replace password in existing secret
	if s.Store.Exists(ctx, name) {
-		sec, ctx, err := s.Store.GetContext(ctx, name)
-		if err != nil {
-			return ctx, ExitError(ctx, ExitEncrypt, err, "failed to set key '%s' of '%s': %s", key, name, err)
+		ctx, err := s.generateReplaceExisting(ctx, name, key, password, kvps)
+		if err == nil {
+			return ctx, nil
		}
-		setMetadata(sec, kvps)
-		sec.SetPassword(password)
-		if err := s.Store.Set(sub.WithReason(ctx, "Generated password for YAML key"), name, sec); err != nil {
-			return ctx, ExitError(ctx, ExitEncrypt, err, "failed to set key '%s' of '%s': %s", key, name, err)
-		}
-		return ctx, nil
+		out.Error(ctx, "Failed to read existing secret. Creating anew. Error: %s", err.Error())
	}

	// generate a completely new secret
@@ -259,6 +254,19 @@ func (s *Action) generateSetPassword(ctx context.Context, name, key, password st
	return ctx, nil
}

func (s *Action) generateReplaceExisting(ctx context.Context, name, key, password string, kvps map[string]string) (context.Context, error) {
	sec, ctx, err := s.Store.GetContext(ctx, name)
	if err != nil {
		return ctx, ExitError(ctx, ExitEncrypt, err, "failed to set key '%s' of '%s': %s", key, name, err)
	}
	setMetadata(sec, kvps)
	sec.SetPassword(password)
	if err := s.Store.Set(sub.WithReason(ctx, "Generated password for YAML key"), name, sec); err != nil {
		return ctx, ExitError(ctx, ExitEncrypt, err, "failed to set key '%s' of '%s': %s", key, name, err)
	}
	return ctx, nil
}

func setMetadata(sec store.Secret, kvps map[string]string) {
	for k, v := range kvps {
		_ = sec.SetValue(k, v)
@@ -86,6 +86,10 @@ func initParseContext(ctx context.Context, c *cli.Context) context.Context {
			ctx = backend.WithRCSBackend(ctx, backend.Noop)
		}
	}
	if c.IsSet("storage") {
		out.Debug(ctx, "Using Storage: %s", c.String("storage"))
		ctx = backend.WithStorageBackendString(ctx, c.String("storage"))
	}

	// default to git
	if !backend.HasRCSBackend(ctx) {
@@ -15,6 +15,8 @@ const (
	Noop RCSBackend = iota
	// GitCLI is a git-cli based sync backend
	GitCLI
	// OnDiskRCS is the OnDisk storage backend in disguise as an RCS backend
	OnDiskRCS
)

func (s RCSBackend) String() string {
@@ -15,6 +15,8 @@ const (
	FS StorageBackend = iota
	// InMem is an in-memory mock store for tests
	InMem
	// OnDisk is an on-disk store
	OnDisk
)

func (s StorageBackend) String() string {
pkg/backend/storage/kv/ondisk/fsck.go (new file, 139 lines)
@@ -0,0 +1,139 @@
package ondisk

import (
	"context"
	"crypto/sha256"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"strings"
	"syscall"

	"github.com/gopasspw/gopass/pkg/ctxutil"
	"github.com/gopasspw/gopass/pkg/fsutil"
	"github.com/gopasspw/gopass/pkg/out"
)

// Fsck checks store integrity and performs a compaction
func (o *OnDisk) Fsck(ctx context.Context) error {
	pcb := ctxutil.GetProgressCallback(ctx)

	if err := o.Compact(ctx); err != nil {
		return err
	}

	// build a list of existing files
	files := make(map[string]struct{}, len(o.idx.Entries)+1)
	files[idxFile] = struct{}{}
	files[idxBakFile] = struct{}{}
	for _, v := range o.idx.Entries {
		if v.IsDeleted() {
			continue
		}
		for _, r := range v.Revisions {
			files[r.Filename] = struct{}{}
		}
	}

	return filepath.Walk(o.dir, func(path string, fi os.FileInfo, err error) error {
		defer pcb()
		if err != nil {
			return err
		}
		if fi.IsDir() && len(fi.Name()) != 2 && path != o.dir {
			out.Print(ctx, "Skipping unknown dir: %s", path)
			return filepath.SkipDir
		}
		out.Debug(ctx, "Checking: %s", path)
		if fi.IsDir() {
			return o.fsckCheckDir(ctx, path, fi)
		}
		relPath := strings.TrimPrefix(path, o.dir+string(filepath.Separator))
		if err := o.fsckCheckFile(ctx, relPath, fi); err != nil {
			return err
		}
		_, found := files[relPath]
		if found {
			return nil
		}
		out.Yellow(ctx, "Found orphaned file in store. Removing: %s", relPath)
		return os.Remove(path)
	})
}

func (o *OnDisk) fsckCheckFile(ctx context.Context, relPath string, fi os.FileInfo) error {
	path := filepath.Join(o.dir, relPath)
	// check filename / hashsum
	fileHash, err := hashFromFile(path)
	if err != nil {
		return err
	}
	if len(fileHash) < 3 {
		return fmt.Errorf("invalid hash")
	}
	wantPath := filepath.Join(fileHash[0:2], fileHash[2:])
	if relPath != wantPath && !strings.Contains(relPath, idxFile) {
		wantFullPath := filepath.Join(o.dir, wantPath)
		out.Error(ctx, " Invalid checksum / path: Want %s for %s", wantPath, relPath)
		if err := os.Rename(path, wantFullPath); err != nil {
			return err
		}
		out.Yellow(ctx, " Renamed %s to %s", relPath, wantPath)
		path = wantFullPath
	}

	// check file modes
	if fi.Mode().Perm()&0177 == 0 {
		return nil
	}

	out.Yellow(ctx, "Permissions too wide: %s (%s)", path, fi.Mode().String())

	np := uint32(fi.Mode().Perm() & 0600)
	out.Green(ctx, " Fixing permissions from %s to %s", fi.Mode().Perm().String(), os.FileMode(np).Perm().String())
	if err := syscall.Chmod(path, np); err != nil {
		out.Error(ctx, " Failed to set permissions for %s to rw-------: %s", path, err)
	}
	return nil
}

func (o *OnDisk) fsckCheckDir(ctx context.Context, path string, fi os.FileInfo) error {
	// check if any group or other perms are set,
	// i.e. check for perms other than rwx------
	if fi.Mode().Perm()&077 != 0 {
		out.Yellow(ctx, "Permissions too wide %s on dir %s", fi.Mode().Perm().String(), path)

		np := uint32(fi.Mode().Perm() & 0700)
		out.Green(ctx, " Fixing permissions from %s to %s", fi.Mode().Perm().String(), os.FileMode(np).Perm().String())
		if err := syscall.Chmod(path, np); err != nil {
			out.Error(ctx, " Failed to set permissions for %s to rwx------: %s", path, err)
		}
	}

	// check for empty folders
	isEmpty, err := fsutil.IsEmptyDir(path)
	if err != nil {
		return err
	}
	if isEmpty {
		out.Error(ctx, "Folder %s is empty. Removing", path)
		return os.Remove(path)
	}
	return nil
}

func hashFromFile(path string) (string, error) {
	fh, err := os.Open(path)
	if err != nil {
		return "", err
	}
	defer fh.Close()

	h := sha256.New()
	if _, err := io.Copy(h, fh); err != nil {
		return "", err
	}

	return fmt.Sprintf("%x", h.Sum(nil)), nil
}
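The permission checks in fsckCheckFile and fsckCheckDir work with octal masks: 0177 covers the owner execute bit plus all group/other bits, 077 covers all group/other bits. A small standalone sketch (illustrative only, not part of the commit) of the same test:

package main

import (
	"fmt"
	"os"
)

func main() {
	for _, m := range []os.FileMode{0600, 0400, 0644, 0700} {
		// Same mask as fsckCheckFile: anything beyond rw------- is "too wide"
		// and would be narrowed to perm & 0600.
		fmt.Printf("%v too wide: %t\n", m, m.Perm()&0177 != 0)
	}
}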
pkg/backend/storage/kv/ondisk/gpb/Makefile (new file, 7 lines)
@@ -0,0 +1,7 @@
all:
	protoc -I/usr/local/include -I. \
		-I${GOPATH}/src \
		-I${GOPATH}/src/github.com/googleapis/googleapis \
		-I${GOPATH}/src/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis \
		--go_out=. \
		gpb.proto
pkg/backend/storage/kv/ondisk/gpb/gpb.pb.go (new file, 336 lines)
@@ -0,0 +1,336 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// 	protoc-gen-go v1.21.0-devel
// 	protoc        v3.12.0
// source: gpb.proto

package gpb

import (
	proto "github.com/golang/protobuf/proto"
	timestamp "github.com/golang/protobuf/ptypes/timestamp"
	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
	reflect "reflect"
	sync "sync"
)

const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)

// This is a compile-time assertion that a sufficiently up-to-date version
// of the legacy proto package is being used.
const _ = proto.ProtoPackageIsVersion4

type Revision struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	Created   *timestamp.Timestamp `protobuf:"bytes,1,opt,name=Created,proto3" json:"Created,omitempty"`
	Message   string               `protobuf:"bytes,2,opt,name=Message,proto3" json:"Message,omitempty"`
	Filename  string               `protobuf:"bytes,3,opt,name=Filename,proto3" json:"Filename,omitempty"`
	Tombstone bool                 `protobuf:"varint,4,opt,name=Tombstone,proto3" json:"Tombstone,omitempty"`
}

func (x *Revision) Reset() {
	*x = Revision{}
	if protoimpl.UnsafeEnabled {
		mi := &file_gpb_proto_msgTypes[0]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *Revision) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*Revision) ProtoMessage() {}

func (x *Revision) ProtoReflect() protoreflect.Message {
	mi := &file_gpb_proto_msgTypes[0]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use Revision.ProtoReflect.Descriptor instead.
func (*Revision) Descriptor() ([]byte, []int) {
	return file_gpb_proto_rawDescGZIP(), []int{0}
}

func (x *Revision) GetCreated() *timestamp.Timestamp {
	if x != nil {
		return x.Created
	}
	return nil
}

func (x *Revision) GetMessage() string {
	if x != nil {
		return x.Message
	}
	return ""
}

func (x *Revision) GetFilename() string {
	if x != nil {
		return x.Filename
	}
	return ""
}

func (x *Revision) GetTombstone() bool {
	if x != nil {
		return x.Tombstone
	}
	return false
}

type Entry struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	Name      string      `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"`
	Revisions []*Revision `protobuf:"bytes,2,rep,name=Revisions,proto3" json:"Revisions,omitempty"`
}

func (x *Entry) Reset() {
	*x = Entry{}
	if protoimpl.UnsafeEnabled {
		mi := &file_gpb_proto_msgTypes[1]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *Entry) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*Entry) ProtoMessage() {}

func (x *Entry) ProtoReflect() protoreflect.Message {
	mi := &file_gpb_proto_msgTypes[1]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use Entry.ProtoReflect.Descriptor instead.
func (*Entry) Descriptor() ([]byte, []int) {
	return file_gpb_proto_rawDescGZIP(), []int{1}
}

func (x *Entry) GetName() string {
	if x != nil {
		return x.Name
	}
	return ""
}

func (x *Entry) GetRevisions() []*Revision {
	if x != nil {
		return x.Revisions
	}
	return nil
}

type Store struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	Name    string            `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"`
	Entries map[string]*Entry `protobuf:"bytes,2,rep,name=Entries,proto3" json:"Entries,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
}

func (x *Store) Reset() {
	*x = Store{}
	if protoimpl.UnsafeEnabled {
		mi := &file_gpb_proto_msgTypes[2]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *Store) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*Store) ProtoMessage() {}

func (x *Store) ProtoReflect() protoreflect.Message {
	mi := &file_gpb_proto_msgTypes[2]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use Store.ProtoReflect.Descriptor instead.
func (*Store) Descriptor() ([]byte, []int) {
	return file_gpb_proto_rawDescGZIP(), []int{2}
}

func (x *Store) GetName() string {
	if x != nil {
		return x.Name
	}
	return ""
}

func (x *Store) GetEntries() map[string]*Entry {
	if x != nil {
		return x.Entries
	}
	return nil
}

var File_gpb_proto protoreflect.FileDescriptor

var file_gpb_proto_rawDesc = []byte{
	0x0a, 0x09, 0x67, 0x70, 0x62, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x03, 0x67, 0x70, 0x62,
	0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
	0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74,
	0x6f, 0x22, 0x94, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x34,
	0x0a, 0x07, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
	0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
	0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x07, 0x43, 0x72, 0x65,
	0x61, 0x74, 0x65, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18,
	0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x1a,
	0x0a, 0x08, 0x46, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09,
	0x52, 0x08, 0x46, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x54, 0x6f,
	0x6d, 0x62, 0x73, 0x74, 0x6f, 0x6e, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x54,
	0x6f, 0x6d, 0x62, 0x73, 0x74, 0x6f, 0x6e, 0x65, 0x22, 0x48, 0x0a, 0x05, 0x45, 0x6e, 0x74, 0x72,
	0x79, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
	0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2b, 0x0a, 0x09, 0x52, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f,
	0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x67, 0x70, 0x62, 0x2e, 0x52,
	0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x52, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f,
	0x6e, 0x73, 0x22, 0x96, 0x01, 0x0a, 0x05, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x12, 0x12, 0x0a, 0x04,
	0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65,
	0x12, 0x31, 0x0a, 0x07, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28,
	0x0b, 0x32, 0x17, 0x2e, 0x67, 0x70, 0x62, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x45, 0x6e,
	0x74, 0x72, 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x45, 0x6e, 0x74, 0x72,
	0x69, 0x65, 0x73, 0x1a, 0x46, 0x0a, 0x0c, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x45, 0x6e,
	0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
	0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x20, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02,
	0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x67, 0x70, 0x62, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79,
	0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x07, 0x5a, 0x05, 0x2e,
	0x3b, 0x67, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}

var (
	file_gpb_proto_rawDescOnce sync.Once
	file_gpb_proto_rawDescData = file_gpb_proto_rawDesc
)

func file_gpb_proto_rawDescGZIP() []byte {
	file_gpb_proto_rawDescOnce.Do(func() {
		file_gpb_proto_rawDescData = protoimpl.X.CompressGZIP(file_gpb_proto_rawDescData)
	})
	return file_gpb_proto_rawDescData
}

var file_gpb_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
var file_gpb_proto_goTypes = []interface{}{
	(*Revision)(nil),            // 0: gpb.Revision
	(*Entry)(nil),               // 1: gpb.Entry
	(*Store)(nil),               // 2: gpb.Store
	nil,                         // 3: gpb.Store.EntriesEntry
	(*timestamp.Timestamp)(nil), // 4: google.protobuf.Timestamp
}
var file_gpb_proto_depIdxs = []int32{
	4, // 0: gpb.Revision.Created:type_name -> google.protobuf.Timestamp
	0, // 1: gpb.Entry.Revisions:type_name -> gpb.Revision
	3, // 2: gpb.Store.Entries:type_name -> gpb.Store.EntriesEntry
	1, // 3: gpb.Store.EntriesEntry.value:type_name -> gpb.Entry
	4, // [4:4] is the sub-list for method output_type
	4, // [4:4] is the sub-list for method input_type
	4, // [4:4] is the sub-list for extension type_name
	4, // [4:4] is the sub-list for extension extendee
	0, // [0:4] is the sub-list for field type_name
}

func init() { file_gpb_proto_init() }
func file_gpb_proto_init() {
	if File_gpb_proto != nil {
		return
	}
	if !protoimpl.UnsafeEnabled {
		file_gpb_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*Revision); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_gpb_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*Entry); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_gpb_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*Store); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: file_gpb_proto_rawDesc,
			NumEnums:      0,
			NumMessages:   4,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_gpb_proto_goTypes,
		DependencyIndexes: file_gpb_proto_depIdxs,
		MessageInfos:      file_gpb_proto_msgTypes,
	}.Build()
	File_gpb_proto = out.File
	file_gpb_proto_rawDesc = nil
	file_gpb_proto_goTypes = nil
	file_gpb_proto_depIdxs = nil
}
pkg/backend/storage/kv/ondisk/gpb/gpb.proto (new file, 24 lines)
@@ -0,0 +1,24 @@
syntax = "proto3";

import "google/protobuf/timestamp.proto";

option go_package = ".;gpb";

package gpb;

message Revision {
	google.protobuf.Timestamp Created = 1;
	string Message = 2;
	string Filename = 3;
	bool Tombstone = 4;
}

message Entry {
	string Name = 1;
	repeated Revision Revisions = 2;
}

message Store {
	string Name = 1;
	map<string, Entry> Entries = 2;
}
pkg/backend/storage/kv/ondisk/gpb/sort.go (new file, 47 lines)
@@ -0,0 +1,47 @@
package gpb

import (
	"sort"
	"time"
)

// ByRevision sorts the latest revision to the top, i.e. to index [0]
type ByRevision []*Revision

func (r ByRevision) Len() int           { return len(r) }
func (r ByRevision) Swap(i, j int)      { r[i], r[j] = r[j], r[i] }
func (r ByRevision) Less(i, j int) bool { return r[i].Created.Seconds > r[j].Created.Seconds }

// SortedRevisions returns a list of sorted revisions
func (e *Entry) SortedRevisions() []*Revision {
	sort.Sort(ByRevision(e.Revisions))
	return e.Revisions
}

// Latest returns the latest revision
func (e *Entry) Latest() *Revision {
	sort.Sort(ByRevision(e.Revisions))
	return e.Revisions[0]
}

// IsDeleted returns true if an entry was marked as deleted
func (e *Entry) IsDeleted() bool {
	return e.Latest().GetTombstone()
}

// Delete marks an entry as deleted
func (e *Entry) Delete(msg string) bool {
	if e.IsDeleted() {
		return false
	}
	e.Revisions = append(e.Revisions, &Revision{
		Message:   msg,
		Tombstone: true,
	})
	return true
}

// Time returns the time a revision was created
func (r *Revision) Time() time.Time {
	return time.Unix(r.Created.GetSeconds(), int64(r.Created.GetNanos()))
}
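For illustration, a small standalone program (assuming the generated gpb package above; the entry name is made up) that builds an index entry, checks that Latest() returns the newest revision, and marshals the whole index the same way the ondisk store persists index.pb:

package main

import (
	"fmt"

	"github.com/gopasspw/gopass/pkg/backend/storage/kv/ondisk/gpb"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	e := &gpb.Entry{
		Name: "websites/example.com", // illustrative entry name
		Revisions: []*gpb.Revision{
			{Created: &timestamppb.Timestamp{Seconds: 100}, Message: "old", Filename: "aa/old"},
			{Created: &timestamppb.Timestamp{Seconds: 200}, Message: "new", Filename: "bb/new"},
		},
	}
	fmt.Println(e.Latest().GetMessage()) // prints "new"

	idx := &gpb.Store{Name: "example", Entries: map[string]*gpb.Entry{e.Name: e}}
	buf, err := proto.Marshal(idx)
	if err != nil {
		panic(err)
	}
	fmt.Printf("serialized index: %d bytes\n", len(buf))
}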
pkg/backend/storage/kv/ondisk/loader.go (new file, 53 lines)
@@ -0,0 +1,53 @@
package ondisk

import (
	"context"

	"github.com/gopasspw/gopass/pkg/backend"
	"github.com/gopasspw/gopass/pkg/out"
)

const (
	name = "ondisk"
)

func init() {
	backend.RegisterStorage(backend.OnDisk, name, &loader{})
	backend.RegisterRCS(backend.OnDiskRCS, name, &loader{})
}

type loader struct{}

// New creates a new ondisk loader
func (l loader) New(ctx context.Context, url *backend.URL) (backend.Storage, error) {
	be, err := New(url.Path)
	out.Debug(ctx, "Using Storage Backend: %s", be.String())
	return be, err
}

// Open loads an existing ondisk repo
func (l loader) Open(ctx context.Context, path string) (backend.RCS, error) {
	be, err := New(path)
	out.Debug(ctx, "Using RCS Backend: %s", be.String())
	return be, err
}

// Clone loads an existing ondisk repo
// WARNING: DOES NOT SUPPORT CLONE (yet)
func (l loader) Clone(ctx context.Context, repo, path string) (backend.RCS, error) {
	be, err := New(path)
	out.Debug(ctx, "Using RCS Backend: %s", be.String())
	return be, err
}

// Init creates a new ondisk repo
func (l loader) Init(ctx context.Context, path, username, email string) (backend.RCS, error) {
	be, err := New(path)
	out.Debug(ctx, "Using RCS Backend: %s", be.String())
	return be, err
}

// String returns ondisk
func (l loader) String() string {
	return name
}
pkg/backend/storage/kv/ondisk/rcs.go (new file, 88 lines)
@@ -0,0 +1,88 @@
package ondisk

import (
	"context"
	"fmt"
	"io/ioutil"
	"path/filepath"

	"github.com/gopasspw/gopass/pkg/backend"
)

// Add is not supported / necessary
func (o *OnDisk) Add(ctx context.Context, args ...string) error {
	return nil
}

// Commit is not supported / necessary
func (o *OnDisk) Commit(ctx context.Context, msg string) error {
	return nil
}

// Push is not implemented, yet
func (o *OnDisk) Push(ctx context.Context, remote, location string) error {
	return fmt.Errorf("not yet implemented")
}

// Pull is not implemented, yet
func (o *OnDisk) Pull(ctx context.Context, remote, location string) error {
	return fmt.Errorf("not yet implemented")
}

// InitConfig is not necessary
func (o *OnDisk) InitConfig(ctx context.Context, name, email string) error {
	return nil
}

// AddRemote is not implemented, yet
func (o *OnDisk) AddRemote(ctx context.Context, remote, location string) error {
	return fmt.Errorf("not yet implemented")
}

// RemoveRemote is not implemented, yet
func (o *OnDisk) RemoveRemote(ctx context.Context, remote string) error {
	return fmt.Errorf("not yet implemented")
}

// Revisions returns a list of revisions for this entry
func (o *OnDisk) Revisions(ctx context.Context, name string) ([]backend.Revision, error) {
	if !o.Exists(ctx, name) {
		return nil, fmt.Errorf("not found")
	}
	e, err := o.getEntry(name)
	if err != nil {
		return nil, err
	}
	revs := make([]backend.Revision, 0, len(e.Revisions))
	for _, rev := range e.SortedRevisions() {
		revs = append(revs, backend.Revision{
			Hash:    fmt.Sprintf("%d", rev.GetCreated().GetSeconds()),
			Subject: rev.Message,
			Date:    rev.Time(),
		})
	}
	return revs, nil
}

// GetRevision returns a single revision
func (o *OnDisk) GetRevision(ctx context.Context, name, revision string) ([]byte, error) {
	if !o.Exists(ctx, name) {
		return nil, fmt.Errorf("not found")
	}
	e, err := o.getEntry(name)
	if err != nil {
		return nil, err
	}
	for _, rev := range e.SortedRevisions() {
		if revision == fmt.Sprintf("%d", rev.GetCreated().GetSeconds()) {
			path := filepath.Join(o.dir, rev.GetFilename())
			return ioutil.ReadFile(path)
		}
	}
	return nil, fmt.Errorf("not found")
}

// Status is not necessary
func (o *OnDisk) Status(ctx context.Context) ([]byte, error) {
	return nil, nil
}
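A hedged usage sketch (not part of the commit; the store path and entry name are illustrative) that lists the revisions of an entry through the RCS interface above:

package main

import (
	"context"
	"fmt"

	"github.com/gopasspw/gopass/pkg/backend/storage/kv/ondisk"
)

func main() {
	ctx := context.Background()
	st, err := ondisk.New("/tmp/gopass-ondisk") // assumed path of an existing ondisk store
	if err != nil {
		panic(err)
	}
	revs, err := st.Revisions(ctx, "websites/example.com")
	if err != nil {
		panic(err) // e.g. "not found" if the entry does not exist
	}
	for _, r := range revs {
		// Hash is the creation timestamp in seconds, as built in Revisions() above.
		fmt.Printf("%s %s %s\n", r.Hash, r.Date.Format("2006-01-02 15:04:05"), r.Subject)
	}
}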
pkg/backend/storage/kv/ondisk/store.go (new file, 228 lines)
@@ -0,0 +1,228 @@
package ondisk

import (
	"context"
	"crypto/sha256"
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"sort"
	"strings"
	"time"

	"github.com/blang/semver"
	"github.com/gopasspw/gopass/pkg/backend/storage/kv/ondisk/gpb"
	"github.com/gopasspw/gopass/pkg/ctxutil"
	"github.com/gopasspw/gopass/pkg/out"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/timestamppb"
)

var (
	idxFile    = "index.pb"
	idxBakFile = "index.pb.back"
	maxRev     = 256
	delTTL     = time.Hour * 24 * 365
)

// OnDisk is an on disk key-value store
type OnDisk struct {
	dir string
	idx *gpb.Store
}

// New creates a new ondisk store
func New(baseDir string) (*OnDisk, error) {
	idx, err := loadOrCreate(baseDir)
	if err != nil {
		return nil, err
	}
	return &OnDisk{
		dir: baseDir,
		idx: idx,
	}, nil
}

func loadOrCreate(path string) (*gpb.Store, error) {
	path = filepath.Join(path, idxFile)
	buf, err := ioutil.ReadFile(path)
	if os.IsNotExist(err) {
		return &gpb.Store{
			Name:    filepath.Base(path),
			Entries: make(map[string]*gpb.Entry),
		}, nil
	}
	idx := &gpb.Store{}
	err = proto.Unmarshal(buf, idx)
	return idx, err
}

func (o *OnDisk) saveIndex() error {
	buf, err := proto.Marshal(o.idx)
	if err != nil {
		return err
	}
	// TODO the index should be encrypted
	os.Rename(filepath.Join(o.dir, idxFile), filepath.Join(o.dir, idxBakFile))
	return ioutil.WriteFile(filepath.Join(o.dir, idxFile), buf, 0600)
}

// Get returns an entry
func (o *OnDisk) Get(ctx context.Context, name string) ([]byte, error) {
	e, err := o.getEntry(name)
	if err != nil {
		return nil, err
	}
	r := e.Latest()
	if r == nil {
		return nil, fmt.Errorf("not found")
	}
	path := filepath.Join(o.dir, r.GetFilename())
	out.Debug(ctx, "Get(%s) - Reading from %s", name, path)
	return ioutil.ReadFile(path)
}

func filename(buf []byte) string {
	sum := fmt.Sprintf("%x", sha256.Sum256(buf))
	return filepath.Join(sum[0:2], sum[2:])
}

// Set creates a new revision for an entry
func (o *OnDisk) Set(ctx context.Context, name string, value []byte) error {
	fn := filename(value)
	fp := filepath.Join(o.dir, filename(value))
	if err := os.MkdirAll(filepath.Dir(fp), 0700); err != nil {
		return err
	}
	if err := ioutil.WriteFile(fp, value, 0600); err != nil {
		return err
	}
	out.Debug(ctx, "Set(%s) - Wrote to %s", name, fp)
	e := o.getOrCreateEntry(ctx, name)
	msg := "Updated " + fn
	if cm := ctxutil.GetCommitMessage(ctx); cm != "" {
		msg = cm
	}
	e.Revisions = append(e.Revisions, &gpb.Revision{
		Created: &timestamppb.Timestamp{
			Seconds: time.Now().Unix(),
		},
		Message:  msg,
		Filename: fn,
	})
	out.Debug(ctx, "Set(%s) - Added Revision", name)
	o.idx.Entries[name] = e
	return o.saveIndex()
}

func (o *OnDisk) getEntry(name string) (*gpb.Entry, error) {
	em := o.idx.GetEntries()
	if em == nil {
		return nil, fmt.Errorf("not found")
	}
	e, found := em[name]
	if !found {
		return nil, fmt.Errorf("not found")
	}
	return e, nil
}

func (o *OnDisk) getOrCreateEntry(ctx context.Context, name string) *gpb.Entry {
	if e, found := o.idx.Entries[name]; found && e != nil {
		return e
	}
	out.Debug(ctx, "getEntry(%s) - Created new Entry", name)
	return &gpb.Entry{
		Name:      name,
		Revisions: make([]*gpb.Revision, 0, 1),
	}
}

// Delete removes an entry
func (o *OnDisk) Delete(ctx context.Context, name string) error {
	if !o.Exists(ctx, name) {
		out.Debug(ctx, "Delete(%s) - Not adding tombstone for non-existing entry", name)
		return nil
	}
	// add tombstone
	e := o.getOrCreateEntry(ctx, name)
	e.Delete(ctxutil.GetCommitMessage(ctx))
	o.idx.Entries[name] = e

	out.Debug(ctx, "Delete(%s) - Added tombstone", name)
	return o.saveIndex()
}

// Exists checks if an entry exists
func (o *OnDisk) Exists(ctx context.Context, name string) bool {
	_, found := o.idx.Entries[name]
	out.Debug(ctx, "Exists(%s): %t", name, found)
	return found
}

// List lists all entries
func (o *OnDisk) List(ctx context.Context, prefix string) ([]string, error) {
	res := make([]string, 0, len(o.idx.Entries))
	for k, v := range o.idx.Entries {
		if v.IsDeleted() {
			continue
		}
		if strings.HasPrefix(k, prefix) {
			res = append(res, k)
		}
	}
	return res, nil
}

// IsDir is not supported
func (o *OnDisk) IsDir(ctx context.Context, name string) bool {
	return false
}

// Prune removes all entries with a given prefix
func (o *OnDisk) Prune(ctx context.Context, prefix string) error {
	l, _ := o.List(ctx, prefix)
	for _, e := range l {
		if err := o.Delete(ctx, e); err != nil {
			return err
		}
	}
	return nil
}

// Name returns ondisk
func (o *OnDisk) Name() string {
	return name
}

// Version returns 1.0.0
func (o *OnDisk) Version(context.Context) semver.Version {
	return semver.Version{Major: 1}
}

// String returns the name and path
func (o *OnDisk) String() string {
	return fmt.Sprintf("%s(path: %s)", name, o.dir)
}

// Available always returns nil
func (o *OnDisk) Available(ctx context.Context) error {
	return nil
}

// Compact will prune all deleted entries and truncate every other entry
// to the last maxRev revisions.
func (o *OnDisk) Compact(ctx context.Context) error {
	for k, v := range o.idx.Entries {
		if v.IsDeleted() && time.Since(v.Latest().Time()) > delTTL {
			delete(o.idx.Entries, k)
			continue
		}
		sort.Sort(gpb.ByRevision(o.idx.Entries[k].Revisions))
		if len(o.idx.Entries[k].Revisions) > maxRev {
			o.idx.Entries[k].Revisions = o.idx.Entries[k].Revisions[0:maxRev]
		}
	}
	return o.saveIndex()
}
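A minimal round-trip sketch (not part of the commit; the directory and entry name are made up) using the OnDisk store defined above; note that the value passed to Set would normally already be ciphertext, since this backend only handles storage:

package main

import (
	"context"
	"fmt"

	"github.com/gopasspw/gopass/pkg/backend/storage/kv/ondisk"
)

func main() {
	ctx := context.Background()
	st, err := ondisk.New("/tmp/gopass-ondisk") // assumed, writable base directory
	if err != nil {
		panic(err)
	}
	// The payload would normally be produced by the crypto backend.
	if err := st.Set(ctx, "websites/example.com", []byte("ciphertext")); err != nil {
		panic(err)
	}
	buf, err := st.Get(ctx, "websites/example.com")
	if err != nil {
		panic(err)
	}
	fmt.Printf("read back %d bytes; revisions are tracked in index.pb\n", len(buf))
}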
pkg/backend/storage/ondisk.go (new file, 3 lines)
@@ -0,0 +1,3 @@
package storage

import _ "github.com/gopasspw/gopass/pkg/backend/storage/kv/ondisk" // register on-disk backend
@@ -35,6 +35,7 @@ const (
	ctxKeyAutoPrint
	ctxKeyGitInit
	ctxKeyForce
	ctxKeyCommitMessage
)

// WithGlobalFlags parses any global flags from the cli context and returns
@@ -563,15 +564,29 @@ func WithForce(ctx context.Context, bv bool) context.Context {

// HasForce returns true if the context has the force flag set
func HasForce(ctx context.Context) bool {
-	_, ok := ctx.Value(ctxKeyForce).(bool)
-	return ok
+	return hasBool(ctx, ctxKeyForce)
}

// IsForce returns the force flag value or the default (false)
func IsForce(ctx context.Context) bool {
-	bv, ok := ctx.Value(ctxKeyForce).(bool)
-	if !ok {
-		return false
-	}
-	return bv
+	return is(ctx, ctxKeyForce, false)
}
+
+// WithCommitMessage returns a context with a commit message set
+func WithCommitMessage(ctx context.Context, sv string) context.Context {
+	return context.WithValue(ctx, ctxKeyCommitMessage, sv)
+}
+
+// HasCommitMessage returns true if the commit message was set
+func HasCommitMessage(ctx context.Context) bool {
+	return hasBool(ctx, ctxKeyCommitMessage)
+}
+
+// GetCommitMessage returns the set commit message or an empty string
+func GetCommitMessage(ctx context.Context) string {
+	sv, ok := ctx.Value(ctxKeyCommitMessage).(string)
+	if !ok {
+		return ""
+	}
+	return sv
+}
pkg/ctxutil/helper.go (new file, 20 lines)
@@ -0,0 +1,20 @@
package ctxutil

import "context"

// hasBool is a helper function for checking if a bool has been set in
// the provided context.
func hasBool(ctx context.Context, key contextKey) bool {
	_, ok := ctx.Value(key).(bool)
	return ok
}

// is is a helper function for returning the value of a bool from the context
// or the provided default.
func is(ctx context.Context, key contextKey, def bool) bool {
	bv, ok := ctx.Value(key).(bool)
	if !ok {
		return def
	}
	return bv
}
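To illustrate the difference between the two helpers: hasBool reports whether the flag was set at all, while is returns the stored value or the supplied default. A self-contained sketch of the same pattern (independent of the gopass packages):

package main

import (
	"context"
	"fmt"
)

type contextKey int

const ctxKeyForce contextKey = iota

func hasBool(ctx context.Context, key contextKey) bool {
	_, ok := ctx.Value(key).(bool)
	return ok
}

func is(ctx context.Context, key contextKey, def bool) bool {
	bv, ok := ctx.Value(key).(bool)
	if !ok {
		return def
	}
	return bv
}

func main() {
	ctx := context.Background()
	fmt.Println(hasBool(ctx, ctxKeyForce), is(ctx, ctxKeyForce, false)) // false false
	ctx = context.WithValue(ctx, ctxKeyForce, true)
	fmt.Println(hasBool(ctx, ctxKeyForce), is(ctx, ctxKeyForce, false)) // true true
}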
@@ -99,12 +99,14 @@ func (r *Store) initialize(ctx context.Context) error {
		ctx = backend.WithRCSBackend(ctx, r.cfg.Root.Path.RCS)
	}
	if !backend.HasStorageBackend(ctx) {
		out.Debug(ctx, "Using default storage backend: %s", r.cfg.Root.Path.Storage)
		ctx = backend.WithStorageBackend(ctx, r.cfg.Root.Path.Storage)
	}
	bu, err := backend.ParseURL(r.url.String())
	if err != nil {
		return errors.Wrapf(err, "failed to parse backend URL '%s': %s", r.url.String(), err)
	}
	out.Debug(ctx, "initialize - %s", bu.String())
	s, err := sub.New(ctx, r.cfg, "", bu, r.cfg.Directory())
	if err != nil {
		return errors.Wrapf(err, "failed to initialize the root store at '%s': %s", r.url.String(), err)