From 4de510778b6e55a2238374834d00b03726da76d4 Mon Sep 17 00:00:00 2001
From: Laria Carolin Chabowski
Date: Tue, 3 Oct 2017 14:59:45 +0200
Subject: Documentation

---
 config/config.go           | 30 ++++++++++++++++++++++++++++++
 objects/object.go          |  3 +++
 objects/object_blob.go     |  1 +
 objects/object_file.go     | 25 +++++++++++++++++++++----
 objects/object_snapshot.go |  5 +++++
 objects/object_tree.go     | 38 ++++++++++++++++++++++++++++----------
 objects/properties.go      | 13 +++++++------
 objects/properties_test.go | 22 +++++++++++-----------
 storage/cloud/swift.go     |  2 ++
 storage/filter/filter.go   | 15 +++++++++++++++
 storage/local/local.go     |  7 +++++++
 storage/memory/memory.go   |  3 +++
 12 files changed, 133 insertions(+), 31 deletions(-)

diff --git a/config/config.go b/config/config.go
index 46d1b47..b643477 100644
--- a/config/config.go
+++ b/config/config.go
@@ -1,3 +1,33 @@
+// Package config provides methods for configuring the petrific binary.
+//
+// The configuration file is located in `$XDG_CONFIG_HOME/petrific/config.toml`,
+// where `$XDG_CONFIG_HOME` is typically `~/.config`.
+//
+// The configuration file is a TOML file. Its main purpose is to define the storage
+// backends to use; it also defines which GPG key to use for snapshot signing.
+//
+// Here is an example configuration file:
+//
+// # This config key defines the default storage backend, as defined below ([storage.local_compressed])
+// default_storage = "local_compressed"
+//
+// [signing]
+// # Use this GPG key to sign snapshots
+// key = "0123456789ABCDEF0123456789ABCDEF01234567"
+//
+// # The storage.* sections define storage backends.
+// # Every section must contain the key `method`; the other keys depend on the selected method.
+// # For more details see the documentation for ../storage
+//
+// [storage.local]
+// method="local"
+// path="~/.local/share/petrific"
+//
+// [storage.local_compressed]
+// method="filter"
+// base="local"
+// encode=["zlib-flate", "-compress"]
+// decode=["zlib-flate", "-uncompress"]
 package config
 
 import (
diff --git a/objects/object.go b/objects/object.go
index 1bc9dbf..f9907f2 100644
--- a/objects/object.go
+++ b/objects/object.go
@@ -23,6 +23,9 @@ var AllObjectTypes = []ObjectType{
 	OTSnapshot,
 }
 
+// RawObject describes a serialized object plus its type header. This is the content that will be saved.
+// It is serialized as the type (ObjectType), a space byte, the size of the payload in bytes (encoded as a decimal ASCII number), a newline ('\n') character and the payload.
+// The encoding of the payload depends on the type.
 type RawObject struct {
 	Type    ObjectType
 	Payload []byte
diff --git a/objects/object_blob.go b/objects/object_blob.go
index c35cb2a..a6b3f7b 100644
--- a/objects/object_blob.go
+++ b/objects/object_blob.go
@@ -1,5 +1,6 @@
 package objects
 
+// Blob is an object containing raw bytes. Its serialized payload is just that content.
 type Blob []byte
 
 func (b Blob) Type() ObjectType {
diff --git a/objects/object_file.go b/objects/object_file.go
index 7551193..6143ec2 100644
--- a/objects/object_file.go
+++ b/objects/object_file.go
@@ -7,16 +7,20 @@ import (
 	"strconv"
 )
 
+// FileFragment describes a fragment of a file. It consists of the ID of a blob and the size of that blob.
+// The size is not necessary for reconstruction (the blob object already has a size),
+// but it can speed up random access to the whole file by skipping previous fragments.
+// It is serialized as `Properties` (see there) with the keys `blob` (the ID of the blob object) and `size` (the decimal size of the blob).
 type FileFragment struct {
 	Blob ObjectId
 	Size uint64
 }
 
-func (ff FileFragment) toProperties() properties {
-	return properties{"blob": ff.Blob.String(), "size": strconv.FormatUint(ff.Size, 10)}
+func (ff FileFragment) toProperties() Properties {
+	return Properties{"blob": ff.Blob.String(), "size": strconv.FormatUint(ff.Size, 10)}
 }
 
-func (ff *FileFragment) fromProperties(p properties) error {
+func (ff *FileFragment) fromProperties(p Properties) error {
 	blob, ok := p["blob"]
 	if !ok {
 		return errors.New("Field `blob` is missing")
@@ -41,6 +45,19 @@ func (a FileFragment) Equals(b FileFragment) bool {
 	return a.Blob.Equals(b.Blob) && a.Size == b.Size
 }
 
+// File describes a file object which ties together multiple Blob objects into a file.
+// It is an ordered list of `FileFragment`s. The referenced blobs, concatenated in the given order, are the content of the file.
+//
+// Example:
+//
+// blob=sha3-256:1111111111111111111111111111111111111111111111111111111111111111&size=123
+// blob=sha3-256:1111111111111111111111111111111111111111111111111111111111111111&size=123
+// blob=sha3-256:2222222222222222222222222222222222222222222222222222222222222222&size=10
+//
+// The file described by this object is 123+123+10 = 256 bytes long and consists of the blobs 111... (two times) and 222...
+//
+// Since the blob IDs and sizes don't change unless the file itself changes and the format always serializes the same way,
+// serializing the same file twice results in exactly the same object with exactly the same ID. It will therefore only be stored once.
 type File []FileFragment
 
 func (f File) Type() ObjectType {
@@ -72,7 +89,7 @@ func (f *File) FromPayload(payload []byte) error {
 			continue
 		}
 
-		props := make(properties)
+		props := make(Properties)
 		if err := props.UnmarshalText(line); err != nil {
 			return nil
 		}
diff --git a/objects/object_snapshot.go b/objects/object_snapshot.go
index e86484d..49ca463 100644
--- a/objects/object_snapshot.go
+++ b/objects/object_snapshot.go
@@ -17,6 +17,11 @@ const (
 	snapshot_end_line   = snapshot_end_marker + "\n"
 )
 
+// Snapshot objects describe the state of a directory structure at a given time. A snapshot references a tree object by its ID,
+// records the snapshot time and associates it with an archive (which is just a short freeform text grouping multiple snapshots together).
+// A snapshot can optionally contain a comment and can be signed with a GPG key.
+// If the snapshot is signed and you trust the signature, you can automatically trust the whole associated file tree,
+// since all references are really cryptographic hashes, guaranteeing data integrity.
 type Snapshot struct {
 	Tree ObjectId
 	Date time.Time
diff --git a/objects/object_tree.go b/objects/object_tree.go
index 609aa7e..2708aaa 100644
--- a/objects/object_tree.go
+++ b/objects/object_tree.go
@@ -29,7 +29,7 @@ type TreeEntry interface {
 	User() string
 	Group() string
 	equalContent(TreeEntry) bool
-	toProperties() properties
+	toProperties() Properties
 }
 
 func compareTreeEntries(a, b TreeEntry) bool {
@@ -62,8 +62,8 @@ func (teb TreeEntryBase) Group() string {
 	return teb.group
 }
 
-func (teb TreeEntryBase) toProperties() properties {
-	props := properties{"acl": teb.acl.String()}
+func (teb TreeEntryBase) toProperties() Properties {
+	props := Properties{"acl": teb.acl.String()}
 	if teb.user != "" {
 		props["user"] = teb.user
 	}
@@ -93,7 +93,7 @@ func (tef TreeEntryFile) Type() TreeEntryType {
 	return TETFile
 }
 
-func (tef TreeEntryFile) toProperties() properties {
+func (tef TreeEntryFile) toProperties() Properties {
 	props := tef.TreeEntryBase.toProperties()
 	props["ref"] = tef.Ref.String()
 	return props
@@ -120,7 +120,7 @@ func (ted TreeEntryDir) Type() TreeEntryType {
 	return TETDir
 }
 
-func (ted TreeEntryDir) toProperties() properties {
+func (ted TreeEntryDir) toProperties() Properties {
 	props := ted.TreeEntryBase.toProperties()
 	props["ref"] = ted.Ref.String()
 	return props
@@ -147,7 +147,7 @@ func (tes TreeEntrySymlink) Type() TreeEntryType {
 	return TETSymlink
 }
 
-func (tes TreeEntrySymlink) toProperties() properties {
+func (tes TreeEntrySymlink) toProperties() Properties {
 	props := tes.TreeEntryBase.toProperties()
 	props["target"] = tes.Target
 	return props
@@ -158,6 +158,24 @@ func (a TreeEntrySymlink) equalContent(_b TreeEntry) bool {
 	return ok && a.TreeEntryBase.equalContent(b.TreeEntryBase) && a.Target == b.Target
 }
 
+// Tree objects represent a filesystem tree / directory.
+// A tree contains references to files (see `File`), symlinks and other trees, plus their metadata.
+// It is serialized as a sorted list of `Properties` serializations (separated by newline '\n' characters).
+// All entries have the property keys "name" and "type" (and optionally "user", "group" and "acl", representing a POSIX ACL.
+// Currently only the execution bit is actually considered. Choosing POSIX ACLs gives us the
+// possibility to extend the privilege system later).
+// Further keys depend on the value of type:
+//
+// type=file, type=dir =>
+//
+// ref: Holds the ID referencing a file / subtree
+//
+// type=symlink =>
+//
+// target: Holds the (relative) symlink path
+//
+// The Properties format allows easy extension in the future while remaining compatible
+// with older versions (they then simply ignore the additional properties).
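+//
+// For illustration only (a hypothetical, hand-written example: the object IDs are made up, and the
+// always-present "acl" property as well as the optional "user" and "group" properties are omitted for
+// brevity), a tree containing a file, a subdirectory and a symlink would serialize roughly like this:
+//
+// name=docs&ref=sha3-256:2222222222222222222222222222222222222222222222222222222222222222&type=dir
+// name=hello.txt&ref=sha3-256:1111111111111111111111111111111111111111111111111111111111111111&type=file
+// name=latest&target=hello.txt&type=symlink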
 type Tree map[string]TreeEntry
 
 func (t Tree) Type() ObjectType {
@@ -188,7 +206,7 @@ func (t Tree) Payload() (out []byte) {
 	return
 }
 
-func getObjectIdFromProps(p properties, key string) (ObjectId, error) {
+func getObjectIdFromProps(p Properties, key string) (ObjectId, error) {
 	raw, ok := p[key]
 	if !ok {
 		return ObjectId{}, fmt.Errorf("Missing key: %s", key)
@@ -198,7 +216,7 @@ func getObjectIdFromProps(p properties, key string) (ObjectId, error) {
 	return oid, err
 }
 
-func defaultFileTreeEntryBase(_acl *acl.ACL, props properties) (base TreeEntryBase) {
+func defaultFileTreeEntryBase(_acl *acl.ACL, props Properties) (base TreeEntryBase) {
 	base.user = props["user"]
 	base.group = props["group"]
 	if _acl == nil {
@@ -209,7 +227,7 @@ func defaultFileTreeEntryBase(_acl *acl.ACL, props properties) (base TreeEntryBa
 	return
 }
 
-func defaultDirTreeEntryBase(_acl *acl.ACL, props properties) (base TreeEntryBase) {
+func defaultDirTreeEntryBase(_acl *acl.ACL, props Properties) (base TreeEntryBase) {
 	base.user = props["user"]
 	base.group = props["group"]
 	if _acl == nil {
@@ -229,7 +247,7 @@ func (t Tree) FromPayload(payload []byte) error {
 			continue
 		}
 
-		props := make(properties)
+		props := make(Properties)
 		if err := props.UnmarshalText(line); err != nil {
 			return err
 		}
diff --git a/objects/properties.go b/objects/properties.go
index 8c92932..2e8c8ac 100644
--- a/objects/properties.go
+++ b/objects/properties.go
@@ -7,9 +7,10 @@ import (
 	"sort"
 )
 
-// properties are mappings from strings to strings that are encoded as a restricted version of URL query strings
-// (only the characters [a-zA-Z0-9.:_-] are allowed, values are ordered by their key)
-type properties map[string]string
+// Properties are mappings from strings to strings that are encoded as a restricted version of URL query strings
+// (only the characters [a-zA-Z0-9.:_-] are allowed, values are ordered by their key.
+// This ordering and these character restrictions guarantee reproducible serialization)
+type Properties map[string]string
 
 // escapePropertyString escapes all bytes not in [a-zA-Z0-9.,:_-] as %XX, where XX represents the hexadecimal value of the byte.
 // Compatible with URL query strings
@@ -29,7 +30,7 @@ func escapePropertyString(s string) []byte {
 	return out
 }
 
-func (p properties) MarshalText() ([]byte, error) { // Guaranteed to not fail, error is only here to satisfy encoding.TextMarshaler
+func (p Properties) MarshalText() ([]byte, error) { // Guaranteed to not fail, error is only here to satisfy encoding.TextMarshaler
 	keys := make([]string, len(p))
 	i := 0
 	for k := range p {
@@ -57,7 +58,7 @@ func (p properties) MarshalText() ([]byte, error) { // Guaranteed to not fail, e
 	return out, nil
 }
 
-func (p properties) UnmarshalText(text []byte) error {
+func (p Properties) UnmarshalText(text []byte) error {
 	vals, err := url.ParseQuery(string(text))
 	if err != nil {
 		return err
@@ -74,7 +75,7 @@ func (p properties) UnmarshalText(text []byte) error {
 	return nil
 }
 
-func (a properties) Equals(b properties) bool {
+func (a Properties) Equals(b Properties) bool {
 	for k, va := range a {
 		vb, ok := b[k]
 		if !ok || vb != va {
diff --git a/objects/properties_test.go b/objects/properties_test.go
index 96df5c8..7243ce3 100644
--- a/objects/properties_test.go
+++ b/objects/properties_test.go
@@ -22,13 +22,13 @@ func TestPropEscape(t *testing.T) {
 func TestPropertyMarshalling(t *testing.T) {
 	tests := []struct {
 		name string
-		in   properties
+		in   Properties
 		want string
 	}{
-		{"empty", properties{}, ""},
-		{"single", properties{"foo": "bar"}, "foo=bar"},
-		{"simple", properties{"foo": "bar", "bar": "baz"}, "bar=baz&foo=bar"},
-		{"escapes", properties{"foo&bar": "%=baz", "?": "!"}, "%3f=%21&foo%26bar=%25%3dbaz"},
+		{"empty", Properties{}, ""},
+		{"single", Properties{"foo": "bar"}, "foo=bar"},
+		{"simple", Properties{"foo": "bar", "bar": "baz"}, "bar=baz&foo=bar"},
+		{"escapes", Properties{"foo&bar": "%=baz", "?": "!"}, "%3f=%21&foo%26bar=%25%3dbaz"},
 	}
 
 	for _, subtest := range tests {
@@ -48,16 +48,16 @@ func TestPropertyUnmarshalling(t *testing.T) {
 	tests := []struct {
 		name string
 		in   string
-		want properties
+		want Properties
 	}{
-		{"empty", "", properties{}},
-		{"single", "foo=bar", properties{"foo": "bar"}},
-		{"simple", "bar=baz&foo=bar", properties{"foo": "bar", "bar": "baz"}},
-		{"escapes", "%3f=%21&foo%26bar=%25%3dbaz", properties{"foo&bar": "%=baz", "?": "!"}},
+		{"empty", "", Properties{}},
+		{"single", "foo=bar", Properties{"foo": "bar"}},
+		{"simple", "bar=baz&foo=bar", Properties{"foo": "bar", "bar": "baz"}},
+		{"escapes", "%3f=%21&foo%26bar=%25%3dbaz", Properties{"foo&bar": "%=baz", "?": "!"}},
 	}
 
 	for _, subtest := range tests {
-		have := make(properties)
+		have := make(Properties)
 
 		err := have.UnmarshalText([]byte(subtest.in))
 		if err != nil {
diff --git a/storage/cloud/swift.go b/storage/cloud/swift.go
index a721f67..f08b1cc 100644
--- a/storage/cloud/swift.go
+++ b/storage/cloud/swift.go
@@ -35,6 +35,8 @@ type SwiftConfig struct {
 	Timeout string `toml:"timeout,omitempty"`
 }
 
+// SwiftStorageCreator creates an object storage that saves its objects to an OpenStack Swift storage.
+// Use the method "openstack-swift" in your config and refer to the `SwiftConfig` structure for additional config keys.
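+//
+// A minimal section might look like this (an illustrative sketch, not taken from real usage: the section name
+// and the `timeout` value are placeholders, and the remaining connection keys defined by `SwiftConfig` are omitted):
+//
+// [storage.my_swift]
+// method="openstack-swift"
+// timeout="60s"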
 func SwiftStorageCreator() storage.CreateStorageFromConfig {
 	return cloudStorageCreator(func(conf config.Config, name string) (CloudStorage, error) {
 		var storage_conf SwiftConfig
diff --git a/storage/filter/filter.go b/storage/filter/filter.go
index 52ecbfd..1628c1e 100644
--- a/storage/filter/filter.go
+++ b/storage/filter/filter.go
@@ -34,6 +34,21 @@ func (pf PipeFilter) Transform(b []byte) ([]byte, error) {
 	return buf.Bytes(), nil
 }
 
+// FilterStorage is a storage implementation that wraps another storage, sending each raw object through an external
+// binary for custom encoding/decoding (think encryption, compression, ...).
+//
+// It is used in a configuration by specifying the method "filter". It needs the config key "base" referencing the name of
+// another configured storage. Also needed are the string lists "decode" and "encode", describing which binary to call
+// with which parameters.
+//
+// For example, here is a configuration for a filter storage wrapping a storage "foo",
+// encrypting the content with GPG for the key "foobar":
+//
+// [storage.foo_encrypted]
+// method="filter"
+// base="foo"
+// encode=["gpg", "--encrypt", "--recipient", "foobar"]
+// decode=["gpg", "--decrypt"]
 type FilterStorage struct {
 	Base           storage.Storage
 	Decode, Encode Filter
diff --git a/storage/local/local.go b/storage/local/local.go
index 7dc59cb..ae39eb5 100644
--- a/storage/local/local.go
+++ b/storage/local/local.go
@@ -20,6 +20,13 @@ func objectDir(id objects.ObjectId) string {
 	return joinPath(string(id.Algo), hex.EncodeToString(id.Sum[0:1]))
 }
 
+// LocalStorage is a storage implementation that saves objects to your local filesystem.
+//
+// Example config:
+//
+// [storage.local_test]
+// method="local"
+// path="~/.local/share/petrific" # Save the objects here
 type LocalStorage struct {
 	Path  string
 	index storage.Index
diff --git a/storage/memory/memory.go b/storage/memory/memory.go
index e73c51a..dc72f70 100644
--- a/storage/memory/memory.go
+++ b/storage/memory/memory.go
@@ -6,6 +6,9 @@ import (
 	"code.laria.me/petrific/storage"
 )
 
+// MemoryStorage is an in-memory storage. It is rather useless when actually using petrific; it is mostly used for internal testing.
+// But if you want to use the memory storage anyway, you can do that by putting a storage section with the method
+// "memory" in your config file.
 type MemoryStorage struct {
 	objects map[string][]byte
 	bytype  map[objects.ObjectType][]objects.ObjectId
-- 
cgit v1.2.3-54-g00ecf