-rw-r--r--  backup/backup.go             | 115
-rw-r--r--  backup/backup_test.go        | 133
-rw-r--r--  cache/cache.go               |  20
-rw-r--r--  fs/fs.go                     |  41
-rw-r--r--  fs/memory.go                 | 130
-rw-r--r--  fs/os.go                     | 133
-rw-r--r--  objects/object.go            |   4
-rw-r--r--  objects/object_tree.go       |  41
-rw-r--r--  objects/object_tree_test.go  |   4
-rw-r--r--  storage/memory_storage.go    |  41
-rw-r--r--  storage/storage.go           |  37
11 files changed, 692 insertions(+), 7 deletions(-)
diff --git a/backup/backup.go b/backup/backup.go
new file mode 100644
index 0000000..9c012e0
--- /dev/null
+++ b/backup/backup.go
@@ -0,0 +1,115 @@
+package backup
+
+import (
+ "code.laria.me/petrific/cache"
+ "code.laria.me/petrific/fs"
+ "code.laria.me/petrific/objects"
+ "code.laria.me/petrific/storage"
+ "io"
+ "time"
+)
+
+func WriteDir(
+ store storage.Storage,
+ abspath string,
+ d fs.Dir,
+ pcache cache.Cache,
+) (objects.ObjectId, error) {
+ children, err := d.Readdir()
+ if err != nil {
+ return objects.ObjectId{}, err
+ }
+
+ infos := make(objects.Tree)
+ for _, c := range children {
+ var info objects.TreeEntry
+
+ switch c.Type() {
+ case fs.FFile:
+ mtime, file_id, ok := pcache.PathUpdated(abspath+"/"+c.Name())
+ if !ok || mtime.Before(c.ModTime()) {
+ // Not in the cache or changed since the cached mtime
+
+ rwc, err := c.(fs.RegularFile).Open()
+ if err != nil {
+ return objects.ObjectId{}, err
+ }
+
+ file_id, err = WriteFile(store, rwc)
+ rwc.Close()
+ if err != nil {
+ return objects.ObjectId{}, err
+ }
+
+ pcache.SetPathUpdated(abspath+"/"+c.Name(), c.ModTime(), file_id)
+ }
+
+ info = objects.NewTreeEntryFile(file_id, c.Executable())
+ case fs.FDir:
+ subtree_id, err := WriteDir(store, abspath+"/"+c.Name(), c.(fs.Dir), pcache)
+ if err != nil {
+ return objects.ObjectId{}, err
+ }
+
+ info = objects.NewTreeEntryDir(subtree_id, c.Executable())
+ case fs.FSymlink:
+ target, err := c.(fs.Symlink).Readlink()
+ if err != nil {
+ return objects.ObjectId{}, err
+ }
+
+ info = objects.NewTreeEntrySymlink(target, c.Executable())
+ }
+
+ if info != nil {
+ infos[c.Name()] = info
+ }
+ }
+
+ return storage.SetObject(store, objects.ToRawObject(infos))
+}
+
+const BlobChunkSize = 16 * 1024 * 1024 // 16MB
+
+func WriteFile(store storage.Storage, r io.Reader) (objects.ObjectId, error) {
+ // For now the file is split into fixed-size chunks of BlobChunkSize bytes.
+ // A dynamic/content-aware chunking method would be more efficient, but it can be added later:
+ // the serialization format allows any chunk size and additional properties while staying compatible.
+
+ fragments := make(objects.File, 0)
+
+ read_buf := make([]byte, BlobChunkSize)
+ for {
+ n, err := io.ReadFull(r, read_buf)
+ if err == nil || err == io.ErrUnexpectedEOF {
+ content := objects.Blob(read_buf[:n])
+ blob_id, err := storage.SetObject(store, objects.ToRawObject(&content))
+ if err != nil {
+ return objects.ObjectId{}, err
+ }
+
+ fragments = append(fragments, objects.FileFragment{Blob: blob_id, Size: uint64(n)})
+ } else if err == io.EOF {
+ break
+ } else {
+ return objects.ObjectId{}, err
+ }
+ }
+
+ return storage.SetObject(store, objects.ToRawObject(&fragments))
+}
+
+func CreateSnapshot(
+ store storage.Storage,
+ tree_id objects.ObjectId,
+ date time.Time,
+ container string,
+ comment string,
+) (objects.ObjectId, error) {
+ return storage.SetObject(store, objects.ToRawObject(&objects.Snapshot{
+ Tree: tree_id,
+ Date: date,
+ Container: container,
+ Comment: comment,
+ }))
+}
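
Taken together, WriteDir, WriteFile and CreateSnapshot form the write path of a backup: serialize the tree, then record a snapshot object pointing at it. A minimal usage sketch (not part of this change; it only uses the APIs added here, with the in-memory backends and illustrative values standing in for real ones):

    package main

    import (
        "fmt"
        "time"

        "code.laria.me/petrific/backup"
        "code.laria.me/petrific/cache"
        "code.laria.me/petrific/fs"
        "code.laria.me/petrific/storage"
    )

    func main() {
        store := storage.NewMemoryStorage() // any storage.Storage implementation
        root := fs.NewMemoryFSRoot("root")  // stand-in for a real directory tree

        // Serialize the directory tree into content-addressed objects.
        treeId, err := backup.WriteDir(store, "", root, cache.NopCache{})
        if err != nil {
            panic(err)
        }

        // Record a snapshot pointing at the resulting tree object.
        snapId, err := backup.CreateSnapshot(store, treeId, time.Now(), "myhost", "initial backup")
        if err != nil {
            panic(err)
        }

        fmt.Println("tree:", treeId.String(), "snapshot:", snapId.String())
    }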
diff --git a/backup/backup_test.go b/backup/backup_test.go
new file mode 100644
index 0000000..2fee26e
--- /dev/null
+++ b/backup/backup_test.go
@@ -0,0 +1,133 @@
+package backup
+
+import (
+ "bytes"
+ "code.laria.me/petrific/cache"
+ "code.laria.me/petrific/fs"
+ "code.laria.me/petrific/objects"
+ "code.laria.me/petrific/storage"
+ "testing"
+)
+
+func wantObject(
+ t *testing.T,
+ s storage.Storage,
+ id_str string,
+ want []byte,
+) {
+ id, err := objects.ParseObjectId(id_str)
+ if err != nil {
+ t.Errorf("Could not parse id: %s", err)
+ return
+ }
+
+ have, err := s.Get(id)
+ if err != nil {
+ t.Errorf("Could not get %s: %s", id, err)
+ return
+ }
+
+ if !bytes.Equal(want, have) {
+ t.Errorf("Wrong result for %s: (size=%d) %#s", id, len(have), have)
+ }
+}
+
+func TestWriteLargeFile(t *testing.T) {
+ s := storage.NewMemoryStorage()
+
+ id, err := WriteFile(s, bytes.NewReader(make([]byte, 2*BlobChunkSize+100)))
+ if err != nil {
+ t.Fatalf("Unexpected error when writing file: %s", err)
+ }
+
+ if id.String() != "sha3-256:ab7907ee6b45b343422a0354de500bcf99f5ff69fe8125be84e43d421803c34e" {
+ t.Errorf("Unexpected file id: %s", id)
+ }
+
+ want_large_blob := append([]byte("blob 16777216\n"), make([]byte, BlobChunkSize)...)
+ want_small_blob := append([]byte("blob 100\n"), make([]byte, 100)...)
+ want_file := []byte("file 274\n" +
+ "blob=sha3-256:7287cbb09bdd8a0d96a6f6297413cd9d09a2763814636245a5a44120e6351be3&size=16777216\n" +
+ "blob=sha3-256:7287cbb09bdd8a0d96a6f6297413cd9d09a2763814636245a5a44120e6351be3&size=16777216\n" +
+ "blob=sha3-256:ddf124464f7b80e95f4a9c704f79e7037ff5d731648ba6b40c769893b428128c&size=100\n")
+
+ wantObject(t, s, "sha3-256:ab7907ee6b45b343422a0354de500bcf99f5ff69fe8125be84e43d421803c34e", want_file)
+ wantObject(t, s, "sha3-256:7287cbb09bdd8a0d96a6f6297413cd9d09a2763814636245a5a44120e6351be3", want_large_blob)
+ wantObject(t, s, "sha3-256:ddf124464f7b80e95f4a9c704f79e7037ff5d731648ba6b40c769893b428128c", want_small_blob)
+}
+
+func mkfile(t *testing.T, d fs.Dir, name string, exec bool, content []byte) {
+ f, err := d.CreateChildFile(name, exec)
+ if err != nil {
+ t.Fatalf("Could not create file %s: %s", name, err)
+ }
+
+ rwc, err := f.Open()
+ if err != nil {
+ t.Fatalf("Could not create file %s: %s", name, err)
+ }
+ defer rwc.Close()
+
+ if _, err := rwc.Write(content); err != nil {
+ t.Fatalf("Could not create file %s: %s", name, err)
+ }
+}
+
+func TestWriteDir(t *testing.T) {
+ s := storage.NewMemoryStorage()
+
+ root := fs.NewMemoryFSRoot("root")
+
+ mkfile(t, root, "foo", false, []byte("foo"))
+ mkfile(t, root, "bar", true, []byte(""))
+ if _, err := root.CreateChildSymlink("baz", "foo"); err != nil {
+ t.Fatalf("Failed creating symlink baz: %s", err)
+ }
+ d, err := root.CreateChildDir("sub")
+ if err != nil {
+ t.Fatalf("Failed creating dir: %s", err)
+ }
+ mkfile(t, d, "a", false, []byte(""))
+ if _, err = d.CreateChildDir("b"); err != nil {
+ t.Fatalf("Failed creating dir: %s", err)
+ }
+
+ id, err := WriteDir(s, "", root, cache.NopCache{})
+ if err != nil {
+ t.Fatalf("Could not WriteDir: %s", err)
+ }
+
+ if id.String() != "sha3-256:09e881f57befa1eacec744e3857a36f0d9d5dd1fa72ba96564b467a3d7d0c0d5" {
+ t.Errorf("Unexpected dir id: %s", id)
+ }
+
+ //4a10682307d5b5dc072d1b862497296640176109347b149aad38cd640000491b
+ obj_emptyfile := []byte("file 0\n")
+
+ //ba632076629ff33238850c870fcb51e4b7b67b3d9dcb66314adbcf1770a5fea7
+ obj_fooblob := []byte("blob 3\nfoo")
+ //fa50ca1fc739852528ecc149b424a8ccbdf84b73c8718cde4525f2a410d79244
+ obj_foofile := []byte("file 86\nblob=sha3-256:ba632076629ff33238850c870fcb51e4b7b67b3d9dcb66314adbcf1770a5fea7&size=3\n")
+
+ //1dc6fae780ae4a1e823a5b8e26266356a2e1d22e5904b0652dcff6e3c0e72067
+ obj_emptytree := []byte("tree 0\n")
+
+ //f1716a1b0cad23b6faab9712243402b8f8e7919c377fc5d5d87bd465cef056d7
+ obj_subdir := []byte("tree 239\n" +
+ "acl=u::rw-,g::r--,o::r--&name=a&ref=sha3-256:4a10682307d5b5dc072d1b862497296640176109347b149aad38cd640000491b&type=file\n" +
+ "acl=u::rwx,g::r-x,o::r-x&name=b&ref=sha3-256:1dc6fae780ae4a1e823a5b8e26266356a2e1d22e5904b0652dcff6e3c0e72067&type=dir\n")
+
+ //09e881f57befa1eacec744e3857a36f0d9d5dd1fa72ba96564b467a3d7d0c0d5
+ obj_dir := []byte("tree 423\n" +
+ "acl=u::rw-,g::r--,o::r--&name=baz&target=foo&type=symlink\n" +
+ "acl=u::rw-,g::r--,o::r--&name=foo&ref=sha3-256:fa50ca1fc739852528ecc149b424a8ccbdf84b73c8718cde4525f2a410d79244&type=file\n" +
+ "acl=u::rwx,g::r-x,o::r-x&name=bar&ref=sha3-256:4a10682307d5b5dc072d1b862497296640176109347b149aad38cd640000491b&type=file\n" +
+ "acl=u::rwx,g::r-x,o::r-x&name=sub&ref=sha3-256:f1716a1b0cad23b6faab9712243402b8f8e7919c377fc5d5d87bd465cef056d7&type=dir\n")
+
+ wantObject(t, s, "sha3-256:4a10682307d5b5dc072d1b862497296640176109347b149aad38cd640000491b", obj_emptyfile)
+ wantObject(t, s, "sha3-256:ba632076629ff33238850c870fcb51e4b7b67b3d9dcb66314adbcf1770a5fea7", obj_fooblob)
+ wantObject(t, s, "sha3-256:fa50ca1fc739852528ecc149b424a8ccbdf84b73c8718cde4525f2a410d79244", obj_foofile)
+ wantObject(t, s, "sha3-256:1dc6fae780ae4a1e823a5b8e26266356a2e1d22e5904b0652dcff6e3c0e72067", obj_emptytree)
+ wantObject(t, s, "sha3-256:f1716a1b0cad23b6faab9712243402b8f8e7919c377fc5d5d87bd465cef056d7", obj_subdir) //!
+ wantObject(t, s, "sha3-256:09e881f57befa1eacec744e3857a36f0d9d5dd1fa72ba96564b467a3d7d0c0d5", obj_dir) //!
+}
diff --git a/cache/cache.go b/cache/cache.go
new file mode 100644
index 0000000..32b8325
--- /dev/null
+++ b/cache/cache.go
@@ -0,0 +1,20 @@
+package cache
+
+import (
+ "code.laria.me/petrific/objects"
+ "time"
+)
+
+type Cache interface {
+ PathUpdated(path string) (mtime time.Time, id objects.ObjectId, ok bool)
+ SetPathUpdated(path string, mtime time.Time, id objects.ObjectId)
+}
+
+type NopCache struct{}
+
+func (NopCache) PathUpdated(_ string) (_ time.Time, _ objects.ObjectId, ok bool) {
+ ok = false
+ return
+}
+
+func (NopCache) SetPathUpdated(_ string, _ time.Time, _ objects.ObjectId) {}
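
NopCache is the only Cache implementation in this commit; WriteDir uses the interface to skip re-reading files whose cached mtime is not older than the file's mtime. A hypothetical in-memory implementation could look like this (sketch only; MemCache and memCacheEntry are illustrative names, not part of this change):

    package cache

    import (
        "time"

        "code.laria.me/petrific/objects"
    )

    type memCacheEntry struct {
        mtime time.Time
        id    objects.ObjectId
    }

    // MemCache maps file paths to the mtime and object id seen at the last backup.
    type MemCache map[string]memCacheEntry

    func (c MemCache) PathUpdated(path string) (time.Time, objects.ObjectId, bool) {
        e, ok := c[path]
        return e.mtime, e.id, ok
    }

    func (c MemCache) SetPathUpdated(path string, mtime time.Time, id objects.ObjectId) {
        c[path] = memCacheEntry{mtime: mtime, id: id}
    }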
diff --git a/fs/fs.go b/fs/fs.go
new file mode 100644
index 0000000..cbfeb44
--- /dev/null
+++ b/fs/fs.go
@@ -0,0 +1,41 @@
+package fs
+
+import (
+ "io"
+ "time"
+)
+
+type FileType string
+
+const (
+ FFile FileType = "file"
+ FDir FileType = "dir"
+ FSymlink FileType = "symlink"
+)
+
+type File interface {
+ Type() FileType // Depending on type, the File must also implement RegularFile (FFile), Dir (FDir) or Symlink (FSymlink)
+ Name() string
+ Executable() bool // For now we will only record the executable bit instead of all permission bits
+ ModTime() time.Time
+ Delete() error
+}
+
+type RegularFile interface {
+ File
+ Open() (io.ReadWriteCloser, error)
+}
+
+type Dir interface {
+ File
+ Readdir() ([]File, error)
+
+ CreateChildFile(name string, exec bool) (RegularFile, error)
+ CreateChildDir(name string) (Dir, error)
+ CreateChildSymlink(name string, target string) (Symlink, error)
+}
+
+type Symlink interface {
+ File
+ Readlink() (string, error)
+}
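
The three narrower interfaces mirror the object model: a caller switches on Type() and asserts to RegularFile, Dir or Symlink, exactly as WriteDir does. A small sketch of a recursive walk built only on these interfaces (hypothetical helper, not part of this change):

    package main

    import (
        "fmt"

        "code.laria.me/petrific/fs"
    )

    // walk prints every entry below d; directories are descended into via a
    // type assertion to fs.Dir.
    func walk(d fs.Dir, prefix string) error {
        children, err := d.Readdir()
        if err != nil {
            return err
        }
        for _, c := range children {
            path := prefix + "/" + c.Name()
            fmt.Println(path, c.Type())
            if c.Type() == fs.FDir {
                if err := walk(c.(fs.Dir), path); err != nil {
                    return err
                }
            }
        }
        return nil
    }

    func main() {
        root := fs.NewMemoryFSRoot("root")
        root.CreateChildDir("sub") // error ignored for brevity
        if err := walk(root, ""); err != nil {
            fmt.Println("walk failed:", err)
        }
    }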
diff --git a/fs/memory.go b/fs/memory.go
new file mode 100644
index 0000000..5f5d327
--- /dev/null
+++ b/fs/memory.go
@@ -0,0 +1,130 @@
+package fs
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "time"
+)
+
+type memfsBase struct {
+ parent *memfsDir
+ name string
+ exec bool
+ mtime time.Time
+}
+
+func (b memfsBase) Name() string { return b.name }
+func (b memfsBase) Executable() bool { return b.exec }
+func (b memfsBase) ModTime() time.Time { return b.mtime }
+
+func (b memfsBase) Delete() error {
+ if b.parent == nil {
+ return errors.New("Root entry can not be deleted")
+ }
+ b.parent.deleteChild(b.name)
+ return nil
+}
+
+type memfsFile struct {
+ memfsBase
+ content *bytes.Buffer
+}
+
+func (memfsFile) Type() FileType { return FFile }
+
+func (f memfsFile) Open() (io.ReadWriteCloser, error) {
+ return f, nil
+}
+
+func (f memfsFile) Read(p []byte) (int, error) {
+ return f.content.Read(p)
+}
+
+func (f memfsFile) Write(p []byte) (int, error) {
+ return f.content.Write(p)
+}
+
+func (memfsBase) Close() error {
+ return nil
+}
+
+type memfsDir struct {
+ memfsBase
+ children map[string]File
+}
+
+func (memfsDir) Type() FileType { return FDir }
+
+func (d memfsDir) Readdir() ([]File, error) {
+ l := make([]File, 0, len(d.children))
+
+ for _, f := range d.children {
+ l = append(l, f)
+ }
+
+ return l, nil
+}
+
+func (d memfsDir) createChildBase(name string, exec bool) memfsBase {
+ return memfsBase{
+ parent: &d,
+ name: name,
+ exec: exec,
+ mtime: time.Now(),
+ }
+}
+
+func (d memfsDir) CreateChildFile(name string, exec bool) (RegularFile, error) {
+ child := memfsFile{
+ memfsBase: d.createChildBase(name, exec),
+ content: new(bytes.Buffer),
+ }
+ d.children[name] = child
+ return child, nil
+}
+
+func (d memfsDir) CreateChildDir(name string) (Dir, error) {
+ child := memfsDir{
+ memfsBase: d.createChildBase(name, true),
+ children: make(map[string]File),
+ }
+ d.children[name] = child
+ return child, nil
+}
+
+func (d memfsDir) CreateChildSymlink(name string, target string) (Symlink, error) {
+ child := memfsSymlink{
+ memfsBase: d.createChildBase(name, false),
+ target: target,
+ }
+ d.children[name] = child
+ return child, nil
+}
+
+func (d *memfsDir) deleteChild(name string) {
+ delete(d.children, name)
+}
+
+func NewMemoryFSRoot(name string) Dir {
+ return memfsDir{
+ memfsBase: memfsBase{
+ parent: nil,
+ name: name,
+ exec: true,
+ mtime: time.Now(),
+ },
+ children: make(map[string]File),
+ }
+}
+
+type memfsSymlink struct {
+ memfsBase
+ target string
+}
+
+func (memfsSymlink) Type() FileType { return FSymlink }
+
+func (s memfsSymlink) Readlink() (string, error) {
+ return s.target, nil
+}
diff --git a/fs/os.go b/fs/os.go
new file mode 100644
index 0000000..f251c66
--- /dev/null
+++ b/fs/os.go
@@ -0,0 +1,133 @@
+package fs
+
+import (
+ "io"
+ "os"
+ "time"
+)
+
+func openOSFile(path string) (osFile, error) {
+ fi, err := os.Lstat(path)
+ if err != nil {
+ return osFile{}, err
+ }
+
+ return osFile{
+ fullpath: path,
+ fi: fi,
+ }, nil
+}
+
+type osFile struct {
+ fullpath string
+ fi os.FileInfo
+}
+
+func (f osFile) Type() FileType {
+ m := f.fi.Mode()
+ if m.IsDir() {
+ return FDir
+ }
+ if m.IsRegular() {
+ return FFile
+ }
+ if m&os.ModeSymlink != 0 {
+ return FSymlink
+ }
+ return "unknown"
+}
+
+func (f osFile) Name() string {
+ return f.fi.Name()
+}
+
+func (f osFile) Executable() bool {
+ return f.fi.Mode()&0100 != 0 // x bit set for user?
+}
+
+func (f osFile) ModTime() time.Time {
+ return f.fi.ModTime()
+}
+
+func (f osFile) Delete() error {
+ return os.RemoveAll(f.fullpath)
+}
+
+func (f osFile) Open() (io.ReadWriteCloser, error) {
+ fh, err := os.Open(f.fullpath)
+ if err != nil {
+ return nil, err
+ }
+ return fh, nil
+}
+
+func (f osFile) Readdir() (list []File, err error) {
+ fh, err := os.Open(f.fullpath)
+ if err != nil {
+ return
+ }
+ defer fh.Close()
+
+ infos, err := fh.Readdir(-1)
+ if err != nil {
+ return
+ }
+
+ for _, fi := range infos {
+ if fi.Name() == "." || fi.Name() == ".." {
+ continue
+ }
+
+ list = append(list, osFile{
+ fullpath: f.fullpath + string(os.PathSeparator) + fi.Name(),
+ fi: fi,
+ })
+ }
+
+ return
+}
+
+func perms(executable bool) os.FileMode {
+ if executable {
+ return 0755
+ } else {
+ return 0644
+ }
+}
+
+func (f osFile) CreateChildFile(name string, exec bool) (RegularFile, error) {
+ p := f.fullpath + string(os.PathSeparator) + name
+
+ fh, err := os.OpenFile(p, os.O_RDWR|os.O_CREATE, perms(exec))
+ if err != nil {
+ return nil, err
+ }
+ fh.Close()
+
+ return openOSFile(p)
+}
+
+func (f osFile) CreateChildDir(name string) (Dir, error) {
+ p := f.fullpath + string(os.PathSeparator) + name
+
+ if err := os.Mkdir(p, perms(true)); err != nil {
+ return nil, err
+ }
+
+ return openOSFile(p)
+}
+
+func (f osFile) CreateChildSymlink(name string, target string) (Symlink, error) {
+ p := f.fullpath + string(os.PathSeparator) + name
+
+ err := os.Symlink(target, p)
+ if err != nil {
+ return nil, err
+ }
+
+ return openOSFile(p)
+}
+
+func (f osFile) Readlink() (string, error) {
+ return os.Readlink(f.fullpath)
+}
diff --git a/objects/object.go b/objects/object.go
index 4e096ad..b192912 100644
--- a/objects/object.go
+++ b/objects/object.go
@@ -138,3 +138,7 @@ func (ro RawObject) Object() (o Object, err error) {
}
return
}
+
+func ToRawObject(o Object) RawObject {
+ return RawObject{Type: o.Type(), Payload: o.Payload()}
+}
diff --git a/objects/object_tree.go b/objects/object_tree.go
index 2a8f982..609aa7e 100644
--- a/objects/object_tree.go
+++ b/objects/object_tree.go
@@ -41,6 +41,15 @@ type TreeEntryBase struct {
user, group string
}
+func baseFromExec(exec bool) (base TreeEntryBase) {
+ if exec {
+ base.acl = acl.ACLFromUnixPerms(0755)
+ } else {
+ base.acl = acl.ACLFromUnixPerms(0644)
+ }
+ return
+}
+
func (teb TreeEntryBase) ACL() acl.ACL {
return teb.acl
}
@@ -54,11 +63,14 @@ func (teb TreeEntryBase) Group() string {
}
func (teb TreeEntryBase) toProperties() properties {
- return properties{
- "acl": teb.acl.String(),
- "user": teb.user,
- "group": teb.group,
+ props := properties{"acl": teb.acl.String()}
+ if teb.user != "" {
+ props["user"] = teb.user
+ }
+ if teb.group != "" {
+ props["group"] = teb.group
}
+ return props
}
func (a TreeEntryBase) equalContent(b TreeEntryBase) bool {
@@ -70,6 +82,13 @@ type TreeEntryFile struct {
Ref ObjectId
}
+func NewTreeEntryFile(ref ObjectId, exec bool) TreeEntryFile {
+ return TreeEntryFile{
+ TreeEntryBase: baseFromExec(exec),
+ Ref: ref,
+ }
+}
+
func (tef TreeEntryFile) Type() TreeEntryType {
return TETFile
}
@@ -90,6 +109,13 @@ type TreeEntryDir struct {
Ref ObjectId
}
+func NewTreeEntryDir(ref ObjectId, exec bool) TreeEntryDir {
+ return TreeEntryDir{
+ TreeEntryBase: baseFromExec(exec),
+ Ref: ref,
+ }
+}
+
func (ted TreeEntryDir) Type() TreeEntryType {
return TETDir
}
@@ -110,6 +136,13 @@ type TreeEntrySymlink struct {
Target string
}
+func NewTreeEntrySymlink(target string, exec bool) TreeEntrySymlink {
+ return TreeEntrySymlink{
+ TreeEntryBase: baseFromExec(exec),
+ Target: target,
+ }
+}
+
func (tes TreeEntrySymlink) Type() TreeEntryType {
return TETSymlink
}
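
Because baseFromExec derives the whole ACL from the executable flag, entries built through these constructors serialize with exactly two ACL variants and without user/group properties, which is what the expected fixtures in backup_test.go above show. A quick illustration (sketch; the expected strings are taken from those fixtures):

    package main

    import (
        "fmt"

        "code.laria.me/petrific/objects"
    )

    func main() {
        // Any valid ObjectId works here; this one is taken from the test fixtures.
        id, _ := objects.ParseObjectId("sha3-256:1dc6fae780ae4a1e823a5b8e26266356a2e1d22e5904b0652dcff6e3c0e72067")

        plainACL := objects.NewTreeEntryFile(id, false).ACL()
        execACL := objects.NewTreeEntryDir(id, true).ACL()

        fmt.Println(plainACL.String()) // u::rw-,g::r--,o::r--
        fmt.Println(execACL.String())  // u::rwx,g::r-x,o::r-x
    }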
diff --git a/objects/object_tree_test.go b/objects/object_tree_test.go
index 3ec9e25..1949375 100644
--- a/objects/object_tree_test.go
+++ b/objects/object_tree_test.go
@@ -49,8 +49,6 @@ var (
"": acl.PermR | acl.PermW,
},
},
- user: "user4",
- group: "group4",
},
Ref: genId(0x33),
},
@@ -59,7 +57,7 @@ var (
testTreeSerialization = []byte("" +
"acl=u::rw-,g::r--,o::r--&group=group1&name=foo&ref=sha3-256:1111111111111111111111111111111111111111111111111111111111111111&type=file&user=user1\n" +
"acl=u::rw-,g::r--,o::r--&group=group3&name=baz&target=%2ff%c3%b6%c3%b6%26b%c3%a4r%2f%f0%9f%92%be&type=symlink&user=user3\n" +
- "acl=u::rw-,u:user1:rw-,g::r--,o::r--,m::rw-&group=group4&name=%f0%9f%98%83&ref=sha3-256:3333333333333333333333333333333333333333333333333333333333333333&type=file&user=user4\n" +
+ "acl=u::rw-,u:user1:rw-,g::r--,o::r--,m::rw-&name=%f0%9f%98%83&ref=sha3-256:3333333333333333333333333333333333333333333333333333333333333333&type=file\n" +
"acl=u::rwx,g::r-x,o::r-x&group=group2&name=bar&ref=sha3-256:2222222222222222222222222222222222222222222222222222222222222222&type=dir&user=user2\n")
)
diff --git a/storage/memory_storage.go b/storage/memory_storage.go
new file mode 100644
index 0000000..0c90981
--- /dev/null
+++ b/storage/memory_storage.go
@@ -0,0 +1,41 @@
+package storage
+
+import (
+ "code.laria.me/petrific/objects"
+)
+
+type MemoryStorage struct {
+ objects map[string][]byte
+ bytype map[objects.ObjectType][]objects.ObjectId
+}
+
+func NewMemoryStorage() Storage {
+ return MemoryStorage{
+ objects: make(map[string][]byte),
+ bytype: make(map[objects.ObjectType][]objects.ObjectId),
+ }
+}
+
+func (ms MemoryStorage) Get(id objects.ObjectId) ([]byte, error) {
+ b, ok := ms.objects[id.String()]
+ if !ok {
+ return nil, ObjectNotFound
+ }
+ return b, nil
+}
+
+func (ms MemoryStorage) Has(id objects.ObjectId) (bool, error) {
+ _, ok := ms.objects[id.String()]
+ return ok, nil
+}
+
+func (ms MemoryStorage) Set(id objects.ObjectId, typ objects.ObjectType, raw []byte) error {
+ ms.objects[id.String()] = raw
+ ms.bytype[typ] = append(ms.bytype[typ], id)
+
+ return nil
+}
+
+func (ms MemoryStorage) List(typ objects.ObjectType) ([]objects.ObjectId, error) {
+ return ms.bytype[typ], nil
+}
diff --git a/storage/storage.go b/storage/storage.go
new file mode 100644
index 0000000..424661c
--- /dev/null
+++ b/storage/storage.go
@@ -0,0 +1,37 @@
+package storage
+
+import (
+ "bytes"
+ "code.laria.me/petrific/objects"
+ "errors"
+)
+
+var (
+ ObjectNotFound = errors.New("Object not found")
+)
+
+type Storage interface {
+ Get(id objects.ObjectId) ([]byte, error)
+ Has(id objects.ObjectId) (bool, error)
+ Set(id objects.ObjectId, typ objects.ObjectType, raw []byte) error
+ List(typ objects.ObjectType) ([]objects.ObjectId, error)
+}
+
+func SetObject(s Storage, o objects.RawObject) (id objects.ObjectId, err error) {
+ buf := new(bytes.Buffer)
+
+ id, err = o.SerializeAndId(buf, objects.OIdAlgoDefault)
+ if err != nil {
+ return
+ }
+
+ ok, err := s.Has(id)
+ if err != nil {
+ return
+ }
+
+ if !ok {
+ err = s.Set(id, o.Type, buf.Bytes())
+ }
+ return
+}
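
SetObject is the single write entry point: it serializes the object, derives its id, and only calls Set if the storage does not already have that id, so identical content is stored once. A minimal sketch using the in-memory backend (not part of this change):

    package main

    import (
        "fmt"

        "code.laria.me/petrific/objects"
        "code.laria.me/petrific/storage"
    )

    func main() {
        store := storage.NewMemoryStorage()

        content := objects.Blob([]byte("hello world"))
        raw := objects.ToRawObject(&content)

        // Storing the same object twice yields the same id; the second call
        // sees Has(id) == true and skips the write.
        id1, err := storage.SetObject(store, raw)
        if err != nil {
            panic(err)
        }
        id2, _ := storage.SetObject(store, raw)

        fmt.Println(id1.String())
        fmt.Println(id1.String() == id2.String()) // true
    }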