From 3d645268b0030fba5cc2c66bdcc715274cd5bb1c Mon Sep 17 00:00:00 2001
From: Laria Carolin Chabowski
Date: Wed, 12 Jul 2017 17:18:52 +0200
Subject: Implement restoring

---
 backup/backup_test.go  |  49 ++++--------
 backup/common_test.go  |  30 +++++++++++
 backup/restore.go      | 137 +++++++++++++++++++++++++++++++++++++++++++++++++
 backup/restore_test.go | 110 ++++++++++++++++++++++++++++++++++++++++
 fs/fs.go               |   3 ++
 fs/memory.go           |  51 +++++++++++++++----
 fs/os.go               |  26 ++++++++--
 objects/id.go          |   8 +++
 storage/storage.go     |  44 ++++++++++++++++
 9 files changed, 404 insertions(+), 54 deletions(-)
 create mode 100644 backup/common_test.go
 create mode 100644 backup/restore.go
 create mode 100644 backup/restore_test.go

diff --git a/backup/backup_test.go b/backup/backup_test.go
index 2fee26e..714d814 100644
--- a/backup/backup_test.go
+++ b/backup/backup_test.go
@@ -12,15 +12,9 @@ import (
 func wantObject(
 	t *testing.T,
 	s storage.Storage,
-	id_str string,
+	id objects.ObjectId,
 	want []byte,
 ) {
-	id, err := objects.ParseObjectId(id_str)
-	if err != nil {
-		t.Errorf("Could not parse id: %s", err)
-		return
-	}
-
 	have, err := s.Get(id)
 	if err != nil {
 		t.Errorf("Could not get %s: %s", id, err)
@@ -51,9 +45,9 @@ func TestWriteLargeFile(t *testing.T) {
 		"blob=sha3-256:7287cbb09bdd8a0d96a6f6297413cd9d09a2763814636245a5a44120e6351be3&size=16777216\n" +
 		"blob=sha3-256:ddf124464f7b80e95f4a9c704f79e7037ff5d731648ba6b40c769893b428128c&size=100\n")
 
-	wantObject(t, s, "sha3-256:ab7907ee6b45b343422a0354de500bcf99f5ff69fe8125be84e43d421803c34e", want_file)
-	wantObject(t, s, "sha3-256:7287cbb09bdd8a0d96a6f6297413cd9d09a2763814636245a5a44120e6351be3", want_large_blob)
-	wantObject(t, s, "sha3-256:ddf124464f7b80e95f4a9c704f79e7037ff5d731648ba6b40c769893b428128c", want_small_blob)
+	wantObject(t, s, objects.MustParseObjectId("sha3-256:ab7907ee6b45b343422a0354de500bcf99f5ff69fe8125be84e43d421803c34e"), want_file)
+	wantObject(t, s, objects.MustParseObjectId("sha3-256:7287cbb09bdd8a0d96a6f6297413cd9d09a2763814636245a5a44120e6351be3"), want_large_blob)
+	wantObject(t, s, objects.MustParseObjectId("sha3-256:ddf124464f7b80e95f4a9c704f79e7037ff5d731648ba6b40c769893b428128c"), want_small_blob)
 }
 
 func mkfile(t *testing.T, d fs.Dir, name string, exec bool, content []byte) {
@@ -101,33 +95,10 @@ func TestWriteDir(t *testing.T) {
 		t.Errorf("Unexpected dir id: %s", id)
 	}
 
-	//4a10682307d5b5dc072d1b862497296640176109347b149aad38cd640000491b
-	obj_emptyfile := []byte("file 0\n")
-
-	//ba632076629ff33238850c870fcb51e4b7b67b3d9dcb66314adbcf1770a5fea7
-	obj_fooblob := []byte("blob 3\nfoo")
-	//fa50ca1fc739852528ecc149b424a8ccbdf84b73c8718cde4525f2a410d79244
-	obj_foofile := []byte("file 86\nblob=sha3-256:ba632076629ff33238850c870fcb51e4b7b67b3d9dcb66314adbcf1770a5fea7&size=3\n")
-
-	//1dc6fae780ae4a1e823a5b8e26266356a2e1d22e5904b0652dcff6e3c0e72067
-	obj_emptytree := []byte("tree 0\n")
-
-	//f1716a1b0cad23b6faab9712243402b8f8e7919c377fc5d5d87bd465cef056d7
-	obj_subdir := []byte("tree 239\n" +
-		"acl=u::rw-,g::r--,o::r--&name=a&ref=sha3-256:4a10682307d5b5dc072d1b862497296640176109347b149aad38cd640000491b&type=file\n" +
-		"acl=u::rwx,g::r-x,o::r-x&name=b&ref=sha3-256:1dc6fae780ae4a1e823a5b8e26266356a2e1d22e5904b0652dcff6e3c0e72067&type=dir\n")
-
-	//09e881f57befa1eacec744e3857a36f0d9d5dd1fa72ba96564b467a3d7d0c0d5
-	obj_dir := []byte("tree 423\n" +
-		"acl=u::rw-,g::r--,o::r--&name=baz&target=foo&type=symlink\n" +
-		"acl=u::rw-,g::r--,o::r--&name=foo&ref=sha3-256:fa50ca1fc739852528ecc149b424a8ccbdf84b73c8718cde4525f2a410d79244&type=file\n" +
-		"acl=u::rwx,g::r-x,o::r-x&name=bar&ref=sha3-256:4a10682307d5b5dc072d1b862497296640176109347b149aad38cd640000491b&type=file\n" +
-		"acl=u::rwx,g::r-x,o::r-x&name=sub&ref=sha3-256:f1716a1b0cad23b6faab9712243402b8f8e7919c377fc5d5d87bd465cef056d7&type=dir\n")
-
-	wantObject(t, s, "sha3-256:4a10682307d5b5dc072d1b862497296640176109347b149aad38cd640000491b", obj_emptyfile)
-	wantObject(t, s, "sha3-256:ba632076629ff33238850c870fcb51e4b7b67b3d9dcb66314adbcf1770a5fea7", obj_fooblob)
-	wantObject(t, s, "sha3-256:fa50ca1fc739852528ecc149b424a8ccbdf84b73c8718cde4525f2a410d79244", obj_foofile)
-	wantObject(t, s, "sha3-256:1dc6fae780ae4a1e823a5b8e26266356a2e1d22e5904b0652dcff6e3c0e72067", obj_emptytree)
-	wantObject(t, s, "sha3-256:f1716a1b0cad23b6faab9712243402b8f8e7919c377fc5d5d87bd465cef056d7", obj_subdir) //!
-	wantObject(t, s, "sha3-256:09e881f57befa1eacec744e3857a36f0d9d5dd1fa72ba96564b467a3d7d0c0d5", obj_dir) //!
+	wantObject(t, s, objid_emptyfile, obj_emptyfile)
+	wantObject(t, s, objid_fooblob, obj_fooblob)
+	wantObject(t, s, objid_foofile, obj_foofile)
+	wantObject(t, s, objid_emptytree, obj_emptytree)
+	wantObject(t, s, objid_subtree, obj_subtree)
+	wantObject(t, s, objid_testtree, obj_testtree)
 }
diff --git a/backup/common_test.go b/backup/common_test.go
new file mode 100644
index 0000000..f06d737
--- /dev/null
+++ b/backup/common_test.go
@@ -0,0 +1,30 @@
+package backup
+
+import (
+	"code.laria.me/petrific/objects"
+)
+
+var (
+	objid_emptyfile = objects.MustParseObjectId("sha3-256:4a10682307d5b5dc072d1b862497296640176109347b149aad38cd640000491b")
+	obj_emptyfile   = []byte("file 0\n")
+
+	objid_fooblob = objects.MustParseObjectId("sha3-256:ba632076629ff33238850c870fcb51e4b7b67b3d9dcb66314adbcf1770a5fea7")
+	obj_fooblob   = []byte("blob 3\nfoo")
+	objid_foofile = objects.MustParseObjectId("sha3-256:fa50ca1fc739852528ecc149b424a8ccbdf84b73c8718cde4525f2a410d79244")
+	obj_foofile   = []byte("file 86\nblob=sha3-256:ba632076629ff33238850c870fcb51e4b7b67b3d9dcb66314adbcf1770a5fea7&size=3\n")
+
+	objid_emptytree = objects.MustParseObjectId("sha3-256:1dc6fae780ae4a1e823a5b8e26266356a2e1d22e5904b0652dcff6e3c0e72067")
+	obj_emptytree   = []byte("tree 0\n")
+
+	objid_subtree = objects.MustParseObjectId("sha3-256:f1716a1b0cad23b6faab9712243402b8f8e7919c377fc5d5d87bd465cef056d7")
+	obj_subtree   = []byte("tree 239\n" +
+		"acl=u::rw-,g::r--,o::r--&name=a&ref=sha3-256:4a10682307d5b5dc072d1b862497296640176109347b149aad38cd640000491b&type=file\n" +
+		"acl=u::rwx,g::r-x,o::r-x&name=b&ref=sha3-256:1dc6fae780ae4a1e823a5b8e26266356a2e1d22e5904b0652dcff6e3c0e72067&type=dir\n")
+
+	objid_testtree = objects.MustParseObjectId("sha3-256:09e881f57befa1eacec744e3857a36f0d9d5dd1fa72ba96564b467a3d7d0c0d5")
+	obj_testtree   = []byte("tree 423\n" +
+		"acl=u::rw-,g::r--,o::r--&name=baz&target=foo&type=symlink\n" +
+		"acl=u::rw-,g::r--,o::r--&name=foo&ref=sha3-256:fa50ca1fc739852528ecc149b424a8ccbdf84b73c8718cde4525f2a410d79244&type=file\n" +
+		"acl=u::rwx,g::r-x,o::r-x&name=bar&ref=sha3-256:4a10682307d5b5dc072d1b862497296640176109347b149aad38cd640000491b&type=file\n" +
+		"acl=u::rwx,g::r-x,o::r-x&name=sub&ref=sha3-256:f1716a1b0cad23b6faab9712243402b8f8e7919c377fc5d5d87bd465cef056d7&type=dir\n")
+)
diff --git a/backup/restore.go b/backup/restore.go
new file mode 100644
index 0000000..6a03200
--- /dev/null
+++ b/backup/restore.go
@@ -0,0 +1,137 @@
+package backup
+
+import (
+	"code.laria.me/petrific/acl"
+	"code.laria.me/petrific/fs"
+	"code.laria.me/petrific/objects"
+	"code.laria.me/petrific/storage"
+	"fmt"
+	"io"
+	"os"
+)
+
+func RestoreFile(s storage.Storage, id objects.ObjectId, w io.Writer) error {
+	file, err := storage.GetObjectOfType(s, id, objects.OTFile)
+	if err != nil {
+		return err
+	}
+
+	for i, fragment := range *file.(*objects.File) {
+		blob_obj, err := storage.GetObjectOfType(s, fragment.Blob, objects.OTBlob)
+		if err != nil {
+			return err
+		}
+		blob := *blob_obj.(*objects.Blob)
+
+		if uint64(len(blob)) != fragment.Size {
+			return fmt.Errorf("RestoreFile: blob size of %s doesn't match size in fragment %d of file %s", fragment.Blob, i, id)
+		}
+
+		if _, err := w.Write(blob); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func execBitFromACL(a acl.ACL) bool {
+	return a.ToUnixPerms()&0100 != 0
+}
+
+func RestoreDir(s storage.Storage, id objects.ObjectId, root fs.Dir) error {
+	tree_obj, err := storage.GetObjectOfType(s, id, objects.OTTree)
+	if err != nil {
+		return err
+	}
+	tree := tree_obj.(objects.Tree)
+
+	seen := make(map[string]struct{})
+
+	for name, file_info := range tree {
+		switch file_info.Type() {
+		case objects.TETFile:
+			tmpname := fmt.Sprintf(".petrific-%d-%s", os.Getpid(), id)
+			new_file, err := root.CreateChildFile(tmpname, execBitFromACL(file_info.ACL()))
+			if err != nil {
+				return err
+			}
+			rwc, err := new_file.Open()
+			if err != nil {
+				return err
+			}
+
+			if err := RestoreFile(s, file_info.(objects.TreeEntryFile).Ref, rwc); err != nil {
+				rwc.Close()
+				return err
+			}
+			rwc.Close()
+
+			if err := root.RenameChild(tmpname, name); err != nil {
+				return err
+			}
+		case objects.TETDir:
+			var subdir fs.Dir
+
+			// Try to use an existing directory
+			child, err := root.GetChild(name)
+			if err == nil {
+				if child.Type() == fs.FDir {
+					subdir = child.(fs.Dir)
+				} else {
+					if err := child.Delete(); err != nil {
+						return err
+					}
+				}
+			} else if !os.IsNotExist(err) {
+				return err
+			}
+
+			// Create the directory if it doesn't exist yet
+			if subdir == nil {
+				subdir, err = root.CreateChildDir(name)
+				if err != nil {
+					return err
+				}
+			}
+
+			if err := RestoreDir(s, file_info.(objects.TreeEntryDir).Ref, subdir); err != nil {
+				return err
+			}
+		case objects.TETSymlink:
+			// Is there already a child of that name? If yes, delete it
+			child, err := root.GetChild(name)
+			if err == nil {
+				if err := child.Delete(); err != nil {
+					return err
+				}
+			} else if !os.IsNotExist(err) {
+				return err
+			}
+
+			if _, err := root.CreateChildSymlink(name, file_info.(objects.TreeEntrySymlink).Target); err != nil {
+				return err
+			}
+		default:
+			return fmt.Errorf("child '%s' of %s has unknown tree entry type %s", name, id, file_info.Type())
+		}
+
+		seen[name] = struct{}{}
+	}
+
+	// All children from the backup are restored; now delete any children of root that are not part of the backup
+	children, err := root.Readdir()
+	if err != nil {
+		return err
+	}
+	for _, c := range children {
+		_, ok := seen[c.Name()]
+		if !ok {
+			if err := c.Delete(); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
diff --git a/backup/restore_test.go b/backup/restore_test.go
new file mode 100644
index 0000000..1a88e5f
--- /dev/null
+++ b/backup/restore_test.go
@@ -0,0 +1,110 @@
+package backup
+
+import (
+	"bytes"
+	"code.laria.me/petrific/fs"
+	"code.laria.me/petrific/objects"
+	"code.laria.me/petrific/storage"
+	"io"
+	"testing"
+)
+
+func withChildOfType(t *testing.T, root fs.Dir, name string, ft fs.FileType, do func(*testing.T, fs.File)) {
+	f, err := root.GetChild(name)
+	if err != nil {
+		t.Errorf("Could not GetChild(%s): %s", name, err)
+		return
+	}
+
+	if f.Type() != ft {
+		t.Errorf("Child '%s' has type %s, expected %s", name, f.Type(), ft)
+		return
+	}
+
+	do(t, f)
+}
+
+func wantFileWithContent(want []byte, exec bool) func(*testing.T, fs.File) {
+	return func(t *testing.T, f fs.File) {
+		rf := f.(fs.RegularFile)
+
+		if rf.Executable() != exec {
+			t.Errorf("Child '%s' has executable bit %t, expected %t", f.Name(), rf.Executable(), exec)
+		}
+
+		rwc, err := rf.Open()
+		if err != nil {
+			t.Errorf("Could not open child '%s': %s", f.Name(), err)
+			return
+		}
+		defer rwc.Close()
+
+		buf := new(bytes.Buffer)
+		if _, err := io.Copy(buf, rwc); err != nil {
+			t.Errorf("Could not read content of child '%s': %s", f.Name(), err)
+			return
+		}
+
+		have := buf.Bytes()
+		if !bytes.Equal(have, want) {
+			t.Errorf("Unexpected content of child '%s': %s", f.Name(), have)
+		}
+	}
+}
+
+func wantDir(n int, fx func(*testing.T, fs.Dir)) func(*testing.T, fs.File) {
+	return func(t *testing.T, f fs.File) {
+		d := f.(fs.Dir)
+
+		children, err := d.Readdir()
+		if err != nil {
+			t.Errorf("Could not Readdir() '%s': %s", f.Name(), err)
+			return
+		}
+
+		if len(children) != n {
+			t.Errorf("Expected '%s' to have %d children, got %d", f.Name(), n, len(children))
+			return
+		}
+
+		fx(t, d)
+	}
+}
+
+func TestRestoreDir(t *testing.T) {
+	s := storage.NewMemoryStorage()
+
+	s.Set(objid_emptyfile, objects.OTFile, obj_emptyfile)
+	s.Set(objid_fooblob, objects.OTBlob, obj_fooblob)
+	s.Set(objid_foofile, objects.OTFile, obj_foofile)
+	s.Set(objid_emptytree, objects.OTTree, obj_emptytree)
+	s.Set(objid_subtree, objects.OTTree, obj_subtree)
+	s.Set(objid_testtree, objects.OTTree, obj_testtree)
+
+	root := fs.NewMemoryFSRoot("")
+
+	if err := RestoreDir(s, objid_testtree, root); err != nil {
+		t.Fatalf("Unexpected error from RestoreDir(): %s", err)
+	}
+
+	wantDir(4, func(t *testing.T, root fs.Dir) {
+		withChildOfType(t, root, "foo", fs.FFile, wantFileWithContent([]byte("foo"), false))
+		withChildOfType(t, root, "bar", fs.FFile, wantFileWithContent([]byte{}, true))
+		withChildOfType(t, root, "baz", fs.FSymlink, func(t *testing.T, f fs.File) {
+			target, err := f.(fs.Symlink).Readlink()
+			if err != nil {
+				t.Errorf("Could not Readlink() child 'baz': %s", err)
+				return
+			}
+
if target != "foo" { + t.Errorf("Unexpected target for baz: %s", target) + } + }) + + withChildOfType(t, root, "sub", fs.FDir, wantDir(2, func(t *testing.T, d fs.Dir) { + withChildOfType(t, d, "a", fs.FFile, wantFileWithContent([]byte{}, false)) + withChildOfType(t, d, "b", fs.FDir, wantDir(0, func(t *testing.T, d fs.Dir) {})) + })) + })(t, root) +} diff --git a/fs/fs.go b/fs/fs.go index cbfeb44..b06ded8 100644 --- a/fs/fs.go +++ b/fs/fs.go @@ -29,10 +29,13 @@ type RegularFile interface { type Dir interface { File Readdir() ([]File, error) + GetChild(name string) (File, error) // Must return os.ErrNotExist, if child doesn't exist CreateChildFile(name string, exec bool) (RegularFile, error) CreateChildDir(name string) (Dir, error) CreateChildSymlink(name string, target string) (Symlink, error) + + RenameChild(oldname, newname string) error } type Symlink interface { diff --git a/fs/memory.go b/fs/memory.go index 5f5d327..221ff36 100644 --- a/fs/memory.go +++ b/fs/memory.go @@ -4,6 +4,7 @@ import ( "bytes" "errors" "io" + "os" "time" ) @@ -14,7 +15,13 @@ type memfsBase struct { mtime time.Time } +type memfsChild interface { + File + setName(string) +} + func (b memfsBase) Name() string { return b.name } +func (b *memfsBase) setName(n string) { b.name = n } func (b memfsBase) Executable() bool { return b.exec } func (b memfsBase) ModTime() time.Time { return b.mtime } @@ -45,13 +52,13 @@ func (f memfsFile) Write(p []byte) (int, error) { return f.content.Write(p) } -func (memfsBase) Close() error { +func (memfsFile) Close() error { return nil } type memfsDir struct { memfsBase - children map[string]File + children map[string]memfsChild } func (memfsDir) Type() FileType { return FDir } @@ -66,6 +73,14 @@ func (d memfsDir) Readdir() ([]File, error) { return l, nil } +func (d memfsDir) GetChild(name string) (File, error) { + c, ok := d.children[name] + if !ok { + return nil, os.ErrNotExist + } + return c, nil +} + func (d memfsDir) createChildBase(name string, exec bool) memfsBase { return memfsBase{ parent: &d, @@ -80,17 +95,17 @@ func (d memfsDir) CreateChildFile(name string, exec bool) (RegularFile, error) { memfsBase: d.createChildBase(name, exec), content: new(bytes.Buffer), } - d.children[name] = child - return child, nil + d.children[name] = &child + return &child, nil } func (d memfsDir) CreateChildDir(name string) (Dir, error) { child := memfsDir{ memfsBase: d.createChildBase(name, true), - children: make(map[string]File), + children: make(map[string]memfsChild), } - d.children[name] = child - return child, nil + d.children[name] = &child + return &child, nil } func (d memfsDir) CreateChildSymlink(name string, target string) (Symlink, error) { @@ -98,23 +113,37 @@ func (d memfsDir) CreateChildSymlink(name string, target string) (Symlink, error memfsBase: d.createChildBase(name, false), target: target, } - d.children[name] = child - return child, nil + d.children[name] = &child + return &child, nil } func (d *memfsDir) deleteChild(name string) { delete(d.children, name) } +func (d *memfsDir) RenameChild(oldname, newname string) error { + c, ok := d.children[oldname] + if !ok { + return os.ErrNotExist + } + + c.setName(newname) + + delete(d.children, oldname) + d.children[newname] = c + + return nil +} + func NewMemoryFSRoot(name string) Dir { - return memfsDir{ + return &memfsDir{ memfsBase: memfsBase{ parent: nil, name: name, exec: true, mtime: time.Now(), }, - children: make(map[string]File), + children: make(map[string]memfsChild), } } diff --git a/fs/os.go b/fs/os.go index 
f251c66..0a4f614 100644 --- a/fs/os.go +++ b/fs/os.go @@ -3,9 +3,14 @@ package fs import ( "io" "os" + "strings" "time" ) +func pathJoin(parts ...string) string { + return strings.Join(parts, string(os.PathSeparator)) +} + func openOSFile(path string) (osFile, error) { fi, err := os.Lstat(path) if err != nil { @@ -79,7 +84,7 @@ func (f osFile) Readdir() (list []File, err error) { } list = append(list, osFile{ - fullpath: f.fullpath + string(os.PathSeparator) + fi.Name(), + fullpath: pathJoin(f.fullpath, fi.Name()), fi: fi, }) } @@ -87,6 +92,15 @@ func (f osFile) Readdir() (list []File, err error) { return } +func (f osFile) GetChild(name string) (File, error) { + path := pathJoin(f.fullpath, name) + fi, err := os.Lstat(path) + if err != nil { + return nil, err + } + return osFile{path, fi}, nil +} + func perms(executable bool) os.FileMode { if executable { return 0755 @@ -96,7 +110,7 @@ func perms(executable bool) os.FileMode { } func (f osFile) CreateChildFile(name string, exec bool) (RegularFile, error) { - p := f.fullpath + string(os.PathSeparator) + name + p := pathJoin(f.fullpath, name) fh, err := os.OpenFile(p, os.O_RDWR|os.O_CREATE, perms(exec)) if err != nil { @@ -108,7 +122,7 @@ func (f osFile) CreateChildFile(name string, exec bool) (RegularFile, error) { } func (f osFile) CreateChildDir(name string) (Dir, error) { - p := f.fullpath + string(os.PathSeparator) + name + p := pathJoin(f.fullpath, name) if err := os.Mkdir(p, perms(true)); err != nil { return nil, err @@ -118,7 +132,7 @@ func (f osFile) CreateChildDir(name string) (Dir, error) { } func (f osFile) CreateChildSymlink(name string, target string) (Symlink, error) { - p := f.fullpath + string(os.PathSeparator) + name + p := pathJoin(f.fullpath, name) err := os.Symlink(target, p) if err != nil { @@ -128,6 +142,10 @@ func (f osFile) CreateChildSymlink(name string, target string) (Symlink, error) return openOSFile(p) } +func (f osFile) RenameChild(oldname, newname string) error { + return os.Rename(pathJoin(f.fullpath, oldname), pathJoin(f.fullpath, newname)) +} + func (f osFile) Readlink() (string, error) { return os.Readlink(f.fullpath) } diff --git a/objects/id.go b/objects/id.go index e651801..224b3cd 100644 --- a/objects/id.go +++ b/objects/id.go @@ -68,6 +68,14 @@ func ParseObjectId(s string) (oid ObjectId, err error) { return } +func MustParseObjectId(s string) ObjectId { + id, err := ParseObjectId(s) + if err != nil { + panic(err) + } + return id +} + func (a ObjectId) Equals(b ObjectId) bool { return a.Algo == b.Algo && bytes.Equal(a.Sum, b.Sum) } diff --git a/storage/storage.go b/storage/storage.go index 424661c..45b1973 100644 --- a/storage/storage.go +++ b/storage/storage.go @@ -4,6 +4,8 @@ import ( "bytes" "code.laria.me/petrific/objects" "errors" + "fmt" + "io" ) var ( @@ -35,3 +37,45 @@ func SetObject(s Storage, o objects.RawObject) (id objects.ObjectId, err error) } return } + +type IdMismatchErr struct { + Want, Have objects.ObjectId +} + +func (iderr IdMismatchErr) Error() string { + return fmt.Sprintf("ID verification failed: want %s, have %s", iderr.Want, iderr.Have) +} + +// GetObjects gets an object from a Storage and parses and verifies it (check it's checksum/id) +func GetObject(s Storage, id objects.ObjectId) (objects.RawObject, error) { + raw, err := s.Get(id) + if err != nil { + return objects.RawObject{}, err + } + + idgen := id.Algo.Generator() + r := io.TeeReader(bytes.NewReader(raw), idgen) + + obj, err := objects.Unserialize(r) + if err != nil { + return objects.RawObject{}, err + } + + if 
have_id := idgen.GetId(); !have_id.Equals(id) { + return objects.RawObject{}, IdMismatchErr{id, have_id} + } + return obj, nil +} + +func GetObjectOfType(s Storage, id objects.ObjectId, t objects.ObjectType) (objects.Object, error) { + rawobj, err := GetObject(s, id) + if err != nil { + return nil, err + } + + if rawobj.Type != t { + return nil, fmt.Errorf("GetObjectOfType: Wrong object type %s (want %s)", rawobj.Type, t) + } + + return rawobj.Object() +} -- cgit v1.2.3-70-g09d2
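
A minimal sketch of how the new restore entry points compose, assuming the storage already holds the objects being restored (the IDs below are the test objects from backup/common_test.go); a real caller would plug in its configured storage backend instead of storage.NewMemoryStorage, and would typically restore into a directory obtained from the OS filesystem rather than an in-memory root:

package main

import (
	"os"

	"code.laria.me/petrific/backup"
	"code.laria.me/petrific/fs"
	"code.laria.me/petrific/objects"
	"code.laria.me/petrific/storage"
)

func main() {
	// Assumption: this storage has been filled by a previous backup run;
	// an empty in-memory storage is only used to keep the sketch self-contained.
	s := storage.NewMemoryStorage()

	// Restore a whole tree object into a directory.
	root := fs.NewMemoryFSRoot("restored")
	treeId := objects.MustParseObjectId("sha3-256:09e881f57befa1eacec744e3857a36f0d9d5dd1fa72ba96564b467a3d7d0c0d5")
	if err := backup.RestoreDir(s, treeId, root); err != nil {
		panic(err)
	}

	// Stream a single file object to stdout.
	fileId := objects.MustParseObjectId("sha3-256:fa50ca1fc739852528ecc149b424a8ccbdf84b73c8718cde4525f2a410d79244")
	if err := backup.RestoreFile(s, fileId, os.Stdout); err != nil {
		panic(err)
	}
}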