@@ -5,6 +5,8 @@ from nilmdb.utils.printf import *
 from nose.tools import *
 from nose.tools import assert_raises
 import itertools
+import errno
+import pickle
 
 from testutil.helpers import *
 
@@ -16,19 +18,26 @@ from nilmdb.server.bulkdata import BulkData
 class TestBulkData(object):
 
     def test_bulkdata(self):
-        for (size, files, db) in [ ( 0, 0, testdb ),
+        for (size, files, db) in [ ( None, None, testdb ),
                                    ( 25, 1000, testdb ),
                                    ( 1000, 3, testdb.decode("utf-8") ) ]:
             recursive_unlink(db)
             os.mkdir(db)
             self.do_basic(db, size, files)
 
+    def test_corruption(self):
+        db = testdb
+        recursive_unlink(db)
+        os.mkdir(db)
+
+        # Remove lock before close
+        data = BulkData(db)
+        os.unlink(data.lock)
+        data.close()
+
     def do_basic(self, db, size, files):
         """Do the basic test with variable file_size and files_per_dir"""
-        if not size or not files:
-            data = BulkData(db)
-        else:
-            data = BulkData(db, file_size = size, files_per_dir = files)
+        data = BulkData(db, file_size = size, files_per_dir = files)
 
         # Try opening it again (should result in locking error)
         with assert_raises(IOError) as e:
@@ -42,11 +51,18 @@ class TestBulkData(object):
             data.create("foo/bar", "uint16_8")
         data.create("/foo/bar", "uint16_8")
         data.create("/foo/baz/quux", "float64_16")
-        with assert_raises(ValueError):
+        with assert_raises(ValueError) as e:
             data.create("/foo/bar/baz", "uint16_8")
+        in_("path is subdir of existing node", str(e.exception))
         with assert_raises(ValueError):
             data.create("/foo/baz", "float64_16")
 
+        # filename too long (tests error paths in _create_parents)
+        with assert_raises(OSError) as e:
+            data.create("/test/long/" + "a"*10000 + "/foo", "int32_1")
+        eq_(e.exception.errno, errno.ENAMETOOLONG)
+
+
         # get node -- see if caching works
         nodes = []
         for i in range(5000):
@@ -95,14 +111,37 @@ class TestBulkData(object):
         node.remove(9999,9998)
 
-        # close, reopen
+        # reopen
        data.close()
-        if not size or not files:
-            data = BulkData(db)
-        else:
-            data = BulkData(db, file_size = size, files_per_dir = files)
+        data = BulkData(db, file_size = size, files_per_dir = files)
         node = data.getnode("/foo/bar")
 
+        # make an empty dir that will get ignored by _get_nrows
+        data.close()
+        os.mkdir(os.path.join(testdb, b"data/foo/bar/0123"))
+        data = BulkData(db, file_size = size, files_per_dir = files)
+        node = data.getnode("/foo/bar")
+
+        # make a corrupted file that's the wrong size
+        data.close()
+        with open(os.path.join(testdb, b"data/foo/bar/0123/0123"), "wb") as f:
+            f.write(b"x"*17)
+        data = BulkData(db, file_size = size, files_per_dir = files)
+        with assert_raises(ValueError) as e:
+            node = data.getnode("/foo/bar")
+        in_("file offset is not a multiple of data size", str(e.exception))
+
+        # mess with format
+        data.close()
+        with open(os.path.join(testdb, b"data/foo/bar/_format"), "rb") as f:
+            fmt = pickle.load(f)
+        fmt["version"] = 2
+        with open(os.path.join(testdb, b"data/foo/bar/_format"), "wb") as f:
+            pickle.dump(fmt, f, 2)
+        data = BulkData(db, file_size = size, files_per_dir = files)
+        with assert_raises(NotImplementedError) as e:
+            node = data.getnode("/foo/bar")
+        in_("old version 2 bulk data store is not supported", str(e.exception))
 
         # Extract slices
         for s in misc_slices:
             eq_(get_node_slice(s), raw[s])
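
A note on the helpers these hunks lean on: in_ comes from testutil.helpers
(pulled in via the wildcard import), while eq_ and assert_raises come from
nose.tools. The exact in_ implementation lives in that module and may differ
in detail; a minimal sketch consistent with how the patch calls it:

    # Hypothetical stand-in for the in_ assertion helper provided by
    # testutil.helpers; the repository's actual version may differ.
    def in_(needle, haystack):
        # Pass when `needle` occurs in `haystack`, e.g. a substring of a
        # str(e.exception) message, in the spirit of nose.tools.eq_.
        if needle not in haystack:
            raise AssertionError("%r not found in %r" % (needle, haystack))

Since the suite is nose-based, these tests would typically be run with
something like "nosetests tests/test_bulkdata.py" (exact path assumed from
the imports above).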