# -*- coding: utf-8 -*-

import nilmdb
from nilmdb.utils.printf import *

from nose.tools import *
from nose.tools import assert_raises
import itertools
import errno
import pickle

from testutil.helpers import *

testdb = b"tests/bulkdata-testdb"

import nilmdb.server.bulkdata
from nilmdb.server.bulkdata import BulkData

class TestBulkData(object):

    def test_bulkdata(self):
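        # Run the same checks with default sizing, with small files
        # (many per directory), and with a str (rather than bytes) db path.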
        for (size, files, db) in [ ( None, None, testdb ),
                                   ( 25, 1000, testdb ),
                                   ( 1000, 3, testdb.decode("utf-8") ) ]:
            recursive_unlink(db)
            os.mkdir(db)
            self.do_basic(db, size, files)

    def test_corruption(self):
        db = testdb
        recursive_unlink(db)
        os.mkdir(db)

        # Remove lock before close
        data = BulkData(db)
        os.unlink(data.lock)
        data.close()

    def do_basic(self, db, size, files):
        """Do the basic test with variable file_size and files_per_dir"""
        data = BulkData(db, file_size = size, files_per_dir = files)

        # Try opening it again (should result in locking error)
        with assert_raises(IOError) as e:
            data2 = BulkData(db)
        in_("already locked by another process", str(e.exception))

        # create empty
        with assert_raises(ValueError):
            data.create("/foo", "uint16_8")
        with assert_raises(ValueError):
            data.create("foo/bar", "uint16_8")
        data.create("/foo/bar", "uint16_8")
        data.create("/foo/baz/quux", "float64_16")
        with assert_raises(ValueError) as e:
            data.create("/foo/bar/baz", "uint16_8")
        in_("path is subdir of existing node", str(e.exception))
        with assert_raises(ValueError):
            data.create("/foo/baz", "float64_16")

        # filename too long (tests error paths in _create_parents)
        with assert_raises(OSError) as e:
            data.create("/test/long/" + "a"*10000 + "/foo", "int32_1")
        eq_(e.exception.errno, errno.ENAMETOOLONG)

        # get node -- see if caching works
        nodes = []
        for i in range(5000):
            nodes.append(data.getnode("/foo/bar"))
            nodes.append(data.getnode("/foo/baz/quux"))
        del nodes
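        # Helper: emulate Python-style indexing and slicing of a node by
        # fetching one row at a time through node.get_data().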
        def get_node_slice(key):
            if isinstance(key, slice):
                return [ node.get_data(x, x+1) for x in
                         range(*key.indices(node.nrows)) ]
            return node.get_data(key, key+1)

        # Test node
        node = data.getnode("/foo/bar")
        with assert_raises(IndexError):
            x = get_node_slice(0)
        with assert_raises(IndexError):
            x = node[0]        # timestamp
        raw = []
        for i in range(1000):
            raw.append(b"%d 1 2 3 4 5 6 7 8\n" % (10000 + i))
        node.append_data(b"".join(raw[0:1]), 0, 50000)
        node.append_data(b"".join(raw[1:100]), 0, 50000)
        node.append_data(b"".join(raw[100:]), 0, 50000)
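        # Assorted integer indexes and slices to check against the raw rows,
        # including empty and negative-step slices.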
        misc_slices = [ 0, 100, slice(None), slice(0), slice(10),
                        slice(5,10), slice(3,None), slice(3,-3),
                        slice(20,10), slice(200,100,-1), slice(None,0,-1),
                        slice(100,500,5) ]

        # Extract slices
        for s in misc_slices:
            eq_(get_node_slice(s), raw[s])

        # Extract misc slices while appending, to make sure the
        # data isn't being added in the middle of the file
        for s in [2, slice(1,5), 2, slice(1,5)]:
            node.append_data(b"0 0 0 0 0 0 0 0 0\n", 0, 50000)
            raw.append(b"0 0 0 0 0 0 0 0 0\n")
            eq_(get_node_slice(s), raw[s])

        # Get some coverage of remove; remove is more fully tested
        # in cmdline
        with assert_raises(IndexError):
            node.remove(9999,9998)

        # close, reopen
        data.close()
        data = BulkData(db, file_size = size, files_per_dir = files)
        node = data.getnode("/foo/bar")

        # make an empty dir that will get ignored by _get_nrows
        data.close()
        os.mkdir(os.path.join(testdb, b"data/foo/bar/0123"))
        data = BulkData(db, file_size = size, files_per_dir = files)
        node = data.getnode("/foo/bar")

        # make a corrupted file that's the wrong size
        data.close()
        with open(os.path.join(testdb, b"data/foo/bar/0123/0123"), "wb") as f:
            f.write(b"x"*17)
        data = BulkData(db, file_size = size, files_per_dir = files)
        with assert_raises(ValueError) as e:
            node = data.getnode("/foo/bar")
        in_("file offset is not a multiple of data size", str(e.exception))

        # mess with format
        data.close()
        with open(os.path.join(testdb, b"data/foo/bar/_format"), "rb") as f:
            fmt = pickle.load(f)
        fmt["version"] = 2
        with open(os.path.join(testdb, b"data/foo/bar/_format"), "wb") as f:
            pickle.dump(fmt, f, 2)
        data = BulkData(db, file_size = size, files_per_dir = files)
        with assert_raises(NotImplementedError) as e:
            node = data.getnode("/foo/bar")
        in_("old version 2 bulk data store is not supported", str(e.exception))

        # Extract slices
        for s in misc_slices:
            eq_(get_node_slice(s), raw[s])

        # destroy
        with assert_raises(ValueError):
            data.destroy("/foo")
        with assert_raises(ValueError):
            data.destroy("/foo/baz")
        with assert_raises(ValueError):
            data.destroy("/foo/qwerty")
        data.destroy("/foo/baz/quux")
        data.destroy("/foo/bar")

        # close
        data.close()