# -*- coding: utf-8 -*-
import nilmdb
from nilmdb.utils.printf import *
from nose.tools import *
from nose.tools import assert_raises
import itertools
from testutil.helpers import *

testdb = "tests/bulkdata-testdb"

import nilmdb.server.bulkdata
from nilmdb.server.bulkdata import BulkData

class TestBulkData(object):
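    # These tests exercise nilmdb.server.bulkdata.BulkData directly:
    # database locking, stream creation and path validation, appending
    # and extracting rows, remove, and destroy.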

    def test_bulkdata(self):
        for (size, files, db) in [ ( 0, 0, testdb ),
                                   ( 25, 1000, testdb ),
                                   ( 1000, 3, testdb.decode("utf-8") ) ]:
            recursive_unlink(db)
            os.mkdir(db)
            self.do_basic(db, size, files)

    def do_basic(self, db, size, files):
        """Do the basic test with variable file_size and files_per_dir"""
        if not size or not files:
            data = BulkData(db)
        else:
            data = BulkData(db, file_size = size, files_per_dir = files)

        # Try opening it again (should result in locking error)
        with assert_raises(IOError) as e:
            data2 = BulkData(db)
        in_("already locked by another process", str(e.exception))

        # create empty
        with assert_raises(ValueError):
            data.create("/foo", "uint16_8")
        with assert_raises(ValueError):
            data.create("foo/bar", "uint16_8")
        data.create("/foo/bar", "uint16_8")
        data.create(u"/foo/baz/quux", "float64_16")
        with assert_raises(ValueError):
            data.create("/foo/bar/baz", "uint16_8")
        with assert_raises(ValueError):
            data.create("/foo/baz", "float64_16")
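        # Layout strings name the column type and count ("uint16_8" is eight
        # uint16 columns).  Creation is rejected for relative paths, paths
        # without a parent folder, paths nested under an existing stream, and
        # paths that are already a parent of another stream.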

        # get node -- see if caching works
        nodes = []
        for i in range(5000):
            nodes.append(data.getnode("/foo/bar"))
            nodes.append(data.getnode("/foo/baz/quux"))
        del nodes

        def get_node_slice(key):
            if isinstance(key, slice):
                return [ node.get_data(x, x+1) for x in
                         xrange(*key.indices(node.nrows)) ]
            return node.get_data(key, key+1)
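        # get_node_slice emulates Python indexing/slicing on top of
        # node.get_data(start, end), fetching one row of text at a time so
        # results can be compared directly against slices of the raw list.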

        # Test node
        node = data.getnode("/foo/bar")
        with assert_raises(IndexError):
            x = get_node_slice(0)
        with assert_raises(IndexError):
            x = node[0]          # timestamp
        raw = []
        for i in range(1000):
            raw.append("%d 1 2 3 4 5 6 7 8\n" % (10000 + i))
        node.append_data("".join(raw[0:1]), 0, 50000)
        node.append_data("".join(raw[1:100]), 0, 50000)
        node.append_data("".join(raw[100:]), 0, 50000)
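        # Assumption: append_data(data, start, end) takes ASCII rows plus the
        # timestamp interval they cover; splitting the 1000 rows across three
        # calls exercises row-file boundaries when file_size is small.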

        misc_slices = [ 0, 100, slice(None), slice(0), slice(10),
                        slice(5,10), slice(3,None), slice(3,-3),
                        slice(20,10), slice(200,100,-1), slice(None,0,-1),
                        slice(100,500,5) ]

        # Extract slices
        for s in misc_slices:
            eq_(get_node_slice(s), raw[s])

        # Extract misc slices while appending, to make sure the
        # data isn't being added in the middle of the file
        for s in [2, slice(1,5), 2, slice(1,5)]:
            node.append_data("0 0 0 0 0 0 0 0 0\n", 0, 50000)
            raw.append("0 0 0 0 0 0 0 0 0\n")
            eq_(get_node_slice(s), raw[s])

        # Get some coverage of remove; remove is more fully tested
        # in cmdline
        with assert_raises(IndexError):
            node.remove(9999,9998)
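
        # Closing and reopening below checks that the appended rows persist
        # on disk: the same slice extractions must still match the raw data.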

        # Close and reopen
        data.close()
        if not size or not files:
            data = BulkData(db)
        else:
            data = BulkData(db, file_size = size, files_per_dir = files)
        node = data.getnode("/foo/bar")

        # Extract slices
        for s in misc_slices:
            eq_(get_node_slice(s), raw[s])
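
        # destroy() below is expected to refuse paths that still have children
        # ("/foo"), intermediate folders ("/foo/baz"), and nonexistent streams
        # ("/foo/qwerty"); leaf streams are destroyed individually.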

        # destroy
        with assert_raises(ValueError):
            data.destroy("/foo")
        with assert_raises(ValueError):
            data.destroy("/foo/baz")
        with assert_raises(ValueError):
            data.destroy("/foo/qwerty")
        data.destroy("/foo/baz/quux")
        data.destroy("/foo/bar")

        # close
        data.close()