Browse Source

Support using a higher initial nrows in bulkdata, for tests

This gives an easy way to get large values in the database start_pos
and end_pos fields, which is necessary for testing failure modes when
those get too large (e.g. on 32-bit systems).  Adjust tests to make
use of this knob.
tags/nilmdb-1.10.1
Jim Paris 9 years ago
parent
commit
022b50950f
2 changed files with 25 additions and 7 deletions
  1. +17
    -4
      nilmdb/server/bulkdata.py
  2. +8
    -3
      tests/test_cmdline.py

+ 17
- 4
nilmdb/server/bulkdata.py View File

@@ -43,6 +43,12 @@ class BulkData(object):
# 32768 files per dir should work even on FAT32
self.files_per_dir = 32768

if "initial_nrows" in kwargs:
self.initial_nrows = kwargs["initial_nrows"]
else:
# First row is 0
self.initial_nrows = 0

# Make root path
if not os.path.isdir(self.root):
os.mkdir(self.root)
@@ -254,7 +260,7 @@ class BulkData(object):
path = self._encode_filename(unicodepath)
elements = path.lstrip('/').split('/')
ospath = os.path.join(self.root, *elements)
return Table(ospath)
return Table(ospath, self.initial_nrows)

@nilmdb.utils.must_close(wrap_verify = False)
class Table(object):
@@ -291,9 +297,10 @@ class Table(object):
pickle.dump(fmt, f, 2)

# Normal methods
def __init__(self, root):
def __init__(self, root, initial_nrows):
"""'root' is the full OS path to the directory of this table"""
self.root = root
self.initial_nrows = initial_nrows

# Load the format
with open(os.path.join(self.root, "_format"), "rb") as f:
@@ -353,8 +360,14 @@ class Table(object):
# Convert to row number
return self._row_from_offset(subdir, filename, offset)

# No files, so no data
return 0
# No files, so no data. We typically start at row 0 in this
# case, although initial_nrows is specified during some tests
# to exercise other parts of the code better. Since we have
# no files yet, round initial_nrows up so it points to a row
# that would begin a new file.
nrows = ((self.initial_nrows + (self.rows_per_file - 1)) //
self.rows_per_file) * self.rows_per_file
return nrows

def _offset_from_row(self, row):
"""Return a (subdir, filename, offset, count) tuple:


+ 8
- 3
tests/test_cmdline.py View File

@@ -834,9 +834,12 @@ class TestCmdline(object):
def test_13_files(self):
# Test BulkData's ability to split into multiple files,
# by forcing the file size to be really small.
# Also increase the initial nrows, so that start/end positions
# in the database are very large (> 32 bit)
server_stop()
server_start(bulkdata_args = { "file_size" : 920, # 23 rows per file
"files_per_dir" : 3 })
"files_per_dir" : 3,
"initial_nrows" : 2**40 })

# Fill data
self.ok("create /newton/prep float32_8")
@@ -888,7 +891,8 @@ class TestCmdline(object):
server_stop()
server_start(max_removals = 4321,
bulkdata_args = { "file_size" : 920, # 23 rows per file
"files_per_dir" : 3 })
"files_per_dir" : 3,
"initial_nrows" : 2**40 })
self.do_remove_files()
self.ok("destroy -R /newton/prep") # destroy again

@@ -897,7 +901,8 @@ class TestCmdline(object):
server_stop()
server_start(max_int_removals = 1,
bulkdata_args = { "file_size" : 920, # 23 rows per file
"files_per_dir" : 3 })
"files_per_dir" : 3,
"initial_nrows" : 2**40 })
self.do_remove_files()

def do_remove_files(self):


Loading…
Cancel
Save