Fix bug where too much data was getting written to each file.

We were still calculating the maximum number of rows correctly, so the
extra data was redundant: the same bytes would simply get re-written at
the beginning of the subsequent file.

The only case in which this would lead to database issues is if the
very last file were lengthened incorrectly, since the "nrows"
calculation would then be wrong when the database was reopened.  Still,
even in that case, it should just leave a small gap in the data, not
cause any errors.  (A standalone sketch of the fwrite change follows
the rocket.c hunk below.)
Tag: nilmdb-1.6.0
Author: Jim Paris
Commit: b98ff1331a
2 changed files with 16 additions and 2 deletions:

  nilmdb/server/rocket.c     +1  -1
  tests/test_numpyclient.py  +15 -1

nilmdb/server/rocket.c (+1, -1)

@@ -468,7 +468,7 @@ static PyObject *Rocket_append_binary(Rocket *self, PyObject *args)
         }
 
         /* Write binary data */
-        if (fwrite(data, data_len, 1, self->file) != 1) {
+        if (fwrite(data, self->binary_size, rows, self->file) != rows) {
                 PyErr_SetFromErrno(PyExc_OSError);
                 return NULL;
         }
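
The essence of the fix: fwrite(ptr, size, nmemb, stream) writes nmemb
items of size bytes each and returns the number of complete items
written.  The old call treated the entire input buffer as one item, so
when an append spanned a file boundary, everything past
rows * binary_size bytes still landed in the current file.  Below is a
minimal, self-contained sketch with hypothetical sizes (the real
data_len, binary_size, and rows come from the Rocket state in
rocket.c):

#include <stdio.h>

int main(void)
{
        /* Hypothetical values: 4 records of 4 bytes each were passed
         * in, but only 3 more records fit in the current file. */
        const char data[] = "AAAABBBBCCCCDDDD";
        size_t data_len = 16;      /* total bytes received */
        size_t binary_size = 4;    /* bytes per record */
        size_t rows = 3;           /* records that still fit here */

        FILE *f = fopen("demo.bin", "wb");
        if (!f) {
                perror("fopen");
                return 1;
        }

        /* Old call: wrote all 16 bytes as a single item, overshooting
         * the file boundary by data_len - rows * binary_size = 4 bytes:
         *
         *     fwrite(data, data_len, 1, f);
         */

        /* Fixed call: writes exactly `rows` complete records, 12 bytes.
         * Comparing the return value against `rows` catches short
         * writes. */
        if (fwrite(data, binary_size, rows, f) != rows) {
                perror("fwrite");
                return 1;
        }

        printf("wrote %zu of %zu bytes\n", rows * binary_size, data_len);
        fclose(f);
        return 0;
}

With these numbers the old call would put 4 extra bytes in the current
file; since the row count was still computed correctly, those same
bytes were written again at the start of the next file, which is why
the data was duplicated rather than lost.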


tests/test_numpyclient.py (+15, -1)

@@ -28,7 +28,10 @@ def setup_module():
     recursive_unlink(testdb)
 
     # Start web app on a custom port
-    test_db = nilmdb.utils.serializer_proxy(nilmdb.server.NilmDB)(testdb)
+    test_db = nilmdb.utils.serializer_proxy(nilmdb.server.NilmDB)(
+        testdb, bulkdata_args = { "file_size" : 16384,
+                                  "files_per_dir" : 3 } )
+
     test_server = nilmdb.server.Server(test_db, host = "127.0.0.1",
                                        port = 32180, stoppable = False,
                                        fast_shutdown = True,
@@ -179,6 +182,17 @@ class TestNumpyClient(object):
         assert(np.array_equal(a,b))
         assert(np.array_equal(a,c))
 
+        # Make sure none of the files are greater than 16384 bytes as
+        # we configured with the bulkdata_args above.
+        datapath = os.path.join(testdb, "data")
+        for (dirpath, dirnames, filenames) in os.walk(datapath):
+            for f in filenames:
+                fn = os.path.join(dirpath, f)
+                size = os.path.getsize(fn)
+                if size > 16384:
+                    raise AssertionError(sprintf("%s is too big: %d > %d\n",
+                                                 fn, size, 16384))
+
         nilmdb.client.numpyclient.StreamInserterNumpy._max_data = old_max_data
         client.close()


