|
|
|
|
|
#!/usr/bin/python |
|
|
|
|
|
|
|
import os |
|
|
|
import sys |
|
|
|
import cPickle as pickle |
|
|
|
import argparse |
|
|
|
import fcntl |
|
|
|
import re |
|
|
|
from nilmdb.client.numpyclient import layout_to_dtype |
|
|
|
|
|
|
|
# Command-line interface: one required database root path, plus an
# optional confirmation flag.  Without --yes the script only reports
# oversized files and changes nothing.
DESCRIPTION = """
Fix database corruption where binary writes caused too much data to be
written to the file. Truncates files to the correct length. This was
fixed by b98ff1331a515ad47fd3203615e835b529b039f9.
"""
parser = argparse.ArgumentParser(description=DESCRIPTION)
parser.add_argument("path", action="store", help='Database root path')
parser.add_argument("-y", "--yes", action="store_true", help='Fix them')
args = parser.parse_args()
|
|
|
|
|
|
|
# Guard against a concurrently running nilmdb server: take an exclusive,
# non-blocking flock on the database's data.lock file.  If another
# process already holds the lock, flock() raises immediately (LOCK_NB)
# and the script aborts before touching any data.
#
# NOTE(fix): the lock must stay held for the entire run, so keep the
# file object open for the life of the process.  The previous version
# took the flock inside a `with` block, which closed the file — and
# therefore released the lock — right after acquiring it.
lock = os.path.join(args.path, "data.lock")
lockfile = open(lock, "w")
fcntl.flock(lockfile.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
|
|
|
|
|
|
|
# Map of stream directory path -> maximum legal data-file size (bytes).
fix = {}

# First pass: scan the database tree.  Each stream directory contains a
# pickled "_format" file describing its layout; from that we compute the
# largest size any data file in that directory may legally have
# (bytes per row * rows per file).
for (path, dirs, files) in os.walk(args.path):
    if "_format" in files:
        # Open in binary mode: pickle data is bytes, not text.  (Also
        # renamed the file handle — "format" shadowed the builtin.)
        with open(os.path.join(path, "_format"), "rb") as fp:
            fmt = pickle.load(fp)
        rowsize = layout_to_dtype(fmt["layout"]).itemsize
        maxsize = rowsize * fmt["rows_per_file"]
        fix[path] = maxsize
        # Sanity check: legitimate max sizes are expected to be at least
        # 128 MB; anything smaller suggests a corrupt _format file.
        if maxsize < 128000000:
            raise Exception("bad maxsize " + str(maxsize))
|
|
|
|
|
|
|
# Data files are named as lowercase-hex sequence numbers ("0000", ...).
# Compile the pattern once instead of on every file (raw string for the
# regex, per convention).
datafile_re = re.compile(r"^[0-9a-f]{4,}$")

# Second pass: walk each stream directory and truncate any data file
# that grew past its layout's maximum size.  Without --yes, only report.
for fixpath in fix:
    maxsize = fix[fixpath]  # invariant for this whole subtree; hoisted
    for (path, dirs, files) in os.walk(fixpath):
        for fn in files:
            if not datafile_re.match(fn):
                continue
            fn = os.path.join(path, fn)
            size = os.path.getsize(fn)
            if size > maxsize:
                diff = size - maxsize
                print("%d too big: %s" % (diff, fn))
                if args.yes:
                    # "r+b": open existing file read/write in binary —
                    # the correct mode for in-place truncation (the old
                    # "a+" append mode happened to work but was odd).
                    with open(fn, "r+b") as dbfile:
                        dbfile.truncate(maxsize)