Compare commits: nilmtools- ... nilmtools-

4 commits

| Author | SHA1 | Date |
|---|---|---|
| | 33c3586bea | |
| | c1e0f8ffbc | |
| | d2853bdb0e | |
| | a4d4bc22fc | |
Makefile (17 changed lines)

```diff
@@ -8,26 +8,33 @@ else
 	@echo "Try 'make install'"
 endif
 
-test: test_pipewatch
+test: test_trainola3
 
 test_pipewatch:
 	nilmtools/pipewatch.py -t 3 "seq 10 20" "seq 20 30"
 
 test_trainola:
-	-nilmtool -u http://bucket/nilmdb remove -s min -e max \
-		/sharon/prep-a-matches
-	nilmtools/trainola.py "$$(cat extras/trainola-test-param-2.js)"
 	-nilmtool -u http://bucket/nilmdb remove -s min -e max \
 		/sharon/prep-a-matches
 	nilmtools/trainola.py "$$(cat extras/trainola-test-param.js)"
 
+test_trainola2:
+	-nilmtool -u http://bucket/nilmdb remove -s min -e max \
+		/sharon/prep-a-matches
+	nilmtools/trainola.py "$$(cat extras/trainola-test-param-2.js)"
+
+test_trainola3:
+	-nilmtool -u "http://bucket/nilmdb" destroy -R /test/jim
+	nilmtool -u "http://bucket/nilmdb" create /test/jim uint8_3
+	nilmtools/trainola.py "$$(cat extras/trainola-test-param-3.js)"
+	nilmtool -u "http://bucket/nilmdb" extract /test/jim -s min -e max
 
 test_cleanup:
 	nilmtools/cleanup.py -e extras/cleanup.cfg
 	nilmtools/cleanup.py extras/cleanup.cfg
 
 test_insert:
-	nilmtools/insert.py --file --dry-run /test/foo </dev/null
+	nilmtools/insert.py --skip --file --dry-run /foo/bar ~/data/20130311T2100.prep1.gz ~/data/20130311T2100.prep1.gz ~/data/20130311T2200.prep1.gz
 
 test_copy:
 	nilmtools/copy_wildcard.py -U "http://nilmdb.com/bucket/" -D /lees*
```
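For orientation, the new test_trainola3 target is what the default test target now runs: clear any previous /test/jim stream, recreate it with three uint8 columns, run trainola with the new parameter file, and dump the detected matches. The same sequence as a short Python sketch (assumes nilmtool and the nilmtools scripts are runnable from the working directory, as the Makefile does; the leading '-' on the destroy recipe line, which tells make to ignore a failure, becomes an unchecked call here):

```python
import subprocess

URL = "http://bucket/nilmdb"

# destroy may fail on the first run (stream doesn't exist yet); make's
# '-' prefix ignores that, so use call() rather than check_call().
subprocess.call(["nilmtool", "-u", URL, "destroy", "-R", "/test/jim"])
subprocess.check_call(["nilmtool", "-u", URL, "create", "/test/jim", "uint8_3"])

# trainola.py takes the whole JSON parameter blob as one argument,
# which is what "$$(cat ...)" expands to in the Makefile recipe.
with open("extras/trainola-test-param-3.js") as f:
    subprocess.check_call(["nilmtools/trainola.py", f.read()])

subprocess.check_call(["nilmtool", "-u", URL, "extract", "/test/jim",
                       "-s", "min", "-e", "max"])
```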
extras/trainola-test-param-3.js (new file, 40 lines)

```diff
@@ -0,0 +1,40 @@
+{
+    "url": "http://bucket/nilmdb",
+    "stream": "/sharon/prep-a",
+    "dest_stream": "/test/jim",
+    "start": 1364184839901599,
+    "end": 1364184942407610.2,
+
+    "columns": [ { "index": 0, "name": "P1" } ],
+
+    "exemplars": [
+        {
+            "name": "A - True DBL Freezer ON",
+            "dest_column": 0,
+            "url": "http://bucket/nilmdb",
+            "stream": "/sharon/prep-a",
+            "columns": [ { "index": 0, "name": "P1" } ],
+            "start": 1365277707649000,
+            "end": 1365277710705000
+        },
+        {
+            "name": "A - Boiler 1 Fan OFF",
+            "dest_column": 1,
+            "url": "http://bucket/nilmdb",
+            "stream": "/sharon/prep-a",
+            "columns": [ { "index": 0, "name": "P1" } ],
+            "start": 1364188370735000,
+            "end": 1364188373819000
+        },
+        {
+            "name": "A - True DBL Freezer OFF",
+            "dest_column": 2,
+            "url": "http://bucket/nilmdb",
+            "stream": "/sharon/prep-a",
+            "columns": [ { "index": 0, "name": "P1" } ],
+            "start": 1365278087982000,
+            "end": 1365278089340000
+        }
+    ]
+}
```
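The parameter file is plain JSON, so it can be inspected with the standard library before a run. A minimal sketch (uses only the fields shown above; the timestamps appear to be microsecond-resolution nilmdb timestamps):

```python
import json

with open("extras/trainola-test-param-3.js") as f:
    params = json.load(f)

# One source stream, one destination stream, and a search range:
print("%s -> %s" % (params["stream"], params["dest_stream"]))

# Each exemplar targets one column of the destination; dest_column 0..2
# lines up with the three columns of the uint8_3 stream /test/jim that
# the Makefile's test_trainola3 target creates.
for ex in params["exemplars"]:
    print("%d  %-25s  %d us long" % (ex["dest_column"], ex["name"],
                                     ex["end"] - ex["start"]))
```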
```diff
@@ -32,7 +32,7 @@ def main(argv = None):
     extractor = NumpyClient(f.src.url).stream_extract_numpy
     inserter = NumpyClient(f.dest.url).stream_insert_numpy_context
     for i in f.intervals():
-        print "Processing", f.interval_string(i)
+        print "Processing", i.human_string()
         with inserter(f.dest.path, i.start, i.end) as insert_ctx:
             for data in extractor(f.src.path, i.start, i.end):
                 insert_ctx.insert(data)
```
nilmtools/insert.py

```diff
@@ -53,7 +53,8 @@ def parse_args(argv = None):
       is stepped forward to match 'clock'.
 
     - If 'data' is running ahead, there is overlap in the data, and an
-      error is raised.
+      error is raised.  If '--skip' is specified, the current file
+      is skipped instead of raising an error.
     """))
     parser.add_argument("-u", "--url", action="store",
                         default="http://localhost/nilmdb/",
@@ -61,6 +62,8 @@ def parse_args(argv = None):
     group = parser.add_argument_group("Misc options")
     group.add_argument("-D", "--dry-run", action="store_true",
                        help="Parse files, but don't insert any data")
+    group.add_argument("-s", "--skip", action="store_true",
+                       help="Skip files if the data would overlap")
     group.add_argument("-m", "--max-gap", action="store", default=10.0,
                        metavar="SEC", type=float,
                        help="Max discrepency between clock and data "
```
```diff
@@ -235,6 +238,10 @@ def main(argv = None):
                          "is %s but clock time is only %s",
                          timestamp_to_human(data_ts),
                          timestamp_to_human(clock_ts))
+                if args.skip:
+                    printf("%s\n", err)
+                    printf("Skipping the remainder of this file\n")
+                    break
                 raise ParseError(filename, err)
 
             if (data_ts + max_gap) < clock_ts:
```
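Taken together, the two hunks make overlap handling opt-in: by default an out-of-order file still raises ParseError, while --skip reports the error, abandons the rest of the current file, and moves on to the next one. A minimal control-flow sketch (ParseError, parse_file, and the file names here are hypothetical stand-ins, not insert.py's real internals):

```python
class ParseError(Exception):
    pass

def parse_file(filename):
    # Hypothetical stand-in: pretend this file's data overlaps.
    if "overlap" in filename:
        raise ParseError("data time is ahead of clock time")

def insert_files(filenames, skip=False):
    for filename in filenames:
        try:
            parse_file(filename)
        except ParseError as err:
            if not skip:
                raise
            print("%s: %s" % (filename, err))
            print("Skipping the remainder of this file")
        # with skip=True we fall through and continue with the next file

# The second file is reported and skipped; the third is still processed.
insert_files(["a.prep1.gz", "overlap.prep1.gz", "b.prep1.gz"], skip=True)
```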
nilmtools/trainola.py

```diff
@@ -106,9 +106,14 @@ class Exemplar(object):
 
 def peak_detect(data, delta):
     """Simple min/max peak detection algorithm, taken from my code
-    in the disagg.m from the 10-8-5 paper"""
-    mins = [];
-    maxs = [];
+    in the disagg.m from the 10-8-5 paper.
+
+    Returns an array of peaks: each peak is a tuple
+       (n, p, is_max)
+    where n is the row number in 'data', and p is 'data[n]',
+    and is_max is True if this is a maximum, False if it's a minimum.
+    """
+    peaks = [];
     cur_min = (None, np.inf)
     cur_max = (None, -np.inf)
     lookformax = False
```
```diff
@@ -119,15 +124,15 @@ def peak_detect(data, delta):
             cur_min = (n, p)
         if lookformax:
             if p < (cur_max[1] - delta):
-                maxs.append(cur_max)
+                peaks.append((cur_max[0], cur_max[1], True))
                 cur_min = (n, p)
                 lookformax = False
         else:
             if p > (cur_min[1] + delta):
-                mins.append(cur_min)
+                peaks.append((cur_min[0], cur_min[1], False))
                 cur_max = (n, p)
                 lookformax = True
-    return (mins, maxs)
+    return peaks
 
 def timestamp_to_short_human(timestamp):
     dt = datetime_tz.datetime_tz.fromtimestamp(timestamp_to_seconds(timestamp))
```
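The revised function is small enough to exercise on its own. A standalone sketch (the loop header and the cur_min/cur_max bookkeeping that fall outside the hunks are reconstructed from the context lines and the usual min/max peak-detection pattern, so treat them as an approximation; the sample data is made up):

```python
import numpy as np

def peak_detect(data, delta):
    """Returns a time-ordered list of (n, p, is_max) tuples, as in the
    revised trainola.py docstring."""
    peaks = []
    cur_min = (None, np.inf)
    cur_max = (None, -np.inf)
    lookformax = False
    for (n, p) in enumerate(data):
        # Track the running extremes...
        if p > cur_max[1]:
            cur_max = (n, p)
        if p < cur_min[1]:
            cur_min = (n, p)
        # ...and emit a peak once the signal retreats by more than delta.
        if lookformax:
            if p < (cur_max[1] - delta):
                peaks.append((cur_max[0], cur_max[1], True))
                cur_min = (n, p)
                lookformax = False
        else:
            if p > (cur_min[1] + delta):
                peaks.append((cur_min[0], cur_min[1], False))
                cur_max = (n, p)
                lookformax = True
    return peaks

data = [0.0, 0.9, 0.1, -0.8, 0.05, 0.95, 0.0]
print(peak_detect(data, 0.1))
# -> [(0, 0.0, False), (1, 0.9, True), (3, -0.8, False), (5, 0.95, True)]
```

Minima and maxima now arrive interleaved in row order, in one list, instead of the old separate (mins, maxs) pair; that ordering is what the new filtering pass below relies on.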
```diff
@@ -164,11 +169,35 @@ def trainola_matcher(data, interval, args, insert_func, final_chunk):
 
     # Find the peaks using the column with the largest amplitude
     biggest = e.scale.index(max(e.scale))
-    peaks_minmax = peak_detect(corrs[biggest], 0.1)
-    peaks = [ p[0] for p in peaks_minmax[1] ]
+    peaks = peak_detect(corrs[biggest], 0.1)
 
-    # Now look at every peak
-    for row in peaks:
+    # To try to reduce false positives, discard peaks where
+    # there's a higher-magnitude peak (either min or max) within
+    # one exemplar width nearby.
+    good_peak_locations = []
+    for (i, (n, p, is_max)) in enumerate(peaks):
+        if not is_max:
+            continue
+        ok = True
+        # check up to 'e.count' rows before this one
+        j = i-1
+        while ok and j >= 0 and peaks[j][0] > (n - e.count):
+            if abs(peaks[j][1]) > abs(p):
+                ok = False
+            j -= 1
+
+        # check up to 'e.count' rows after this one
+        j = i+1
+        while ok and j < len(peaks) and peaks[j][0] < (n + e.count):
+            if abs(peaks[j][1]) > abs(p):
+                ok = False
+            j += 1
+
+        if ok:
+            good_peak_locations.append(n)
+
+    # Now look at all good peaks
+    for row in good_peak_locations:
         # Correlation for each column must be close enough to 1.
         for (corr, scale) in zip(corrs, e.scale):
             # The accepted distance from 1 is based on the relative
```
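The effect of the new filter is easiest to see on a concrete peak list. A sketch reusing the peaks from the earlier example (the function body mirrors the loop added above; width stands in for the exemplar length e.count, here an arbitrary 3 rows):

```python
def keep_good_maxima(peaks, width):
    """Keep a maximum only if no peak within 'width' rows on either
    side has a larger magnitude (mirrors the trainola_matcher loop)."""
    good = []
    for (i, (n, p, is_max)) in enumerate(peaks):
        if not is_max:
            continue
        ok = True
        j = i - 1
        while ok and j >= 0 and peaks[j][0] > (n - width):
            if abs(peaks[j][1]) > abs(p):
                ok = False
            j -= 1
        j = i + 1
        while ok and j < len(peaks) and peaks[j][0] < (n + width):
            if abs(peaks[j][1]) > abs(p):
                ok = False
            j += 1
        if ok:
            good.append(n)
    return good

peaks = [(0, 0.0, False), (1, 0.9, True), (3, -0.8, False), (5, 0.95, True)]
print(keep_good_maxima(peaks, width=3))   # -> [1, 5]: both maxima survive

# A deeper dip at row 3 outweighs both maxima, since each is within
# 3 rows of it, so both candidates are discarded:
peaks = [(0, 0.0, False), (1, 0.9, True), (3, -2.0, False), (5, 0.95, True)]
print(keep_good_maxima(peaks, width=3))   # -> []
```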