|
|
@@ -16,6 +16,7 @@ import nilmrun |
|
|
|
from collections import OrderedDict |
|
|
|
import sys |
|
|
|
import functools |
|
|
|
import collections |
|
|
|
|
|
|
|
class DataError(ValueError):



    """Domain-specific error for bad input data.

    Subclasses ValueError so callers catching ValueError still work;
    raised by this module when data cannot be processed.
    """
    pass
|
|
@@ -118,7 +119,7 @@ def peak_detect(data, delta): |
|
|
|
|
|
|
|
def trainola_matcher(data, interval, args, insert_func, final_chunk): |
|
|
|
"""Perform cross-correlation match""" |
|
|
|
( columns, exemplars ) = args |
|
|
|
( src_columns, dest_count, exemplars ) = args |
|
|
|
nrows = data.shape[0] |
|
|
|
|
|
|
|
# We want at least 10% more points than the widest exemplar. |
|
|
@@ -129,20 +130,20 @@ def trainola_matcher(data, interval, args, insert_func, final_chunk): |
|
|
|
# This is how many points we'll consider valid in the |
|
|
|
# cross-correlation. |
|
|
|
valid = nrows + 1 - widest |
|
|
|
matches = [] |
|
|
|
matches = collections.defaultdict(list) |
|
|
|
|
|
|
|
# Try matching against each of the exemplars |
|
|
|
for e_num, e in enumerate(exemplars): |
|
|
|
for e in exemplars: |
|
|
|
corrs = [] |
|
|
|
|
|
|
|
# Compute cross-correlation for each column |
|
|
|
for c in e.columns: |
|
|
|
a = data[:,columns[c] + 1] |
|
|
|
b = e.data[:,e.columns[c]] |
|
|
|
for col_name in e.columns: |
|
|
|
a = data[:, src_columns[col_name] + 1] |
|
|
|
b = e.data[:, e.columns[col_name]] |
|
|
|
corr = scipy.signal.fftconvolve(a, np.flipud(b), 'valid')[0:valid] |
|
|
|
|
|
|
|
# Scale by the norm of the exemplar |
|
|
|
corr = corr / e.scale[columns[c]] |
|
|
|
corr = corr / e.scale[e.columns[col_name]] |
|
|
|
corrs.append(corr) |
|
|
|
|
|
|
|
# Find the peaks using the column with the largest amplitude |
|
|
@@ -151,7 +152,7 @@ def trainola_matcher(data, interval, args, insert_func, final_chunk): |
|
|
|
peaks = [ p[0] for p in peaks_minmax[1] ] |
|
|
|
|
|
|
|
# Now look at every peak |
|
|
|
for p in peaks: |
|
|
|
for row in peaks: |
|
|
|
# Correlation for each column must be close enough to 1. |
|
|
|
for (corr, scale) in zip(corrs, e.scale): |
|
|
|
# The accepted distance from 1 is based on the relative |
|
|
@@ -159,27 +160,29 @@ def trainola_matcher(data, interval, args, insert_func, final_chunk): |
|
|
|
# scale 1.0 -> distance 0.1 |
|
|
|
# scale 0.0 -> distance 1.0 |
|
|
|
distance = 1 - 0.9 * (scale / e.scale[biggest]) |
|
|
|
if abs(corr[p] - 1) > distance: |
|
|
|
if abs(corr[row] - 1) > distance: |
|
|
|
# No match |
|
|
|
break |
|
|
|
else: |
|
|
|
# Successful match |
|
|
|
matches.append((p, e_num)) |
|
|
|
|
|
|
|
# Print matches |
|
|
|
for (point, e_num) in sorted(matches): |
|
|
|
# Ignore matches that showed up at the very tail of the window, |
|
|
|
# and shorten the window accordingly. This is an attempt to avoid |
|
|
|
# problems at chunk boundaries. |
|
|
|
if point > (valid - 50) and not final_chunk: |
|
|
|
valid -= 50 |
|
|
|
break |
|
|
|
print "matched", data[point,0], "exemplar", exemplars[e_num].name |
|
|
|
|
|
|
|
#from matplotlib import pyplot as p |
|
|
|
#p.plot(data[:,1:3]) |
|
|
|
#p.show() |
|
|
|
matches[row].append(e) |
|
|
|
|
|
|
|
# Insert matches into destination stream. |
|
|
|
matched_rows = sorted(matches.keys()) |
|
|
|
out = np.zeros((len(matched_rows), dest_count + 1)) |
|
|
|
|
|
|
|
for n, row in enumerate(matched_rows): |
|
|
|
# Fill timestamp |
|
|
|
out[n][0] = data[row, 0] |
|
|
|
|
|
|
|
# Mark matched exemplars |
|
|
|
for exemplar in matches[row]: |
|
|
|
out[n, exemplar.dest_column + 1] = 1.0 |
|
|
|
|
|
|
|
# Insert it |
|
|
|
insert_func(out) |
|
|
|
|
|
|
|
# Return how many rows we processed |
|
|
|
return max(valid, 0) |
|
|
|
|
|
|
|
def trainola(conf): |
|
|
@@ -249,7 +252,7 @@ def trainola(conf): |
|
|
|
printf(" %s\n", interval.human_string()) |
|
|
|
nilmtools.filter.process_numpy_interval( |
|
|
|
interval, extractor, inserter, rows * 3, |
|
|
|
trainola_matcher, (src_columns, exemplars)) |
|
|
|
trainola_matcher, (src_columns, dest.layout_count, exemplars)) |
|
|
|
|
|
|
|
return "done" |
|
|
|
|
|
|
@@ -265,7 +268,7 @@ def main(argv = None): |
|
|
|
try: |
|
|
|
# Passed in a JSON string (e.g. on the command line) |
|
|
|
conf = json.loads(argv[0]) |
|
|
|
except TypeError: |
|
|
|
except TypeError as e: |
|
|
|
# Passed in the config dictionary (e.g. from NilmRun) |
|
|
|
conf = argv[0] |
|
|
|
|
|
|
|