
Trainola inserts into the destination stream now

tags/nilmtools-1.3.1^0
Jim Paris, 11 years ago
commit 25c35a56f6
3 changed files with 64 additions and 34 deletions:
  1. Makefile (+4, -8)
  2. extras/trainola-test-param.js (+31, -0)
  3. nilmtools/trainola.py (+29, -26)

Makefile (+4, -8)

@@ -8,24 +8,21 @@ else
@echo "Try 'make install'"
endif

test:
-nilmtool -u http://bucket/nilmdb remove -s min -e max /sharon/prep-a-matches
make -C ../nilmrun
test: test_trainola

test_trainola:
@make install >/dev/null
nilmtools/trainola.py
-nilmtool -u http://bucket/nilmdb remove -s min -e max \
/sharon/prep-a-matches
nilmtools/trainola.py "$$(cat extras/trainola-test-param.js)"

test_cleanup:
nilmtools/cleanup.py -e extras/cleanup.cfg
nilmtools/cleanup.py extras/cleanup.cfg

test_insert:
@make install >/dev/null
nilmtools/insert.py --file --dry-run /test/foo </dev/null

test_copy:
@make install >/dev/null
nilmtools/copy_wildcard.py -U "http://nilmdb.com/bucket/" -D /lees*

/tmp/raw.dat:
@@ -35,7 +32,6 @@ test_copy:
--eval 'save("-ascii","/tmp/raw.dat","raw");'

test_prep: /tmp/raw.dat
@make install >/dev/null
-nilmtool destroy -R /test/raw
-nilmtool destroy -R /test/sinefit
-nilmtool destroy -R /test/prep
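
With this commit the trainola test no longer hard-codes its parameters: the test_trainola target installs the tools ("@make install") and then runs nilmtools/trainola.py with the JSON read from extras/trainola-test-param.js, so the whole flow can be exercised with "make test" (which now depends on test_trainola) or with "make test_trainola" directly.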


extras/trainola-test-param.js (+31, -0)

@@ -0,0 +1,31 @@
{ "url": "http://bucket.mit.edu/nilmdb",
"dest_stream": "/sharon/prep-a-matches",
"stream": "/sharon/prep-a",
"start": 1366111383280463,
"end": 1366126163457797,
"columns": [ { "name": "P1", "index": 0 },
{ "name": "Q1", "index": 1 },
{ "name": "P3", "index": 2 } ],
"exemplars": [
{ "name": "Boiler Pump ON",
"url": "http://bucket.mit.edu/nilmdb",
"stream": "/sharon/prep-a",
"start": 1366260494269078,
"end": 1366260608185031,
"dest_column": 0,
"columns": [ { "name": "P1", "index": 0 },
{ "name": "Q1", "index": 1 }
]
},
{ "name": "Boiler Pump OFF",
"url": "http://bucket.mit.edu/nilmdb",
"stream": "/sharon/prep-a",
"start": 1366260864215764,
"end": 1366260870882998,
"dest_column": 1,
"columns": [ { "name": "P1", "index": 0 },
{ "name": "Q1", "index": 1 }
]
}
]
}
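
As a quick illustration of the new parameter format (a sketch only, not part of this commit; load_params and the destination column count of 2 are assumptions made for the example), a file like this can be loaded with plain json and each exemplar's dest_column checked against the number of columns in dest_stream before handing the dict to trainola:

import json

def load_params(path, dest_column_count):
    """Load a trainola parameter file and sanity-check its exemplars."""
    with open(path) as f:
        conf = json.load(f)
    for ex in conf["exemplars"]:
        # Each exemplar marks exactly one column of dest_stream.
        if not 0 <= ex["dest_column"] < dest_column_count:
            raise ValueError("bad dest_column for exemplar %r" % ex["name"])
    return conf

# Here /sharon/prep-a-matches is assumed to carry two match columns,
# one per exemplar ("Boiler Pump ON" -> 0, "Boiler Pump OFF" -> 1).
conf = load_params("extras/trainola-test-param.js", dest_column_count=2)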

nilmtools/trainola.py (+29, -26)

@@ -16,6 +16,7 @@ import nilmrun
from collections import OrderedDict
import sys
import functools
import collections

class DataError(ValueError):
pass
@@ -118,7 +119,7 @@ def peak_detect(data, delta):

def trainola_matcher(data, interval, args, insert_func, final_chunk):
"""Perform cross-correlation match"""
( columns, exemplars ) = args
( src_columns, dest_count, exemplars ) = args
nrows = data.shape[0]

# We want at least 10% more points than the widest exemplar.
@@ -129,20 +130,20 @@ def trainola_matcher(data, interval, args, insert_func, final_chunk):
# This is how many points we'll consider valid in the
# cross-correlation.
valid = nrows + 1 - widest
matches = []
matches = collections.defaultdict(list)

# Try matching against each of the exemplars
for e_num, e in enumerate(exemplars):
for e in exemplars:
corrs = []

# Compute cross-correlation for each column
for c in e.columns:
a = data[:,columns[c] + 1]
b = e.data[:,e.columns[c]]
for col_name in e.columns:
a = data[:, src_columns[col_name] + 1]
b = e.data[:, e.columns[col_name]]
corr = scipy.signal.fftconvolve(a, np.flipud(b), 'valid')[0:valid]

# Scale by the norm of the exemplar
corr = corr / e.scale[columns[c]]
corr = corr / e.scale[e.columns[col_name]]
corrs.append(corr)

# Find the peaks using the column with the largest amplitude
@@ -151,7 +152,7 @@ def trainola_matcher(data, interval, args, insert_func, final_chunk):
peaks = [ p[0] for p in peaks_minmax[1] ]

# Now look at every peak
for p in peaks:
for row in peaks:
# Correlation for each column must be close enough to 1.
for (corr, scale) in zip(corrs, e.scale):
# The accepted distance from 1 is based on the relative
@@ -159,27 +160,29 @@ def trainola_matcher(data, interval, args, insert_func, final_chunk):
# scale 1.0 -> distance 0.1
# scale 0.0 -> distance 1.0
distance = 1 - 0.9 * (scale / e.scale[biggest])
if abs(corr[p] - 1) > distance:
if abs(corr[row] - 1) > distance:
# No match
break
else:
# Successful match
matches.append((p, e_num))

# Print matches
for (point, e_num) in sorted(matches):
# Ignore matches that showed up at the very tail of the window,
# and shorten the window accordingly. This is an attempt to avoid
# problems at chunk boundaries.
if point > (valid - 50) and not final_chunk:
valid -= 50
break
print "matched", data[point,0], "exemplar", exemplars[e_num].name

#from matplotlib import pyplot as p
#p.plot(data[:,1:3])
#p.show()
matches[row].append(e)

# Insert matches into destination stream.
matched_rows = sorted(matches.keys())
out = np.zeros((len(matched_rows), dest_count + 1))

for n, row in enumerate(matched_rows):
# Fill timestamp
out[n][0] = data[row, 0]

# Mark matched exemplars
for exemplar in matches[row]:
out[n, exemplar.dest_column + 1] = 1.0

# Insert it
insert_func(out)

# Return how many rows we processed
return max(valid, 0)

def trainola(conf):
@@ -249,7 +252,7 @@ def trainola(conf):
printf(" %s\n", interval.human_string())
nilmtools.filter.process_numpy_interval(
interval, extractor, inserter, rows * 3,
trainola_matcher, (src_columns, exemplars))
trainola_matcher, (src_columns, dest.layout_count, exemplars))

return "done"

@@ -265,7 +268,7 @@ def main(argv = None):
try:
# Passed in a JSON string (e.g. on the command line)
conf = json.loads(argv[0])
except TypeError:
except TypeError as e:
# Passed in the config dictionary (e.g. from NilmRun)
conf = argv[0]
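
To illustrate the new insertion path (an illustrative sketch, not code from the commit; the row indices, timestamps and two-column destination below are made up): trainola_matcher now collects matches per data row in a defaultdict, builds one output row per matched row with the timestamp in column 0 and a 1.0 in the column after each matching exemplar's dest_column, and hands that array to insert_func:

import numpy as np

# Hypothetical matches: data-row index -> dest_columns of the exemplars
# that matched at that row.
matches = {10: [0], 42: [1]}
timestamps = {10: 1000.0, 42: 2000.0}   # placeholder timestamps for those rows
dest_count = 2                          # destination stream has two match columns

matched_rows = sorted(matches.keys())
out = np.zeros((len(matched_rows), dest_count + 1))
for n, row in enumerate(matched_rows):
    out[n, 0] = timestamps[row]         # column 0: timestamp of the matched row
    for col in matches[row]:
        out[n, col + 1] = 1.0           # flag the matching exemplar's column

# out == [[1000., 1., 0.],
#         [2000., 0., 1.]]   -- this is the array passed to insert_func()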


