Compare commits
2 commits: bxinterval ... nilmdb-ori

Author | SHA1 | Date
---|---|---
 | e201f67684 |
 | 44fce1ff33 |
@@ -1,9 +0,0 @@
# -*- conf -*-

[run]
# branch = True

[report]
exclude_lines =
    pragma: no cover
    if 0:
2 .gitignore (vendored)
@@ -1,2 +0,0 @@
.coverage
*.pyc
22 Makefile
@@ -1,20 +1,2 @@
all: test

tool:
	python nilmtool.py --help
	python nilmtool.py list --help
	python nilmtool.py -u asfdadsf list

lint:
	pylint -f parseable nilmdb

test:
	nosetests

profile:
	nosetests --with-profile

clean::
	find . -name '*pyc' | xargs rm -f
	rm -f .coverage
	rm -rf tests/*testdb*

all:
	nosetests nilmdb/test_interval.py
@@ -1,2 +1,4 @@
sudo apt-get install python-nose python-coverage
sudo apt-get install python-tables cython python-cherrypy3

To install,

    python setup.py install
5 TODO
@@ -1,5 +0,0 @@
- Merge adjacent intervals on insert (maybe with client help?)

- Better testing:
  - see about getting coverage on layout.pyx
  - layout.pyx performance tests, before and after generalization
26 bin/nilm-test.py (new executable file)
@@ -0,0 +1,26 @@
#!/usr/bin/python

from nilmdb import Interval
from optparse import OptionParser
import sys

version = "1.0"

parser = OptionParser()
parser.add_option("-d", "--db", dest="database", metavar="DATABASE",
                  help="location of sqlite database")
parser.add_option("-V", "--version", dest="version", default=False, action="store_true",
                  help="print version then exit")

(options, args) = parser.parse_args()

if options.version:
    print "This script version: " + version
    sys.exit(0)

if options.database is None:
    print "Error: database is mandatory"
    sys.exit(1)

print "Database is " + options.database
@@ -1,710 +0,0 @@
#!/usr/bin/python
#
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Disable the invalid name warning as we are inheriting from a standard library
# object.
# pylint: disable-msg=C6409,W0212

"""A version of the datetime module which *cares* about timezones.

This module will never return a naive datetime object. This requires the module
know your local timezone, which it tries really hard to figure out.

You can override the detection by using the datetime.tzaware.defaulttz_set
method. If the module is unable to figure out the timezone itself this method
*must* be called before the normal module is imported. If done before importing
it can also speed up the time taken to import as the defaulttz will no longer
try and do the detection.
"""

__author__ = "tansell@google.com (Tim Ansell)"

import calendar
import datetime
import os
import os.path
import re
import time
import warnings
import dateutil.parser
import dateutil.relativedelta
import dateutil.tz
import pytz
import pytz_abbr


try:
  # pylint: disable-msg=C6204
  import functools
except ImportError, e:

  class functools(object):
    """Fake replacement for a full functools."""

    # pylint: disable-msg=W0613
    @staticmethod
    def wraps(f, *args, **kw):
      return f


# Need to patch pytz.utc to have a _utcoffset so you can normalize/localize
# using it.
pytz.utc._utcoffset = datetime.timedelta()


timedelta = datetime.timedelta


def _tzinfome(tzinfo):
  """Gets a tzinfo object from a string.

  Args:
    tzinfo: A string (or string like) object, or a datetime.tzinfo object.

  Returns:
    A datetime.tzinfo object.

  Raises:
    UnknownTimeZoneError: If the timezone given can't be decoded.
  """
  if not isinstance(tzinfo, datetime.tzinfo):
    try:
      tzinfo = pytz.timezone(tzinfo)
    except AttributeError:
      raise pytz.UnknownTimeZoneError("Unknown timezone! %s" % tzinfo)
  return tzinfo


# Our "local" timezone
_localtz = None


def localtz():
  """Get the local timezone.

  Returns:
    The localtime timezone as a tzinfo object.
  """
  # pylint: disable-msg=W0603
  global _localtz
  if _localtz is None:
    _localtz = detect_timezone()
  return _localtz


def localtz_set(timezone):
  """Set the local timezone."""
  # pylint: disable-msg=W0603
  global _localtz
  _localtz = _tzinfome(timezone)


def detect_timezone():
  """Try and detect the timezone that Python is currently running in.

  We have a bunch of different methods for trying to figure this out (listed in
  order they are attempted).
  * Try TZ environment variable.
  * Try and find /etc/timezone file (with timezone name).
  * Try and find /etc/localtime file (with timezone data).
  * Try and match a TZ to the current dst/offset/shortname.

  Returns:
    The detected local timezone as a tzinfo object

  Raises:
    pytz.UnknownTimeZoneError: If it was unable to detect a timezone.
  """
  # First we try the TZ variable
  tz = _detect_timezone_environ()
  if tz is not None:
    return tz

  # Second we try /etc/timezone and use the value in that
  tz = _detect_timezone_etc_timezone()
  if tz is not None:
    return tz

  # Next we try and see if something matches the tzinfo in /etc/localtime
  tz = _detect_timezone_etc_localtime()
  if tz is not None:
    return tz

  # Next we try and use a similar method to what PHP does.
  # We first try to search on time.tzname, time.timezone, time.daylight to
  # match a pytz zone.
  warnings.warn("Had to fall back to worst detection method (the 'PHP' "
                "method).")

  tz = _detect_timezone_php()
  if tz is not None:
    return tz

  raise pytz.UnknownTimeZoneError("Unable to detect your timezone!")


def _detect_timezone_environ():
  if "TZ" in os.environ:
    try:
      return pytz.timezone(os.environ["TZ"])
    except (IOError, pytz.UnknownTimeZoneError):
      warnings.warn("You provided a TZ environment value (%r) we did not "
                    "understand!" % os.environ["TZ"])


def _detect_timezone_etc_timezone():
  if os.path.exists("/etc/timezone"):
    try:
      tz = file("/etc/timezone").read().strip()
      try:
        return pytz.timezone(tz)
      except (IOError, pytz.UnknownTimeZoneError), ei:
        warnings.warn("Your /etc/timezone file references a timezone (%r) that"
                      " is not valid (%r)." % (tz, ei))

    # Problem reading the /etc/timezone file
    except IOError, eo:
      warnings.warn("Could not access your /etc/timezone file: %s" % eo)


def _detect_timezone_etc_localtime():
  matches = []
  if os.path.exists("/etc/localtime"):
    localtime = pytz.tzfile.build_tzinfo("/etc/localtime",
                                         file("/etc/localtime"))

    # See if we can find a "Human Name" for this..
    for tzname in pytz.all_timezones:
      tz = _tzinfome(tzname)

      if dir(tz) != dir(localtime):
        continue

      for attrib in dir(tz):
        # Ignore functions and specials
        if callable(getattr(tz, attrib)) or attrib.startswith("__"):
          continue

        # This will always be different
        if attrib == "zone" or attrib == "_tzinfos":
          continue

        if getattr(tz, attrib) != getattr(localtime, attrib):
          break

      # We get here iff break didn't happen, i.e. no meaningful attributes
      # differ between tz and localtime
      else:
        matches.append(tzname)

    if len(matches) == 1:
      return _tzinfome(matches[0])
    else:
      # Warn the person about this!
      warning = "Could not get a human name for your timezone: "
      if len(matches) > 1:
        warning += ("We detected multiple matches for your /etc/localtime. "
                    "(Matches were %s)" % matches)
        return _tzinfome(matches[0])
      else:
        warning += "We detected no matches for your /etc/localtime."
      warnings.warn(warning)

      # Register /etc/localtime as the timezone loaded.
      pytz._tzinfo_cache['/etc/localtime'] = localtime
      return localtime


def _detect_timezone_php():
  tomatch = (time.tzname[0], time.timezone, time.daylight)
  now = datetime.datetime.now()

  matches = []
  for tzname in pytz.all_timezones:
    try:
      tz = pytz.timezone(tzname)
    except IOError:
      continue

    try:
      indst = tz.localize(now).timetuple()[-1]

      if tomatch == (tz._tzname, -tz._utcoffset.seconds, indst):
        matches.append(tzname)

    # pylint: disable-msg=W0704
    except AttributeError:
      pass

  if len(matches) > 1:
    warnings.warn("We detected multiple matches for the timezone, choosing "
                  "the first %s. (Matches were %s)" % (matches[0], matches))
    return pytz.timezone(matches[0])


class datetime_tz(datetime.datetime):
  """An extension of the inbuilt datetime adding more functionality.

  The extra functionality includes:
    * Partial parsing support (IE 2006/02/30 matches %Y/%M/%D %H:%M)
    * Full integration with pytz (just give it the string of the timezone!)
    * Proper support for going to/from Unix timestamps (which are in UTC!).
  """
  __slots__ = ["is_dst"]

  def __new__(cls, *args, **kw):
    args = list(args)
    if not args:
      raise TypeError("Not enough arguments given.")

    # See if we are given a tzinfo object...
    tzinfo = None
    if isinstance(args[-1], (datetime.tzinfo, basestring)):
      tzinfo = _tzinfome(args.pop(-1))
    elif kw.get("tzinfo", None) is not None:
      tzinfo = _tzinfome(kw.pop("tzinfo"))

    # Create a datetime object if we don't have one
    if isinstance(args[0], datetime.datetime):
      # Convert the datetime instance to a datetime object.
      newargs = (list(args[0].timetuple()[0:6]) +
                 [args[0].microsecond, args[0].tzinfo])
      dt = datetime.datetime(*newargs)

      if tzinfo is None and dt.tzinfo is None:
        raise TypeError("Must specify a timezone!")

      if tzinfo is not None and dt.tzinfo is not None:
        raise TypeError("Can not give a timezone with timezone aware"
                        " datetime object! (Use localize.)")
    else:
      dt = datetime.datetime(*args, **kw)

    if dt.tzinfo is not None:
      # Re-normalize the dt object
      dt = dt.tzinfo.normalize(dt)

    else:
      if tzinfo is None:
        tzinfo = localtz()

      try:
        dt = tzinfo.localize(dt, is_dst=None)
      except pytz.AmbiguousTimeError:
        is_dst = None
        if "is_dst" in kw:
          is_dst = kw.pop("is_dst")

        try:
          dt = tzinfo.localize(dt, is_dst)
        except IndexError:
          raise pytz.AmbiguousTimeError("No such time exists!")

    newargs = list(dt.timetuple()[0:6])+[dt.microsecond, dt.tzinfo]
    obj = datetime.datetime.__new__(cls, *newargs)
    obj.is_dst = obj.dst() != datetime.timedelta(0)
    return obj

  def asdatetime(self, naive=True):
    """Return this datetime_tz as a datetime object.

    Args:
      naive: Return *without* any tz info.

    Returns:
      This datetime_tz as a datetime object.
    """
    args = list(self.timetuple()[0:6])+[self.microsecond]
    if not naive:
      args.append(self.tzinfo)
    return datetime.datetime(*args)

  def asdate(self):
    """Return this datetime_tz as a date object.

    Returns:
      This datetime_tz as a date object.
    """
    return datetime.date(self.year, self.month, self.day)

  def totimestamp(self):
    """Convert this datetime object back to a unix timestamp.

    The Unix epoch is the time 00:00:00 UTC on January 1, 1970.

    Returns:
      Unix timestamp.
    """
    return calendar.timegm(self.utctimetuple())+1e-6*self.microsecond

  def astimezone(self, tzinfo):
    """Returns a version of this timestamp converted to the given timezone.

    Args:
      tzinfo: Either a datetime.tzinfo object or a string (which will be
              looked up in pytz).

    Returns:
      A datetime_tz object in the given timezone.
    """
    # Assert we are not a naive datetime object
    assert self.tzinfo is not None

    tzinfo = _tzinfome(tzinfo)

    d = self.asdatetime(naive=False).astimezone(tzinfo)
    return datetime_tz(d)

  # pylint: disable-msg=C6113
  def replace(self, **kw):
    """Return datetime with new specified fields given as arguments.

    For example, dt.replace(day=4) would return a new datetime_tz object with
    exactly the same attributes as dt but with the day attribute equal to 4.

    Any attribute can be replaced, but tzinfo can not be set to None.

    Args:
      Any datetime_tz attribute.

    Returns:
      A datetime_tz object with the attributes replaced.

    Raises:
      TypeError: If the given replacement is invalid.
    """
    if "tzinfo" in kw:
      if kw["tzinfo"] is None:
        raise TypeError("Can not remove the timezone use asdatetime()")

    is_dst = None
    if "is_dst" in kw:
      is_dst = kw["is_dst"]
      del kw["is_dst"]
    else:
      # Use our own DST setting..
      is_dst = self.is_dst

    replaced = self.asdatetime().replace(**kw)

    return datetime_tz(replaced, tzinfo=self.tzinfo.zone, is_dst=is_dst)

  # pylint: disable-msg=C6310
  @classmethod
  def smartparse(cls, toparse, tzinfo=None):
    """Method which uses dateutil.parse and extras to try and parse the string.

    Valid dates are found at:
     http://labix.org/python-dateutil#head-1443e0f14ad5dff07efd465e080d1110920673d8-2

    Other valid formats include:
      "now" or "today"
      "yesterday"
      "tommorrow"
      "5 minutes ago"
      "10 hours ago"
      "10h5m ago"
      "start of yesterday"
      "end of tommorrow"
      "end of 3rd of March"

    Args:
      toparse: The string to parse.
      tzinfo: Timezone the resultant datetime_tz object should be in.
              (Defaults to your local timezone.)

    Returns:
      New datetime_tz object.

    Raises:
      ValueError: If unable to make sense of the input.
    """
    # Default for empty fields are:
    #  year/month/day == now
    #  hour/minute/second/microsecond == 0
    toparse = toparse.strip()

    if tzinfo is None:
      dt = cls.now()
    else:
      dt = cls.now(tzinfo)

    default = dt.replace(hour=0, minute=0, second=0, microsecond=0)

    # Remove "start of " and "end of " prefix in the string
    if toparse.lower().startswith("end of "):
      toparse = toparse[7:].strip()

      dt += datetime.timedelta(days=1)
      dt = dt.replace(hour=0, minute=0, second=0, microsecond=0)
      dt -= datetime.timedelta(microseconds=1)

      default = dt

    elif toparse.lower().startswith("start of "):
      toparse = toparse[9:].strip()

      dt = dt.replace(hour=0, minute=0, second=0, microsecond=0)
      default = dt

    # Handle strings with "now", "today", "yesterday", "tomorrow" and "ago".
    # Need to use lowercase
    toparselower = toparse.lower()

    if toparselower in ["now", "today"]:
      pass

    elif toparselower == "yesterday":
      dt -= datetime.timedelta(days=1)

    elif toparselower == "tommorrow":
      dt += datetime.timedelta(days=1)

    elif "ago" in toparselower:
      # Remove the "ago" bit
      toparselower = toparselower[:-3]
      # Replace all "a day and an hour" with "1 day 1 hour"
      toparselower = toparselower.replace("a ", "1 ")
      toparselower = toparselower.replace("an ", "1 ")
      toparselower = toparselower.replace(" and ", " ")

      # Match the following
      # 1 hour ago
      # 1h ago
      # 1 h ago
      # 1 hour ago
      # 2 hours ago
      # Same with minutes, seconds, etc.

      tocheck = ("seconds", "minutes", "hours", "days", "weeks", "months",
                 "years")
      result = {}
      for match in re.finditer("([0-9]+)([^0-9]*)", toparselower):
        amount = int(match.group(1))
        unit = match.group(2).strip()

        for bit in tocheck:
          regex = "^([%s]|((%s)s?))$" % (
              bit[0], bit[:-1])

          bitmatch = re.search(regex, unit)
          if bitmatch:
            result[bit] = amount
            break
        else:
          raise ValueError("Was not able to parse date unit %r!" % unit)

      delta = dateutil.relativedelta.relativedelta(**result)
      dt -= delta

    else:
      # Handle strings with normal datetime format, use original case.
      dt = dateutil.parser.parse(toparse, default=default.asdatetime(),
                                 tzinfos=pytz_abbr.tzinfos)
      if dt is None:
        raise ValueError("Was not able to parse date!")

      if dt.tzinfo is pytz_abbr.unknown:
        dt = dt.replace(tzinfo=None)

      if dt.tzinfo is None:
        if tzinfo is None:
          tzinfo = localtz()
        dt = cls(dt, tzinfo)
      else:
        if isinstance(dt.tzinfo, pytz_abbr.tzabbr):
          abbr = dt.tzinfo
          dt = dt.replace(tzinfo=None)
          dt = cls(dt, abbr.zone, is_dst=abbr.dst)

        dt = cls(dt)

    return dt

  @classmethod
  def utcfromtimestamp(cls, timestamp):
    """Returns a datetime object of a given timestamp (in UTC)."""
    obj = datetime.datetime.utcfromtimestamp(timestamp)
    obj = pytz.utc.localize(obj)
    return cls(obj)

  @classmethod
  def fromtimestamp(cls, timestamp):
    """Returns a datetime object of a given timestamp (in local tz)."""
    d = cls.utcfromtimestamp(timestamp)
    return d.astimezone(localtz())

  @classmethod
  def utcnow(cls):
    """Return a new datetime representing UTC day and time."""
    obj = datetime.datetime.utcnow()
    obj = cls(obj, tzinfo=pytz.utc)
    return obj

  @classmethod
  def now(cls, tzinfo=None):
    """[tz] -> new datetime with tz's local day and time."""
    obj = cls.utcnow()
    if tzinfo is None:
      tzinfo = localtz()
    return obj.astimezone(tzinfo)

  today = now

  @staticmethod
  def fromordinal(ordinal):
    raise SyntaxError("Not enough information to create a datetime_tz object "
                      "from an ordinal. Please use datetime.date.fromordinal")


class iterate(object):
  """Helpful iterators for working with datetime_tz objects."""

  @staticmethod
  def between(start, delta, end=None):
    """Return an iterator between this date till given end point.

    Example usage:
      >>> d = datetime_tz.smartparse("5 days ago")
      2008/05/12 11:45
      >>> for i in d.between(timedelta(days=1), datetime_tz.now()):
      >>>    print i
      2008/05/12 11:45
      2008/05/13 11:45
      2008/05/14 11:45
      2008/05/15 11:45
      2008/05/16 11:45

    Args:
      start: The date to start at.
      delta: The interval to iterate with.
      end: (Optional) Date to end at. If not given the iterator will never
           terminate.

    Yields:
      datetime_tz objects.
    """
    toyield = start
    while end is None or toyield < end:
      yield toyield
      toyield += delta

  @staticmethod
  def weeks(start, end=None):
    """Iterate over the weeks between the given datetime_tzs.

    Args:
      start: datetime_tz to start from.
      end: (Optional) Date to end at, if not given the iterator will never
           terminate.

    Returns:
      An iterator which generates datetime_tz objects a week apart.
    """
    return iterate.between(start, datetime.timedelta(days=7), end)

  @staticmethod
  def days(start, end=None):
    """Iterate over the days between the given datetime_tzs.

    Args:
      start: datetime_tz to start from.
      end: (Optional) Date to end at, if not given the iterator will never
           terminate.

    Returns:
      An iterator which generates datetime_tz objects a day apart.
    """
    return iterate.between(start, datetime.timedelta(days=1), end)

  @staticmethod
  def hours(start, end=None):
    """Iterate over the hours between the given datetime_tzs.

    Args:
      start: datetime_tz to start from.
      end: (Optional) Date to end at, if not given the iterator will never
           terminate.

    Returns:
      An iterator which generates datetime_tz objects an hour apart.
    """
    return iterate.between(start, datetime.timedelta(hours=1), end)

  @staticmethod
  def minutes(start, end=None):
    """Iterate over the minutes between the given datetime_tzs.

    Args:
      start: datetime_tz to start from.
      end: (Optional) Date to end at, if not given the iterator will never
           terminate.

    Returns:
      An iterator which generates datetime_tz objects a minute apart.
    """
    return iterate.between(start, datetime.timedelta(minutes=1), end)

  @staticmethod
  def seconds(start, end=None):
    """Iterate over the seconds between the given datetime_tzs.

    Args:
      start: datetime_tz to start from.
      end: (Optional) Date to end at, if not given the iterator will never
           terminate.

    Returns:
      An iterator which generates datetime_tz objects a second apart.
    """
    return iterate.between(start, datetime.timedelta(seconds=1), end)


def _wrap_method(name):
  """Wrap a method.

  Patch a method which might return a datetime.datetime to return a
  datetime_tz.datetime_tz instead.

  Args:
    name: The name of the method to patch
  """
  method = getattr(datetime.datetime, name)

  # Have to give the second argument as method has no __module__ option.
  @functools.wraps(method, ("__name__", "__doc__"), ())
  def wrapper(*args, **kw):
    r = method(*args, **kw)

    if isinstance(r, datetime.datetime) and not isinstance(r, datetime_tz):
      r = datetime_tz(r)
    return r

  setattr(datetime_tz, name, wrapper)

for methodname in ["__add__", "__radd__", "__rsub__", "__sub__", "combine"]:

  # Make sure we have not already got an override for this method
  assert methodname not in datetime_tz.__dict__

  _wrap_method(methodname)


__all__ = ['datetime_tz', 'detect_timezone', 'iterate', 'localtz',
           'localtz_set', 'timedelta', '_detect_timezone_environ',
           '_detect_timezone_etc_localtime', '_detect_timezone_etc_timezone',
           '_detect_timezone_php']
@@ -1,230 +0,0 @@
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright 2010 Google Inc. All Rights Reserved.
#

"""
Common time zone acronyms/abbreviations for use with the datetime_tz module.

*WARNING*: There are lots of caveats when using this module which are listed
below.

CAVEAT 1: The acronyms/abbreviations are not globally unique, they are not even
unique within a region. For example, EST can mean any of,
  Eastern Standard Time in Australia (which is 10 hours ahead of UTC)
  Eastern Standard Time in North America (which is 5 hours behind UTC)

Where there are two abbreviations the more popular one will appear in the all
dictionary, while the less common one will only appear in that country's region
dictionary. IE If using all, EST will be mapped to Eastern Standard Time in
North America.

CAVEAT 2: Many of the acronyms don't map neatly to Olson timezones. For example,
Eastern European Summer Time (EEDT) is used by many different countries in
Europe *at different times*! If the acronym does not map neatly to one zone it
is mapped to the Etc/GMT+-XX Olson zone. This means that any date manipulations
can end up with idiot things like summer time in the middle of winter.

CAVEAT 3: The Summer/Standard time difference is really important! For an hour
each year it is needed to determine which time you are actually talking about.
    2002-10-27 01:20:00 EST != 2002-10-27 01:20:00 EDT
"""

import datetime
import pytz
import pytz.tzfile


class tzabbr(datetime.tzinfo):
  """A timezone abbreviation.

  *WARNING*: This is not a tzinfo implementation! Trying to use this as tzinfo
  object will result in failure. We inherit from datetime.tzinfo so we can get
  through the dateutil checks.
  """
  pass


# A "marker" tzinfo object which is used to signify an unknown timezone.
unknown = datetime.tzinfo(0)


regions = {'all': {}, 'military': {}}
# Create a special alias for the all and military regions
all = regions['all']
military = regions['military']


def tzabbr_register(abbr, name, region, zone, dst):
  """Register a new timezone abbreviation in the global registry.

  If another abbreviation with the same name has already been registered, the
  new abbreviation will only be registered in the region-specific dictionary.
  """
  newabbr = tzabbr()
  newabbr.abbr = abbr
  newabbr.name = name
  newabbr.region = region
  newabbr.zone = zone
  newabbr.dst = dst

  if abbr not in all:
    all[abbr] = newabbr

  if not region in regions:
    regions[region] = {}

  assert abbr not in regions[region]
  regions[region][abbr] = newabbr


def tzinfos_create(use_region):
  abbrs = regions[use_region]

  def tzinfos(abbr, offset):
    if abbr:
      if abbr in abbrs:
        result = abbrs[abbr]
        if offset:
          # FIXME: Check the offset matches the abbreviation we just selected.
          pass
        return result
      else:
        raise ValueError, "Unknown timezone found %s" % abbr
    if offset == 0:
      return pytz.utc
    if offset:
      return pytz.FixedOffset(offset/60)
    return unknown

  return tzinfos


# Create a special alias for the all tzinfos
tzinfos = tzinfos_create('all')


# Create the abbreviations.
# *WARNING*: Order matters!
tzabbr_register("A", u"Alpha Time Zone", u"Military", "Etc/GMT-1", False)
tzabbr_register("ACDT", u"Australian Central Daylight Time", u"Australia",
                "Australia/Adelaide", True)
tzabbr_register("ACST", u"Australian Central Standard Time", u"Australia",
                "Australia/Adelaide", False)
tzabbr_register("ADT", u"Atlantic Daylight Time", u"North America",
                "America/Halifax", True)
tzabbr_register("AEDT", u"Australian Eastern Daylight Time", u"Australia",
                "Australia/Sydney", True)
tzabbr_register("AEST", u"Australian Eastern Standard Time", u"Australia",
                "Australia/Sydney", False)
tzabbr_register("AKDT", u"Alaska Daylight Time", u"North America",
                "US/Alaska", True)
tzabbr_register("AKST", u"Alaska Standard Time", u"North America",
                "US/Alaska", False)
tzabbr_register("AST", u"Atlantic Standard Time", u"North America",
                "America/Halifax", False)
tzabbr_register("AWDT", u"Australian Western Daylight Time", u"Australia",
                "Australia/West", True)
tzabbr_register("AWST", u"Australian Western Standard Time", u"Australia",
                "Australia/West", False)
tzabbr_register("B", u"Bravo Time Zone", u"Military", "Etc/GMT-2", False)
tzabbr_register("BST", u"British Summer Time", u"Europe", "Europe/London", True)
tzabbr_register("C", u"Charlie Time Zone", u"Military", "Etc/GMT-2", False)
tzabbr_register("CDT", u"Central Daylight Time", u"North America",
                "US/Central", True)
tzabbr_register("CEDT", u"Central European Daylight Time", u"Europe",
                "Etc/GMT+2", True)
tzabbr_register("CEST", u"Central European Summer Time", u"Europe",
                "Etc/GMT+2", True)
tzabbr_register("CET", u"Central European Time", u"Europe", "Etc/GMT+1", False)
tzabbr_register("CST", u"Central Standard Time", u"North America",
                "US/Central", False)
tzabbr_register("CXT", u"Christmas Island Time", u"Australia",
                "Indian/Christmas", False)
tzabbr_register("D", u"Delta Time Zone", u"Military", "Etc/GMT-2", False)
tzabbr_register("E", u"Echo Time Zone", u"Military", "Etc/GMT-2", False)
tzabbr_register("EDT", u"Eastern Daylight Time", u"North America",
                "US/Eastern", True)
tzabbr_register("EEDT", u"Eastern European Daylight Time", u"Europe",
                "Etc/GMT+3", True)
tzabbr_register("EEST", u"Eastern European Summer Time", u"Europe",
                "Etc/GMT+3", True)
tzabbr_register("EET", u"Eastern European Time", u"Europe", "Etc/GMT+2", False)
tzabbr_register("EST", u"Eastern Standard Time", u"North America",
                "US/Eastern", False)
tzabbr_register("F", u"Foxtrot Time Zone", u"Military", "Etc/GMT-6", False)
tzabbr_register("G", u"Golf Time Zone", u"Military", "Etc/GMT-7", False)
tzabbr_register("GMT", u"Greenwich Mean Time", u"Europe", pytz.utc, False)
tzabbr_register("H", u"Hotel Time Zone", u"Military", "Etc/GMT-8", False)
#tzabbr_register("HAA", u"Heure Avancée de l'Atlantique", u"North America", u"UTC - 3 hours")
#tzabbr_register("HAC", u"Heure Avancée du Centre", u"North America", u"UTC - 5 hours")
tzabbr_register("HADT", u"Hawaii-Aleutian Daylight Time", u"North America",
                "Pacific/Honolulu", True)
#tzabbr_register("HAE", u"Heure Avancée de l'Est", u"North America", u"UTC - 4 hours")
#tzabbr_register("HAP", u"Heure Avancée du Pacifique", u"North America", u"UTC - 7 hours")
#tzabbr_register("HAR", u"Heure Avancée des Rocheuses", u"North America", u"UTC - 6 hours")
tzabbr_register("HAST", u"Hawaii-Aleutian Standard Time", u"North America",
                "Pacific/Honolulu", False)
#tzabbr_register("HAT", u"Heure Avancée de Terre-Neuve", u"North America", u"UTC - 2:30 hours")
#tzabbr_register("HAY", u"Heure Avancée du Yukon", u"North America", u"UTC - 8 hours")
tzabbr_register("HDT", u"Hawaii Daylight Time", u"North America",
                "Pacific/Honolulu", True)
#tzabbr_register("HNA", u"Heure Normale de l'Atlantique", u"North America", u"UTC - 4 hours")
#tzabbr_register("HNC", u"Heure Normale du Centre", u"North America", u"UTC - 6 hours")
#tzabbr_register("HNE", u"Heure Normale de l'Est", u"North America", u"UTC - 5 hours")
#tzabbr_register("HNP", u"Heure Normale du Pacifique", u"North America", u"UTC - 8 hours")
#tzabbr_register("HNR", u"Heure Normale des Rocheuses", u"North America", u"UTC - 7 hours")
#tzabbr_register("HNT", u"Heure Normale de Terre-Neuve", u"North America", u"UTC - 3:30 hours")
#tzabbr_register("HNY", u"Heure Normale du Yukon", u"North America", u"UTC - 9 hours")
tzabbr_register("HST", u"Hawaii Standard Time", u"North America",
                "Pacific/Honolulu", False)
tzabbr_register("I", u"India Time Zone", u"Military", "Etc/GMT-9", False)
tzabbr_register("IST", u"Irish Summer Time", u"Europe", "Europe/Dublin", True)
tzabbr_register("K", u"Kilo Time Zone", u"Military", "Etc/GMT-10", False)
tzabbr_register("L", u"Lima Time Zone", u"Military", "Etc/GMT-11", False)
tzabbr_register("M", u"Mike Time Zone", u"Military", "Etc/GMT-12", False)
tzabbr_register("MDT", u"Mountain Daylight Time", u"North America",
                "US/Mountain", True)
#tzabbr_register("MESZ", u"Mitteleuroäische Sommerzeit", u"Europe", u"UTC + 2 hours")
#tzabbr_register("MEZ", u"Mitteleuropäische Zeit", u"Europe", u"UTC + 1 hour")
tzabbr_register("MSD", u"Moscow Daylight Time", u"Europe",
                "Europe/Moscow", True)
tzabbr_register("MSK", u"Moscow Standard Time", u"Europe",
                "Europe/Moscow", False)
tzabbr_register("MST", u"Mountain Standard Time", u"North America",
                "US/Mountain", False)
tzabbr_register("N", u"November Time Zone", u"Military", "Etc/GMT+1", False)
tzabbr_register("NDT", u"Newfoundland Daylight Time", u"North America",
                "America/St_Johns", True)
tzabbr_register("NFT", u"Norfolk (Island) Time", u"Australia",
                "Pacific/Norfolk", False)
tzabbr_register("NST", u"Newfoundland Standard Time", u"North America",
                "America/St_Johns", False)
tzabbr_register("O", u"Oscar Time Zone", u"Military", "Etc/GMT+2", False)
tzabbr_register("P", u"Papa Time Zone", u"Military", "Etc/GMT+3", False)
tzabbr_register("PDT", u"Pacific Daylight Time", u"North America",
                "US/Pacific", True)
tzabbr_register("PST", u"Pacific Standard Time", u"North America",
                "US/Pacific", False)
tzabbr_register("Q", u"Quebec Time Zone", u"Military", "Etc/GMT+4", False)
tzabbr_register("R", u"Romeo Time Zone", u"Military", "Etc/GMT+5", False)
tzabbr_register("S", u"Sierra Time Zone", u"Military", "Etc/GMT+6", False)
tzabbr_register("T", u"Tango Time Zone", u"Military", "Etc/GMT+7", False)
tzabbr_register("U", u"Uniform Time Zone", u"Military", "Etc/GMT+8", False)
tzabbr_register("UTC", u"Coordinated Universal Time", u"Europe",
                pytz.utc, False)
tzabbr_register("V", u"Victor Time Zone", u"Military", "Etc/GMT+9", False)
tzabbr_register("W", u"Whiskey Time Zone", u"Military", "Etc/GMT+10", False)
tzabbr_register("WDT", u"Western Daylight Time", u"Australia",
                "Australia/West", True)
tzabbr_register("WEDT", u"Western European Daylight Time", u"Europe",
                "Etc/GMT+1", True)
tzabbr_register("WEST", u"Western European Summer Time", u"Europe",
                "Etc/GMT+1", True)
tzabbr_register("WET", u"Western European Time", u"Europe", pytz.utc, False)
tzabbr_register("WST", u"Western Standard Time", u"Australia",
                "Australia/West", False)
tzabbr_register("X", u"X-ray Time Zone", u"Military", "Etc/GMT+11", False)
tzabbr_register("Y", u"Yankee Time Zone", u"Military", "Etc/GMT+12", False)
tzabbr_register("Z", u"Zulu Time Zone", u"Military", pytz.utc, False)
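As an aside, the tzinfos callable above is designed to be handed to dateutil's parser, and it makes CAVEAT 1 easy to see in practice. A minimal sketch, assuming the module deleted here is still importable as pytz_abbr:

    import dateutil.parser
    import pytz_abbr

    # "EST" is ambiguous (CAVEAT 1); the "all" registry prefers the North
    # American meaning, so this resolves to the US/Eastern entry.  Note the
    # returned tzabbr is only a marker, not a usable tzinfo implementation.
    dt = dateutil.parser.parse("2002-10-27 01:20:00 EST",
                               tzinfos=pytz_abbr.tzinfos)
    print dt.tzinfo.zone  # -> "US/Eastern"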
181 design.md
@@ -1,181 +0,0 @@
Structure
---------
nilmdb.nilmdb is the NILM database interface.  A PyTables database
holds the actual rows of data, and a SQL database tracks metadata and
ranges.

Access to the nilmdb must be single-threaded.  This is handled with
the nilmdb.serializer class.

nilmdb.server is an HTTP server that provides an interface to talk,
through the serialization layer, to the nilmdb object.

nilmdb.client is an HTTP client that connects to this.
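The serialization can be pictured as a queue feeding one worker thread. A rough sketch of the idea (hypothetical names, not the actual nilmdb.serializer code):

    import threading
    import Queue

    class SerializedCaller(object):
        """Proxy object: every call runs on one dedicated worker thread."""
        def __init__(self, target):
            self.target = target
            self.requests = Queue.Queue()
            worker = threading.Thread(target=self._worker)
            worker.daemon = True
            worker.start()

        def _worker(self):
            while True:
                func, args, reply = self.requests.get()
                reply.put(func(*args))

        def call(self, name, *args):
            reply = Queue.Queue()
            self.requests.put((getattr(self.target, name), args, reply))
            return reply.get()  # blocks until the worker has executed it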
Sqlite performance
------------------

Committing a transaction in the default sync mode (PRAGMA synchronous=FULL)
takes about 125 ms.  sqlite3 will commit transactions at three points:

  1: explicit con.commit()

  2: between a series of DML commands and non-DML commands, e.g.
     after a series of INSERT, SELECT, but before a CREATE TABLE or
     PRAGMA.

  3: at the end of an explicit transaction, e.g. "with self.con as con:"

To speed up testing, or if this transaction speed becomes an issue,
the sync=False option to NilmDB will set PRAGMA synchronous=OFF.
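For reference, this is easy to try with Python's sqlite3 module directly (a sketch; the file and table names are made up):

    import sqlite3

    con = sqlite3.connect("metadata.db")
    # Roughly what NilmDB's sync=False option does: stop waiting for
    # fsync on every commit, trading durability for speed.
    con.execute("PRAGMA synchronous = OFF")
    con.execute("CREATE TABLE IF NOT EXISTS ranges"
                " (start_time REAL, end_time REAL)")
    with con:   # commit point 3: end of an explicit transaction
        con.execute("INSERT INTO ranges VALUES (?, ?)", (0.0, 1.0))
    con.commit()  # commit point 1: explicit commit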
Inserting streams
-----------------

We need to send the contents of "data" as POST.  Do we need chunked
transfer?

- Don't know the size in advance, so we would need to use chunked if
  we send the entire thing in one request.
- But we shouldn't send one chunk per line, so we need to buffer some
  anyway; why not just make new requests?
- Consider the infinite-streaming case: we might want to send it
  immediately?  Not really -- the server should still do explicit
  inserts of fixed-size chunks.
- Even chunked encoding needs the size of each chunk beforehand, so
  everything still gets buffered.  Just a tradeoff of buffer size.

Before timestamps are added:

- Raw data is about 440 kB/s (9 channels)
- Prep data is about 12.5 kB/s (1 phase)
- How do we know how much data to send?

  - Remember that we can only do maybe 8-50 transactions per second on
    the sqlite database.  So if one block of inserted data is one
    transaction, we'd need the raw case to be around 64 kB per request,
    ideally more.
  - Maybe use a range, based on how long it's taking to read the data:
    - If no more data, send it
    - If data > 1 MB, send it
    - If more than 10 seconds have elapsed, send it
  - Should those numbers come from the server?

Converting from ASCII to PyTables:

- For each row getting added, we need to set attributes on a PyTables
  Row object and call table.append().  This means that there isn't a
  particularly efficient way of converting from ASCII.
- Could create a function like nilmdb.layout.Layout("foo").fillRow(asciiline)
  - But this means we're doing parsing on the serialized side
  - Let's keep parsing on the threaded server side so we can detect
    errors better, and not block the serialized nilmdb for a slow
    parsing process.
- Client sends ASCII data
- Server converts this ASCII data to a list of values
- Maybe:

      # threaded side creates this object
      parser = nilmdb.layout.Parser("layout_name")
      # threaded side parses and fills it with data
      parser.parse(textdata)
      # serialized side pulls out rows
      for n in xrange(parser.nrows):
          parser.fill_row(rowinstance, n)
          table.append()
Inserting streams, inside nilmdb
--------------------------------

- First check that the new stream doesn't overlap.
  - Get minimum timestamp, maximum timestamp from the data parser.
    - (extend parser to verify monotonicity and track extents)
  - Get all intervals for this stream in the database
  - See if the new interval overlaps any existing ones (see the sketch below)
    - If so, bail
  - Question: should we cache intervals inside NilmDB?
    - Assume the database is fast for now, and always rebuild from the DB.
    - Can add a caching layer later if we need to.
  - `stream_get_ranges(path)` -> return IntervalSet?
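A self-contained illustration of the core overlap test (not nilmdb code; the data here is made up): two closed intervals intersect exactly when each one starts before the other ends.

    def overlaps(a_start, a_end, b_start, b_end):
        """True if [a_start, a_end] and [b_start, b_end] intersect."""
        return a_start <= b_end and b_start <= a_end

    existing = [(0.0, 10.0), (20.0, 30.0)]   # intervals already in the stream
    new = (9.0, 15.0)                        # extents of the incoming data
    if any(overlaps(new[0], new[1], s, e) for (s, e) in existing):
        print "overlap -- bail"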
Speed
-----

- First approach was quadratic.  Adding four hours of data:

      $ time zcat /home/jim/bpnilm-data/snapshot-1-20110513-110002.raw.gz | ./nilmtool.py insert -s 20110513-110000 /bpnilm/1/raw
      real    24m31.093s
      $ time zcat /home/jim/bpnilm-data/snapshot-1-20110513-110002.raw.gz | ./nilmtool.py insert -s 20110513-120001 /bpnilm/1/raw
      real    43m44.528s
      $ time zcat /home/jim/bpnilm-data/snapshot-1-20110513-110002.raw.gz | ./nilmtool.py insert -s 20110513-130002 /bpnilm/1/raw
      real    93m29.713s
      $ time zcat /home/jim/bpnilm-data/snapshot-1-20110513-110002.raw.gz | ./nilmtool.py insert -s 20110513-140003 /bpnilm/1/raw
      real    166m53.007s

- Disabling pytables indexing didn't help:

      real    31m21.492s
      real    52m51.963s
      real    102m8.151s
      real    176m12.469s

- Server RAM usage is constant.

- Speed problems were due to IntervalSet speed: parsing intervals
  from the database and adding the new one each time.

  - First optimization is to cache the result of `nilmdb:_get_intervals`,
    which gives the best speedup.

  - Also switched to internally using bxInterval from the bx-python package.
    Speed of `tests/test_interval:TestIntervalSpeed` is pretty decent
    and seems to be growing logarithmically now.  About 85 μs per insertion
    for inserting 131k entries.

  - Storing the interval data in SQL might be better, with a scheme like:
    http://www.logarithmic.net/pfh/blog/01235197474

- Next slowdown target is nilmdb.layout.Parser.parse().

  - Rewrote parsers using cython and sscanf
  - Stats (rev 10831), with _add_interval disabled:

        layout.pyx.Parser.parse:128       6303 sec, 262k calls
        layout.pyx.parse:63              13913 sec, 5.1g calls
        numpy:records.py.fromrecords:569  7410 sec, 262k calls

  - Probably OK for now.

IntervalSet speed
-----------------
- Initial implementation was pretty slow, even with binary search in
  a sorted list.

- Replaced with bxInterval; an insertion now takes about log n time.
  - TestIntervalSpeed with range(17,18) and profiling:
    - 85 μs each
    - 131072 calls to `__iadd__`
    - 131072 to bx.insert_interval
    - 131072 to bx.insert:395
    - 2355835 to bx.insert:106 (18x as many?)

- Tried blist too; worse than bxinterval.

- There might be algorithmic improvements to be made in Interval.py,
  like in `__and__`.
Layouts
-------
Current/old design has specific layouts: RawData, PrepData, RawNotchedData.
Let's get rid of this entirely and switch to simpler data types that are
just collections and counts of a single type.  We'll still use strings
to describe them, with the format:

    type_count

where type is "uint16", "float32", or "float64", and count is an integer.

nilmdb.layout.named() will parse these strings into the appropriate
handlers.  For compatibility:

    "RawData" == "uint16_6"
    "RawNotchedData" == "uint16_9"
    "PrepData" == "float32_8"
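A sketch of what parsing these layout strings could look like (illustrative only; the real nilmdb.layout.named() may differ):

    _COMPAT = {"RawData": "uint16_6",
               "RawNotchedData": "uint16_9",
               "PrepData": "float32_8"}

    def parse_layout(name):
        """Split a layout string like "uint16_6" into (type, count)."""
        name = _COMPAT.get(name, name)
        typename, _, count = name.rpartition("_")
        if typename not in ("uint16", "float32", "float64"):
            raise ValueError("unknown layout type: %r" % typename)
        return typename, int(count)

    print parse_layout("RawData")    # -> ('uint16', 6)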
@@ -1,16 +1,2 @@
"""Main NilmDB import"""

from .nilmdb import NilmDB
from .server import Server
from .client import Client
from .timer import Timer

import cmdline

import pyximport; pyximport.install()
import layout

import serializer
import timestamper
import interval
import du
from nilmdb.interval import *
from nilmdb.fileinterval import *
@@ -1,352 +0,0 @@
# cython: profile=False
# This is based on bxintersect in bx-python 554:07aca5a9f6fc (BSD licensed);
# modified to store interval ranges as doubles rather than 32-bit integers,
# use fully closed intervals, support deletion, etc.
#cython: cdivision=True

import operator

cdef extern from "stdlib.h":
    int ceil(float f)
    float log(float f)
    int RAND_MAX
    int rand()
    int strlen(char *)
    int iabs(int)

cdef inline double dmax2(double a, double b):
    if b > a: return b
    return a

cdef inline double dmax3(double a, double b, double c):
    if b > a:
        if c > b:
            return c
        return b
    if a > c:
        return a
    return c

cdef inline double dmin3(double a, double b, double c):
    if b < a:
        if c < b:
            return c
        return b
    if a < c:
        return a
    return c

cdef inline double dmin2(double a, double b):
    if b < a: return b
    return a

cdef float nlog = -1.0 / log(0.5)

cdef class IntervalNode:
    """
    A single node of an `IntervalTree`.

    NOTE: Unless you really know what you are doing, you probably should use
    `IntervalTree` rather than using this directly.
    """
    cdef float priority
    cdef public object interval
    cdef public double start, end
    cdef double minend, maxend, minstart
    cdef IntervalNode cleft, cright, croot

    property left_node:
        def __get__(self):
            return self.cleft if self.cleft is not EmptyNode else None
    property right_node:
        def __get__(self):
            return self.cright if self.cright is not EmptyNode else None
    property root_node:
        def __get__(self):
            return self.croot if self.croot is not EmptyNode else None

    def __repr__(self):
        return "IntervalNode(%g, %g)" % (self.start, self.end)

    def __cinit__(IntervalNode self, double start, double end, object interval):
        # Python lacks the binomial distribution, so we convert a
        # uniform into a binomial because it naturally scales with
        # tree size.  Also, python's uniform is perfect since the
        # upper limit is not inclusive, which gives us undefined here.
        self.priority = ceil(nlog * log(-1.0/(1.0 * rand()/RAND_MAX - 1)))
        self.start = start
        self.end = end
        self.interval = interval
        self.maxend = end
        self.minstart = start
        self.minend = end
        self.cleft = EmptyNode
        self.cright = EmptyNode
        self.croot = EmptyNode

    cpdef IntervalNode insert(IntervalNode self, double start, double end, object interval):
        """
        Insert a new IntervalNode into the tree of which this node is
        currently the root. The return value is the new root of the tree (which
        may or may not be this node!)
        """
        cdef IntervalNode croot = self
        # If starts are the same, decide which to add interval to based on
        # end, thus maintaining sortedness relative to start/end
        cdef double decision_endpoint = start
        if start == self.start:
            decision_endpoint = end

        if decision_endpoint > self.start:
            # insert to cright tree
            if self.cright is not EmptyNode:
                self.cright = self.cright.insert( start, end, interval )
            else:
                self.cright = IntervalNode( start, end, interval )
            # rebalance tree
            if self.priority < self.cright.priority:
                croot = self.rotate_left()
        else:
            # insert to cleft tree
            if self.cleft is not EmptyNode:
                self.cleft = self.cleft.insert( start, end, interval)
            else:
                self.cleft = IntervalNode( start, end, interval)
            # rebalance tree
            if self.priority < self.cleft.priority:
                croot = self.rotate_right()

        croot.set_ends()
        self.cleft.croot = croot
        self.cright.croot = croot
        return croot

    cdef IntervalNode rotate_right(IntervalNode self):
        cdef IntervalNode croot = self.cleft
        self.cleft = self.cleft.cright
        croot.cright = self
        self.set_ends()
        return croot

    cdef IntervalNode rotate_left(IntervalNode self):
        cdef IntervalNode croot = self.cright
        self.cright = self.cright.cleft
        croot.cleft = self
        self.set_ends()
        return croot

    cdef inline void set_ends(IntervalNode self):
        if self.cright is not EmptyNode and self.cleft is not EmptyNode:
            self.maxend = dmax3(self.end, self.cright.maxend, self.cleft.maxend)
            self.minend = dmin3(self.end, self.cright.minend, self.cleft.minend)
            self.minstart = dmin3(self.start, self.cright.minstart, self.cleft.minstart)
        elif self.cright is not EmptyNode:
            self.maxend = dmax2(self.end, self.cright.maxend)
            self.minend = dmin2(self.end, self.cright.minend)
            self.minstart = dmin2(self.start, self.cright.minstart)
        elif self.cleft is not EmptyNode:
            self.maxend = dmax2(self.end, self.cleft.maxend)
            self.minend = dmin2(self.end, self.cleft.minend)
            self.minstart = dmin2(self.start, self.cleft.minstart)

    def intersect( self, double start, double end, sort=True ):
        """
        given a start and a end, return a list of features
        falling within that range
        """
        cdef list results = []
        self._intersect( start, end, results )
        if sort:
            results = sorted(results)
        return results

    find = intersect

    cdef void _intersect( IntervalNode self, double start, double end, list results):
        # Left subtree
        if self.cleft is not EmptyNode and self.cleft.maxend > start:
            self.cleft._intersect( start, end, results )
        # This interval
        if ( self.end > start ) and ( self.start < end ):
            results.append( self.interval )
        # Right subtree
        if self.cright is not EmptyNode and self.start < end:
            self.cright._intersect( start, end, results )

    def traverse(self):
        if self.cleft is not EmptyNode:
            for node in self.cleft.traverse():
                yield node
        yield self.interval
        if self.cright is not EmptyNode:
            for node in self.cright.traverse():
                yield node

cdef IntervalNode EmptyNode = IntervalNode( 0, 0, Interval(0, 0))

## ---- Wrappers that retain the old interface -------------------------------

cdef class Interval:
    """
    Basic feature, with required double start and end properties.
    Also accepts optional strand as +1 or -1 (used for up/downstream queries),
    a name, and any arbitrary data is sent in on the info keyword argument

    >>> from bx.intervals.intersection import Interval

    >>> f1 = Interval(23, 36)
    >>> f2 = Interval(34, 48, value={'chr':12, 'anno':'transposon'})
    >>> f2
    Interval(34, 48, value={'anno': 'transposon', 'chr': 12})

    """
    cdef public double start, end
    cdef public object value, chrom, strand

    def __init__(self, double start, double end, object value=None, object chrom=None, object strand=None ):
        assert start <= end, "start must be less than end"
        self.start = start
        self.end = end
        self.value = value
        self.chrom = chrom
        self.strand = strand

    def __repr__(self):
        fstr = "Interval(%g, %g" % (self.start, self.end)
        if not self.value is None:
            fstr += ", value=" + str(self.value)
        fstr += ")"
        return fstr

    def __richcmp__(self, other, op):
        if op == 0:
            # <
            return self.start < other.start or self.end < other.end
        elif op == 1:
            # <=
            return self == other or self < other
        elif op == 2:
            # ==
            return self.start == other.start and self.end == other.end
        elif op == 3:
            # !=
            return self.start != other.start or self.end != other.end
        elif op == 4:
            # >
            return self.start > other.start or self.end > other.end
        elif op == 5:
            # >=
            return self == other or self > other

cdef class IntervalTree:
    """
    Data structure for performing window intersect queries on a set
    of possibly overlapping 1d intervals.

    Usage
    =====

    Create an empty IntervalTree

    >>> from bx.intervals.intersection import Interval, IntervalTree
    >>> intersecter = IntervalTree()

    An interval is a start and end position and a value (possibly None).
    You can add any object as an interval:

    >>> intersecter.insert( 0, 10, "food" )
    >>> intersecter.insert( 3, 7, dict(foo='bar') )

    >>> intersecter.find( 2, 5 )
    ['food', {'foo': 'bar'}]

    If the object has start and end attributes (like the Interval class) there
    are some shortcuts:

    >>> intersecter = IntervalTree()
    >>> intersecter.insert_interval( Interval( 0, 10 ) )
    >>> intersecter.insert_interval( Interval( 3, 7 ) )
    >>> intersecter.insert_interval( Interval( 3, 40 ) )
    >>> intersecter.insert_interval( Interval( 13, 50 ) )

    >>> intersecter.find( 30, 50 )
    [Interval(3, 40), Interval(13, 50)]
    >>> intersecter.find( 100, 200 )
    []

    Before/after for intervals

    >>> intersecter.before_interval( Interval( 10, 20 ) )
    [Interval(3, 7)]
    >>> intersecter.before_interval( Interval( 5, 20 ) )
    []

    Upstream/downstream

    >>> intersecter.upstream_of_interval(Interval(11, 12))
    [Interval(0, 10)]
    >>> intersecter.upstream_of_interval(Interval(11, 12, strand="-"))
    [Interval(13, 50)]

    >>> intersecter.upstream_of_interval(Interval(1, 2, strand="-"), num_intervals=3)
    [Interval(3, 7), Interval(3, 40), Interval(13, 50)]

    """

    cdef IntervalNode root

    def __cinit__( self ):
        root = None

    # ---- Position based interfaces -----------------------------------------

    ## KEEP
    def insert( self, double start, double end, object value=None ):
        """
        Insert the interval [start,end) associated with value `value`.
        """
        if self.root is None:
            self.root = IntervalNode( start, end, value )
        else:
            self.root = self.root.insert( start, end, value )

    def delete( self, double start, double end, object value=None ):
        """
        Delete the interval [start,end) associated with value `value`.
        """
        if self.root is None:
            self.root = IntervalNode( start, end, value )
        else:
            self.root = self.root.insert( start, end, value )

    def find( self, start, end ):
        """
        Return a sorted list of all intervals overlapping [start,end).
        """
        if self.root is None:
            return []
        return self.root.find( start, end )

    # ---- Interval-like object based interfaces -----------------------------

    ## KEEP
    def insert_interval( self, interval ):
        """
        Insert an "interval" like object (one with at least start and end
        attributes)
        """
        self.insert( interval.start, interval.end, interval )

    def traverse(self):
        """
        iterator that traverses the tree
        """
        if self.root is None:
            return iter([])
        return self.root.traverse()

# For backward compatibility
Intersecter = IntervalTree
152
nilmdb/client.py
152
nilmdb/client.py
@@ -1,152 +0,0 @@
"""Class for performing HTTP client requests via libcurl"""

from __future__ import absolute_import
from nilmdb.printf import *

import time
import sys
import re
import os
import simplejson as json

import nilmdb.httpclient

# Other functions expect to see these in the nilmdb.client namespace
from nilmdb.httpclient import ClientError, ServerError, Error

version = "1.0"

class Client(object):
    """Main client interface to the Nilm database."""

    client_version = version

    def __init__(self, url):
        self.http = nilmdb.httpclient.HTTPClient(url)

    def _json_param(self, data):
        """Return compact json-encoded version of parameter"""
        return json.dumps(data, separators=(',',':'))

    def close(self):
        self.http.close()

    def geturl(self):
        """Return the URL we're using"""
        return self.http.baseurl

    def version(self):
        """Return server version"""
        return self.http.get("version")

    def dbpath(self):
        """Return server database path"""
        return self.http.get("dbpath")

    def dbsize(self):
        """Return server database size as human readable string"""
        return self.http.get("dbsize")

    def stream_list(self, path = None, layout = None):
        params = {}
        if path is not None:
            params["path"] = path
        if layout is not None:
            params["layout"] = layout
        return self.http.get("stream/list", params)

    def stream_get_metadata(self, path, keys = None):
        params = { "path": path }
        if keys is not None:
            params["key"] = keys
        return self.http.get("stream/get_metadata", params)

    def stream_set_metadata(self, path, data):
        """Set stream metadata from a dictionary, replacing all existing
        metadata."""
        params = {
            "path": path,
            "data": self._json_param(data)
            }
        return self.http.get("stream/set_metadata", params)

    def stream_update_metadata(self, path, data):
        """Update stream metadata from a dictionary"""
        params = {
            "path": path,
            "data": self._json_param(data)
            }
        return self.http.get("stream/update_metadata", params)

    def stream_create(self, path, layout):
        """Create a new stream"""
        params = { "path": path,
                   "layout" : layout }
        return self.http.get("stream/create", params)

    def stream_insert(self, path, data):
        """Insert data into a stream.  data should be a file-like object
        that provides ASCII data that matches the database layout for path."""
        params = { "path": path }

        # See design.md for a discussion of how much data to send.
        # These are soft limits -- actual data might be rounded up.
        max_data = 1048576
        max_time = 30

        def sendit():
            result = self.http.put("stream/insert", send_data, params)
            params["old_timestamp"] = result[1]
            return result

        result = None
        start = time.time()
        send_data = ""
        for line in data:
            elapsed = time.time() - start
            send_data += line

            if (len(send_data) > max_data) or (elapsed > max_time):
                result = sendit()
                send_data = ""
                start = time.time()
        if len(send_data):
            result = sendit()

        # Return the most recent JSON result we got back, or None if
        # we didn't make any requests.
        return result

    def stream_intervals(self, path, start = None, end = None):
        """
        Return a generator that yields each stream interval.
        """
        params = {
            "path": path
        }
        if start is not None:
            params["start"] = repr(start) # use repr to keep precision
        if end is not None:
            params["end"] = repr(end)
        return self.http.get_gen("stream/intervals", params, retjson = True)

    def stream_extract(self, path, start = None, end = None, count = False):
        """
        Extract data from a stream.  Returns a generator that yields
        lines of ASCII-formatted data that matches the database
        layout for the given path.

        Specify count=True to just get a count of values rather than
        the actual data.
        """
        params = {
            "path": path,
        }
        if start is not None:
            params["start"] = repr(start) # use repr to keep precision
        if end is not None:
            params["end"] = repr(end)
        if count:
            params["count"] = 1

        return self.http.get_gen("stream/extract", params, retjson = False)
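For orientation, a minimal usage sketch of this client class (the URL is the
command-line tool's default; the stream path /test/raw is made up):

    import nilmdb.client

    client = nilmdb.client.Client("http://localhost:12380/")
    client.stream_create("/test/raw", "uint16_6")    # six uint16 columns per row
    client.stream_set_metadata("/test/raw", { "source": "demo" })
    for (path, layout) in client.stream_list():
        print path, layout
    client.close()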
nilmdb/cmdline/__init__.py
@@ -1 +0,0 @@
from .cmdline import Cmdline
nilmdb/cmdline/cmdline.py
@@ -1,147 +0,0 @@
"""Command line client functionality"""

from __future__ import absolute_import
from nilmdb.printf import *
import nilmdb.client

import datetime_tz
import dateutil.parser
import sys
import re
import argparse
from argparse import ArgumentDefaultsHelpFormatter as def_form

version = "0.1"

# Valid subcommands.  Defined in separate files just to break
# things up -- they're still called with Cmdline as self.
subcommands = [ "info", "create", "list", "metadata", "insert", "extract" ]

# Import the subcommand modules.  Equivalent way of doing this would be
# from . import info as cmd_info
subcmd_mods = {}
for cmd in subcommands:
    subcmd_mods[cmd] = __import__("nilmdb.cmdline." + cmd, fromlist = [ cmd ])

class Cmdline(object):

    def __init__(self, argv):
        self.argv = argv

    def arg_time(self, toparse):
        """Parse a time string argument"""
        try:
            return self.parse_time(toparse).totimestamp()
        except ValueError as e:
            raise argparse.ArgumentTypeError(sprintf("%s \"%s\"",
                                                     str(e), toparse))

    def parse_time(self, toparse):
        """
        Parse a free-form time string and return a datetime_tz object.
        If the string doesn't contain a timestamp, the current local
        timezone is assumed (e.g. from the TZ env var).
        """
        # If string doesn't contain at least 6 digits, consider it
        # invalid.  smartparse might otherwise accept empty strings
        # and strings with just separators.
        if len(re.findall(r"\d", toparse)) < 6:
            raise ValueError("not enough digits for a timestamp")

        # Try to just parse the time as given
        try:
            return datetime_tz.datetime_tz.smartparse(toparse)
        except ValueError:
            pass

        # Try to extract a substring in a condensed format that we expect
        # to see in a filename or header comment
        res = re.search(r"(^|[^\d])("       # non-numeric or SOL
                        r"(199\d|2\d\d\d)"  # year
                        r"[-/]?"            # separator
                        r"(0[1-9]|1[012])"  # month
                        r"[-/]?"            # separator
                        r"([012]\d|3[01])"  # day
                        r"[-T ]?"           # separator
                        r"([01]\d|2[0-3])"  # hour
                        r"[:]?"             # separator
                        r"([0-5]\d)"        # minute
                        r"[:]?"             # separator
                        r"([0-5]\d)?"       # second
                        r"([-+]\d\d\d\d)?"  # timezone
                        r")", toparse)
        if res is not None:
            try:
                return datetime_tz.datetime_tz.smartparse(res.group(2))
            except ValueError:
                pass

        # Could also try to successively parse substrings, but let's
        # just give up for now.
        raise ValueError("unable to parse timestamp")

    def time_string(self, timestamp):
        """
        Convert a Unix timestamp to a string for printing, using the
        local timezone for display (e.g. from the TZ env var).
        """
        dt = datetime_tz.datetime_tz.fromtimestamp(timestamp)
        return dt.strftime("%a, %d %b %Y %H:%M:%S.%f %z")

    def parser_setup(self):
        version_string = sprintf("nilmtool %s, client library %s",
                                 version, nilmdb.Client.client_version)

        self.parser = argparse.ArgumentParser(add_help = False,
                                              formatter_class = def_form)

        group = self.parser.add_argument_group("General options")
        group.add_argument("-h", "--help", action='help',
                           help='show this help message and exit')
        group.add_argument("-V", "--version", action="version",
                           version=version_string)

        group = self.parser.add_argument_group("Server")
        group.add_argument("-u", "--url", action="store",
                           default="http://localhost:12380/",
                           help="NilmDB server URL (default: %(default)s)")

        sub = self.parser.add_subparsers(title="Commands",
                                         dest="command",
                                         description="Specify --help after "
                                         "the command for command-specific "
                                         "options.")

        # Set up subcommands (defined in separate files)
        for cmd in subcommands:
            subcmd_mods[cmd].setup(self, sub)

    def die(self, formatstr, *args):
        fprintf(sys.stderr, formatstr + "\n", *args)
        self.client.close()
        sys.exit(-1)

    def run(self):
        # Clear cached timezone, so that we can pick up timezone changes
        # while running this from the test suite.
        datetime_tz._localtz = None

        # Run parser
        self.parser_setup()
        self.args = self.parser.parse_args(self.argv)

        self.client = nilmdb.Client(self.args.url)

        # Make a test connection to make sure things work
        try:
            server_version = self.client.version()
        except nilmdb.client.Error as e:
            self.die("Error connecting to server: %s", str(e))

        # Now dispatch client request to appropriate function.  Parser
        # should have ensured that we don't have any unknown commands
        # here.
        retval = self.args.handler(self) or 0

        self.client.close()
        sys.exit(retval)
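For illustration, the condensed-timestamp fallback in parse_time() above will
pull a date out of a filename like this hypothetical one:

    # "prep-20120315T1400.dat" contains 2012-03-15 14:00, which matches the
    # year/month/day/hour/minute portion of the regex:
    self.parse_time("prep-20120315T1400.dat")   # -> datetime_tz for that time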
nilmdb/cmdline/create.py
@@ -1,27 +0,0 @@
from __future__ import absolute_import
from nilmdb.printf import *
import nilmdb.client

from argparse import ArgumentDefaultsHelpFormatter as def_form

def setup(self, sub):
    cmd = sub.add_parser("create", help="Create a new stream",
                         formatter_class = def_form,
                         description="""
                         Create a new empty stream at the
                         specified path and with the specified
                         layout type.
                         """)
    cmd.set_defaults(handler = cmd_create)
    group = cmd.add_argument_group("Required arguments")
    group.add_argument("path",
                       help="Path (in database) of new stream, e.g. /foo/bar")
    group.add_argument("layout",
                       help="Layout type for new stream, e.g. float32_8")

def cmd_create(self):
    """Create new stream"""
    try:
        self.client.stream_create(self.args.path, self.args.layout)
    except nilmdb.client.ClientError as e:
        self.die("Error creating stream: %s", str(e))
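A hypothetical invocation of this subcommand (path and layout made up):

    nilmtool create /test/raw uint16_6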
nilmdb/cmdline/extract.py
@@ -1,61 +0,0 @@
from __future__ import absolute_import
from nilmdb.printf import *
import nilmdb.client
import nilmdb.layout
import sys

def setup(self, sub):
    cmd = sub.add_parser("extract", help="Extract data",
                         description="""
                         Extract data from a stream.
                         """)
    cmd.set_defaults(handler = cmd_extract)

    group = cmd.add_argument_group("Data selection")
    group.add_argument("path",
                       help="Path of stream, e.g. /foo/bar")
    group.add_argument("-s", "--start", required=True,
                       metavar="TIME", type=self.arg_time,
                       help="Starting timestamp (free-form)")
    group.add_argument("-e", "--end", required=True,
                       metavar="TIME", type=self.arg_time,
                       help="Ending timestamp (free-form)")

    group = cmd.add_argument_group("Output format")
    group.add_argument("-b", "--bare", action="store_true",
                       help="Exclude timestamps from output lines")
    group.add_argument("-a", "--annotate", action="store_true",
                       help="Include comments with some information "
                       "about the stream")
    group.add_argument("-c", "--count", action="store_true",
                       help="Just output a count of matched data points")

def cmd_extract(self):
    streams = self.client.stream_list(self.args.path)
    if len(streams) != 1:
        self.die("Error getting stream info for path %s", self.args.path)
    layout = streams[0][1]

    if self.args.annotate:
        printf("# path: %s\n", self.args.path)
        printf("# layout: %s\n", layout)
        printf("# start: %s\n", self.time_string(self.args.start))
        printf("# end: %s\n", self.time_string(self.args.end))

    printed = False
    for dataline in self.client.stream_extract(self.args.path,
                                               self.args.start,
                                               self.args.end,
                                               self.args.count):
        if self.args.bare and not self.args.count:
            # Strip timestamp (first element).  Doesn't make sense
            # if we are only returning a count.
            dataline = ' '.join(dataline.split(' ')[1:])
        print dataline
        printed = True
    if not printed:
        if self.args.annotate:
            printf("# no data\n")
        return 2

    return 0
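A hypothetical invocation, using the free-form timestamps that arg_time()
accepts:

    nilmtool extract -s "2012-03-15 14:00" -e "2012-03-15 15:00" --annotate /test/raw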
nilmdb/cmdline/info.py
@@ -1,21 +0,0 @@
from __future__ import absolute_import
from nilmdb.printf import *

from argparse import ArgumentDefaultsHelpFormatter as def_form

def setup(self, sub):
    cmd = sub.add_parser("info", help="Server information",
                         formatter_class = def_form,
                         description="""
                         List information about the server, like
                         version.
                         """)
    cmd.set_defaults(handler = cmd_info)

def cmd_info(self):
    """Print info about the server"""
    printf("Client library version: %s\n", self.client.client_version)
    printf("Server version: %s\n", self.client.version())
    printf("Server URL: %s\n", self.client.geturl())
    printf("Server database path: %s\n", self.client.dbpath())
    printf("Server database size: %s\n", self.client.dbsize())
nilmdb/cmdline/insert.py
@@ -1,106 +0,0 @@
from __future__ import absolute_import
from nilmdb.printf import *
import nilmdb.client
import nilmdb.layout
import nilmdb.timestamper

import sys

def setup(self, sub):
    cmd = sub.add_parser("insert", help="Insert data",
                         description="""
                         Insert data into a stream.
                         """)
    cmd.set_defaults(handler = cmd_insert)
    cmd.add_argument("-q", "--quiet", action='store_true',
                     help='suppress unnecessary messages')

    group = cmd.add_argument_group("Timestamping",
                                   description="""
                                   If timestamps are already provided in the
                                   input data, use --none.  Otherwise,
                                   provide --start, or use --filename to
                                   try to deduce timestamps from the file.

                                   Set the TZ environment variable to change
                                   the default timezone.
                                   """)

    group.add_argument("-r", "--rate", type=float,
                       help="""
                       If needed, rate in Hz (required when using --start)
                       """)
    exc = group.add_mutually_exclusive_group()
    exc.add_argument("-s", "--start",
                     metavar="TIME", type=self.arg_time,
                     help="Starting timestamp (free-form)")
    exc.add_argument("-f", "--filename", action="store_true",
                     help="""
                     Use filenames to determine start time
                     (default, if filenames are provided)
                     """)
    exc.add_argument("-n", "--none", action="store_true",
                     help="Timestamp is already present, don't add one")

    group = cmd.add_argument_group("Required parameters")
    group.add_argument("path",
                       help="Path of stream, e.g. /foo/bar")
    group.add_argument("file", nargs="*", default=['-'],
                       help="File(s) to insert (default: - (stdin))")

def cmd_insert(self):
    # Find requested stream
    streams = self.client.stream_list(self.args.path)
    if len(streams) != 1:
        self.die("Error getting stream info for path %s", self.args.path)

    layout = streams[0][1]

    if self.args.start and len(self.args.file) != 1:
        self.die("--start can only be used with one input file, for now")

    for filename in self.args.file:
        if filename == '-':
            infile = sys.stdin
        else:
            try:
                infile = open(filename, "r")
            except IOError:
                self.die("Error opening input file %s", filename)

        # Build a timestamper for this file
        if self.args.none:
            ts = nilmdb.timestamper.TimestamperNull(infile)
        else:
            if self.args.start:
                start = self.args.start
            else:
                try:
                    start = self.parse_time(filename)
                except ValueError:
                    self.die("Error extracting time from filename '%s'",
                             filename)

            if not self.args.rate:
                self.die("Need to specify --rate")
            rate = self.args.rate

            ts = nilmdb.timestamper.TimestamperRate(infile, start, rate)

        # Print info
        if not self.args.quiet:
            printf("Input file: %s\n", filename)
            printf("Timestamper: %s\n", str(ts))

        # Insert the data
        try:
            result = self.client.stream_insert(self.args.path, ts)
        except nilmdb.client.Error as e:
            # TODO: It would be nice to be able to offer better errors
            # here, particularly in the case of overlap, which just shows
            # ugly bracketed ranges of 16-digit numbers and a mangled URL.
            # Need to consider adding something like e.prettyprint()
            # that is smarter about the contents of the error.
            self.die("Error inserting data: %s", str(e))

    return
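A hypothetical invocation, timestamping a raw 8 kHz file whose start time is
deduced from its name:

    nilmtool insert --rate 8000 /test/raw prep-20120315T1400.dat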
nilmdb/cmdline/list.py
@@ -1,54 +0,0 @@
from __future__ import absolute_import
from nilmdb.printf import *
import nilmdb.client

import fnmatch
from argparse import ArgumentDefaultsHelpFormatter as def_form

def setup(self, sub):
    cmd = sub.add_parser("list", help="List streams",
                         formatter_class = def_form,
                         description="""
                         List streams available in the database,
                         optionally filtering by layout or path.  Wildcards
                         are accepted.
                         """)
    cmd.set_defaults(handler = cmd_list)

    group = cmd.add_argument_group("Stream filtering")
    group.add_argument("-l", "--layout", default="*",
                       help="Match only this stream layout")
    group.add_argument("-p", "--path", default="*",
                       help="Match only this path")

    group = cmd.add_argument_group("Interval details")
    group.add_argument("-d", "--detail", action="store_true",
                       help="Show available data time intervals")
    group.add_argument("-s", "--start",
                       metavar="TIME", type=self.arg_time,
                       help="Starting timestamp (free-form)")
    group.add_argument("-e", "--end",
                       metavar="TIME", type=self.arg_time,
                       help="Ending timestamp (free-form)")

def cmd_list(self):
    """List available streams"""
    streams = self.client.stream_list()
    for (path, layout) in streams:
        if not (fnmatch.fnmatch(path, self.args.path) and
                fnmatch.fnmatch(layout, self.args.layout)):
            continue

        printf("%s %s\n", path, layout)
        if not self.args.detail:
            continue

        printed = False
        for (start, end) in self.client.stream_intervals(path, self.args.start,
                                                         self.args.end):
            printf("  [ %s -> %s ]\n",
                   self.time_string(start),
                   self.time_string(end))
            printed = True
        if not printed:
            printf("  (no intervals)\n")
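A hypothetical invocation, filtering by path wildcard and showing intervals:

    nilmtool list --path "/test/*" --detail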
nilmdb/cmdline/metadata.py
@@ -1,65 +0,0 @@
from __future__ import absolute_import
from nilmdb.printf import *
import nilmdb.client

def setup(self, sub):
    cmd = sub.add_parser("metadata", help="Get or set stream metadata",
                         description="""
                         Get or set key=value metadata associated with
                         a stream.
                         """,
                         usage="%(prog)s path [-g [key ...] | "
                               "-s key=value [...] | -u key=value [...]]")
    cmd.set_defaults(handler = cmd_metadata)

    group = cmd.add_argument_group("Required arguments")
    group.add_argument("path",
                       help="Path of stream, e.g. /foo/bar")

    group = cmd.add_argument_group("Actions")
    exc = group.add_mutually_exclusive_group()
    exc.add_argument("-g", "--get", nargs="*", metavar="key",
                     help="Get metadata for specified keys (default all)")
    exc.add_argument("-s", "--set", nargs="+", metavar="key=value",
                     help="Replace all metadata with provided "
                          "key=value pairs")
    exc.add_argument("-u", "--update", nargs="+", metavar="key=value",
                     help="Update metadata using provided "
                          "key=value pairs")

def cmd_metadata(self):
    """Manipulate metadata"""
    if self.args.set is not None or self.args.update is not None:
        # Either set, or update
        if self.args.set is not None:
            keyvals = self.args.set
            handler = self.client.stream_set_metadata
        else:
            keyvals = self.args.update
            handler = self.client.stream_update_metadata

        # Extract key=value pairs
        data = {}
        for keyval in keyvals:
            kv = keyval.split('=')
            if len(kv) != 2 or kv[0] == "":
                self.die("Error parsing key=value argument '%s'", keyval)
            data[kv[0]] = kv[1]

        # Make the call
        try:
            handler(self.args.path, data)
        except nilmdb.client.ClientError as e:
            self.die("Error setting/updating metadata: %s", str(e))
    else:
        # Get (or unspecified)
        keys = self.args.get or None
        try:
            data = self.client.stream_get_metadata(self.args.path, keys)
        except nilmdb.client.ClientError as e:
            self.die("Error getting metadata: %s", str(e))
        for key, value in sorted(data.items()):
            # Omit nonexistent keys
            if value is None:
                value = ""
            printf("%s=%s\n", key, value)
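Hypothetical invocations of the three actions (keys and values made up):

    nilmtool metadata /test/raw -s source=demo rate=8000   # replace all metadata
    nilmtool metadata /test/raw -u rate=16000              # update one key
    nilmtool metadata /test/raw -g                         # get everything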
nilmdb/du.py
@@ -1,30 +0,0 @@
import nilmdb
import os
from math import log

def sizeof_fmt(num):
    """Human friendly file size"""
    unit_list = zip(['bytes', 'kiB', 'MiB', 'GiB', 'TiB'], [0, 0, 1, 2, 2])
    if num > 1:
        exponent = min(int(log(num, 1024)), len(unit_list) - 1)
        quotient = float(num) / 1024**exponent
        unit, num_decimals = unit_list[exponent]
        format_string = '{:.%sf} {}' % (num_decimals)
        return format_string.format(quotient, unit)
    if num == 0: # pragma: no cover
        return '0 bytes'
    if num == 1: # pragma: no cover
        return '1 byte'

def du_bytes(path):
    """Like du -sb, returns total size of path in bytes."""
    size = os.path.getsize(path)
    if os.path.isdir(path):
        for file in os.listdir(path):
            filepath = os.path.join(path, file)
            size += du_bytes(filepath)
    return size

def du(path):
    """Like du -sh, returns total size of path as a human-readable string."""
    return sizeof_fmt(du_bytes(path))
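A quick sanity check of sizeof_fmt() (exponent 2 selects MiB with one decimal):

    >>> from nilmdb.du import sizeof_fmt
    >>> sizeof_fmt(123456789)
    '117.7 MiB'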
nilmdb/fileinterval.py (new file)
@@ -0,0 +1,37 @@
"""FileInterval

An Interval that is backed with file data storage"""

from nilmdb.interval import Interval, IntervalSet, IntervalError
from datetime import datetime
import bisect
import os    # needed for os.SEEK_END below

class FileInterval(Interval):
    """Represents an interval of time and its corresponding data"""

    def __init__(self, start, end,
                 filename,
                 start_offset = None, end_offset = None):
        self.start = start
        self.end = end
        self.filename = filename
        if start_offset is None:
            start_offset = 0
        self.start_offset = start_offset
        if end_offset is None:
            f = open(filename, 'rb')
            f.seek(0, os.SEEK_END)
            end_offset = f.tell()
        self.end_offset = end_offset

    def __setattr__(self, name, value):
        # Placeholder override; delegate to the default behavior so that
        # the assignments in __init__ actually take effect (the original
        # stub body was just "pass", which silently dropped them).
        object.__setattr__(self, name, value)

    def subset(self, start, end):
        """Return a new Interval that is a subset of this one"""
        # TODO: Any magic regarding file/offset/length mapping for subsets
        if (start < self.start or end > self.end):
            raise IntervalError("not a subset")
        # Pass the backing filename through; offsets still need mapping (TODO)
        return FileInterval(start, end, self.filename)
nilmdb/httpclient.py
@@ -1,220 +0,0 @@
"""HTTP client library"""

from __future__ import absolute_import
from nilmdb.printf import *

import time
import sys
import re
import os
import simplejson as json
import urlparse
import urllib
import pycurl
import cStringIO

import nilmdb.iteratorizer

class Error(Exception):
    """Base exception for both ClientError and ServerError responses"""
    def __init__(self,
                 status = "Unspecified error",
                 message = None,
                 url = None,
                 traceback = None):
        Exception.__init__(self, status)
        self.status = status       # e.g. "400 Bad Request"
        self.message = message     # textual message from the server
        self.url = url             # URL we were requesting
        self.traceback = traceback # server traceback, if available
    def __str__(self):
        s = sprintf("[%s]", self.status)
        if self.message:
            s += sprintf(" %s", self.message)
        if self.url:
            s += sprintf(" (%s)", self.url)
        if self.traceback: # pragma: no cover
            s += sprintf("\nServer traceback:\n%s", self.traceback)
        return s
class ClientError(Error):
    pass
class ServerError(Error):
    pass

class HTTPClient(object):
    """Class to manage and perform HTTP requests from the client"""
    def __init__(self, baseurl = ""):
        """If baseurl is supplied, all other functions that take
        a URL can be given a relative URL instead."""
        # Verify / clean up URL
        reparsed = urlparse.urlparse(baseurl).geturl()
        if '://' not in reparsed:
            reparsed = urlparse.urlparse("http://" + baseurl).geturl()
        self.baseurl = reparsed
        self.curl = pycurl.Curl()
        self.curl.setopt(pycurl.SSL_VERIFYHOST, 2)
        self.curl.setopt(pycurl.FOLLOWLOCATION, 1)
        self.curl.setopt(pycurl.MAXREDIRS, 5)
        self._setup_url()

    def _setup_url(self, url = "", params = ""):
        url = urlparse.urljoin(self.baseurl, url)
        if params:
            url = urlparse.urljoin(url, "?" + urllib.urlencode(params, True))
        self.curl.setopt(pycurl.URL, url)
        self.url = url

    def _check_error(self, body = None):
        code = self.curl.getinfo(pycurl.RESPONSE_CODE)
        if code == 200:
            return
        # Default variables for exception
        args = { "url" : self.url,
                 "status" : str(code),
                 "message" : None,
                 "traceback" : None }
        try:
            # Fill with server-provided data if we can
            jsonerror = json.loads(body)
            args["status"] = jsonerror["status"]
            args["message"] = jsonerror["message"]
            args["traceback"] = jsonerror["traceback"]
        except Exception: # pragma: no cover
            pass
        if code >= 400 and code <= 499:
            raise ClientError(**args)
        else: # pragma: no cover
            if code >= 500 and code <= 599:
                raise ServerError(**args)
            else:
                raise Error(**args)

    def _req_generator(self, url, params):
        """
        Like self._req(), but runs the perform in a separate thread.
        It returns a generator that spits out arbitrary-sized chunks
        of the resulting data, instead of using the WRITEFUNCTION
        callback.
        """
        self._setup_url(url, params)
        self._status = None
        error_body = ""
        self._headers = ""
        def header_callback(data):
            if self._status is None:
                self._status = int(data.split(" ")[1])
            self._headers += data
        self.curl.setopt(pycurl.HEADERFUNCTION, header_callback)
        def func(callback):
            self.curl.setopt(pycurl.WRITEFUNCTION, callback)
            self.curl.perform()
        try:
            for i in nilmdb.iteratorizer.Iteratorizer(func):
                if self._status == 200:
                    # If we had a 200 response, yield the data to the caller.
                    yield i
                else:
                    # Otherwise, collect it into an error string.
                    error_body += i
        except pycurl.error as e:
            raise ServerError(status = "502 Error",
                              url = self.url,
                              message = e[1])
        # Raise an exception if there was an error
        self._check_error(error_body)

    def _req(self, url, params):
        """
        GET or POST that returns raw data.  Returns the body
        data as a string, or raises an error if it contained an error.
        """
        self._setup_url(url, params)
        body = cStringIO.StringIO()
        self.curl.setopt(pycurl.WRITEFUNCTION, body.write)
        self._headers = ""
        def header_callback(data):
            self._headers += data
        self.curl.setopt(pycurl.HEADERFUNCTION, header_callback)
        try:
            self.curl.perform()
        except pycurl.error as e:
            raise ServerError(status = "502 Error",
                              url = self.url,
                              message = e[1])
        body_str = body.getvalue()
        # Raise an exception if there was an error
        self._check_error(body_str)
        return body_str

    def close(self):
        self.curl.close()

    def _iterate_lines(self, it):
        """
        Given an iterator that returns arbitrarily-sized chunks
        of data, return '\n'-delimited lines of text
        """
        partial = ""
        for chunk in it:
            partial += chunk
            lines = partial.split("\n")
            for line in lines[0:-1]:
                yield line
            partial = lines[-1]
        if partial != "":
            yield partial

    # Non-generator versions
    def _doreq(self, url, params, retjson):
        """
        Perform a request, and return the body.

        url: URL to request (relative to baseurl)
        params: dictionary of query parameters
        retjson: expect JSON and return python objects instead of string
        """
        out = self._req(url, params)
        if retjson:
            return json.loads(out)
        return out

    def get(self, url, params = None, retjson = True):
        """Simple GET"""
        self.curl.setopt(pycurl.UPLOAD, 0)
        return self._doreq(url, params, retjson)

    def put(self, url, postdata, params = None, retjson = True):
        """Simple PUT"""
        self._setup_url(url, params)
        data = cStringIO.StringIO(postdata)
        self.curl.setopt(pycurl.UPLOAD, 1)
        self.curl.setopt(pycurl.READFUNCTION, data.read)
        return self._doreq(url, params, retjson)

    # Generator versions
    def _doreq_gen(self, url, params, retjson):
        """
        Perform a request, and return lines of the body in a generator.

        url: URL to request (relative to baseurl)
        params: dictionary of query parameters
        retjson: expect JSON and yield python objects instead of strings
        """
        for line in self._iterate_lines(self._req_generator(url, params)):
            if retjson:
                yield json.loads(line)
            else:
                yield line

    def get_gen(self, url, params = None, retjson = True):
        """Simple GET, returning a generator"""
        self.curl.setopt(pycurl.UPLOAD, 0)
        return self._doreq_gen(url, params, retjson)

    def put_gen(self, url, postdata, params = None, retjson = True):
        """Simple PUT, returning a generator"""
        self._setup_url(url, params)
        data = cStringIO.StringIO(postdata)
        self.curl.setopt(pycurl.UPLOAD, 1)
        self.curl.setopt(pycurl.READFUNCTION, data.read)
        return self._doreq_gen(url, params, retjson)
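The chunk-to-line reassembly in _iterate_lines() is easy to check standalone;
this sketch repeats its logic with plain strings:

    def iterate_lines(chunks):
        # Reassemble newline-delimited lines from arbitrary-sized chunks.
        partial = ""
        for chunk in chunks:
            partial += chunk
            lines = partial.split("\n")
            for line in lines[0:-1]:
                yield line
            partial = lines[-1]
        if partial != "":
            yield partial

    print list(iterate_lines(["ab", "c\nde", "f\n"]))   # prints ['abc', 'def']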
nilmdb/interval.py
@@ -1,201 +1,140 @@
"""Interval and IntervalSet

Represents an interval of time, and a set of such intervals.
Represents an interval of time, and a sorted set of such intervals"""

Intervals are closed, i.e. they include timestamps [start, end]
"""

# First implementation kept a sorted list of intervals and used
# bisect() to optimize some operations, but this was too slow.

# Second version was based on the quicksect implementation from
# python-bx, modified slightly to handle floating point intervals.
# This didn't support deletion.

# Third version is more similar to the first version, using a rb-tree
# instead of a simple sorted list to maintain O(log n) operations.

# Fourth version is an optimized rb-tree that stores interval starts
# and ends directly in the tree, like bxinterval did.

# Fifth version is back to modified bxintersect...

import bxintersect
from datetime import datetime
import bisect

class IntervalError(Exception):
    """Error due to interval overlap, etc"""
    pass

class Interval(object):
    """Represents an interval of time."""
    """Represents an interval of time"""

    start = None
    end = None

    def __init__(self, start, end):
        """
        'start' and 'end' are arbitrary floats that represent time
        """
        if start > end:
            raise IntervalError("start %s must precede end %s" % (start, end))
        self.start = float(start)
        self.end = float(end)
        self.start = start
        self.end = end

    def __repr__(self):
        s = repr(self.start) + ", " + repr(self.end)
        return self.__class__.__name__ + "(" + s + ")"
        return "Interval(" + repr(self.start) + ", " + repr(self.end) + ")"

    def __str__(self):
        return "[" + str(self.start) + " -> " + str(self.end) + "]"

    def __setattr__(self, name, value):
        """Set attribute"""
        # TODO: If we need to manipulate file names, offsets, lengths, etc,
        # based on start and end time changing, maybe this is the right spot?
        # Or we could just disallow changing it here.
        if not isinstance(value, datetime):
            raise IntervalError("Must set datetime values")
        self.__dict__[name] = value
        if (type(self.start) is type(self.end)):
            if (self.start > self.end):
                raise IntervalError("Interval start must precede interval end")

    def __cmp__(self, other):
        """Compare two intervals.  If non-equal, order by start then end"""
        if not isinstance(other, Interval):
            raise TypeError("bad type")
        if self.start == other.start:
            if self.end < other.end:
            raise TypeError("Can't compare to non-interval")
        if (self.start == other.start):
            if (self.end < other.end):
                return -1
            if self.end > other.end:
            if (self.end > other.end):
                return 1
            return 0
        if self.start < other.start:
        if (self.start < other.start):
            return -1
        return 1

    def intersects(self, other):
        """Return True if two Interval objects intersect"""
        if (self.end <= other.start or self.start >= other.end):
        if (not isinstance(other, Interval)):
            raise TypeError("need Interval for intersection test")
        if (self.end <= other.start or
            self.start >= other.end):
            return False
        else:
            return True

    def is_adjacent(self, other):
        """Return True if two Intervals are adjacent (same end or start)"""
        if (not isinstance(other, Interval)):
            raise TypeError("need Interval for adjacency test")
        if (self.end == other.start or
            self.start == other.end):
            return True
        else:
            return False
        return True

    def subset(self, start, end):
        """Return a new Interval that is a subset of this one"""
        # A subclass that tracks additional data might override this.
        if start < self.start or end > self.end:
        # TODO: Any magic regarding file/offset/length mapping for subsets
        if (start < self.start or end > self.end):
            raise IntervalError("not a subset")
        return Interval(start, end)

class DBInterval(Interval):
    """
    Like Interval, but also tracks corresponding start/end times and
    positions within the database.  These are not currently modified
    when subsets are taken, but can be used later to help zero in on
    database positions.

    The actual 'start' and 'end' will always fall within the database
    start and end, e.g.:
      db_start = 100, db_startpos = 10000
      start = 123
      end = 150
      db_end = 200, db_endpos = 20000
    """

    def __init__(self, start, end,
                 db_start, db_end,
                 db_startpos, db_endpos):
        """
        'db_start' and 'db_end' are arbitrary floats that represent
        time.  They must be a strict superset of the time interval
        covered by 'start' and 'end'.  The 'db_startpos' and
        'db_endpos' are arbitrary database position indicators that
        correspond to those points.
        """
        Interval.__init__(self, start, end)
        self.db_start = db_start
        self.db_end = db_end
        self.db_startpos = db_startpos
        self.db_endpos = db_endpos
        if db_start > start or db_end < end:
            raise IntervalError("database times must span the interval times")

    def __repr__(self):
        s = repr(self.start) + ", " + repr(self.end)
        s += ", " + repr(self.db_start) + ", " + repr(self.db_end)
        s += ", " + repr(self.db_startpos) + ", " + repr(self.db_endpos)
        return self.__class__.__name__ + "(" + s + ")"

    def subset(self, start, end):
        """
        Return a new DBInterval that is a subset of this one
        """
        if start < self.start or end > self.end:
            raise IntervalError("not a subset")
        return DBInterval(start, end,
                          self.db_start, self.db_end,
                          self.db_startpos, self.db_endpos)
        return Interval(start, end)

class IntervalSet(object):
    """
    A non-intersecting set of intervals.
    """
    """A non-intersecting set of intervals

    def __init__(self, source=None):
        """
        'source' is an Interval or IntervalSet to add.
        """
        self.tree = bxinterval.IntervalTree()
        if source is not None:
            self += source
    Kept sorted internally"""

    def __init__(self, iterable=None):
        self.data = []
        if iterable is not None:
            if isinstance(iterable, Interval):
                iterable = [iterable]
            self._add_intervals(iterable)

    def __iter__(self):
        for node in self.tree:
            if node.obj:
                yield node.obj

    def __len__(self):
        return sum(1 for x in self)
        return self.data.__iter__()

    def __repr__(self):
        descs = [ repr(x) for x in self ]
        return self.__class__.__name__ + "([" + ", ".join(descs) + "])"
        return "IntervalSet(" + repr(list(self.data)) + ")"

    def __str__(self):
        descs = [ str(x) for x in self ]
        return "[" + ", ".join(descs) + "]"
    def __cmp__(self, other):
        # compare isn't supported, they don't really have an ordering
        raise TypeError("can't compare IntervalSets with cmp()")

    def __eq__(self, other):
        # This isn't particularly efficient, but it shouldn't get used in the
        # general case.
        """Test equality of two IntervalSets.

        Treats adjacent Intervals as equivalent to one long interval,
        so this function really tests whether the IntervalSets cover
        the same spans of time."""
        if not isinstance(other, IntervalSet):
            return False
        i = 0
        j = 0
        outside = True

        def is_adjacent(a, b):
            """Return True if two Intervals are adjacent (same end or start)"""
            if a.end == b.start or b.end == a.start:
                return True
            else:
                return False

        this = [ x for x in self ]
        that = [ x for x in other ]

        try:
            while True:
                if (outside):
                    # To match, we need to be finished both sets
                    if (i >= len(this) and j >= len(that)):
                    # To match, we need to be finished this set
                    if (i >= len(self) and j >= len(other)):
                        return True
                    # Or the starts need to match
                    if (this[i].start != that[j].start):
                    if (self[i].start != other[j].start):
                        return False
                    outside = False
                else:
                    # We can move on if the two interval ends match
                    if (this[i].end == that[j].end):
                    if (self[i].end == other[j].end):
                        i += 1
                        j += 1
                        outside = True
                    else:
                        # Whichever ends first needs to be adjacent to the next
                        if (this[i].end < that[j].end):
                            if (not is_adjacent(this[i],this[i+1])):
                        if (self[i].end < other[j].end):
                            if (not self[i].is_adjacent(self[i+1])):
                                return False
                            i += 1
                        else:
                            if (not is_adjacent(that[j],that[j+1])):
                            if (not other[j].is_adjacent(other[j+1])):
                                return False
                            j += 1
        except IndexError:
@@ -204,90 +143,63 @@ class IntervalSet(object):
    def __ne__(self, other):
        return not self.__eq__(other)

    def __len__(self):
        return len(self.data)

    def __getitem__(self, key):
        return self.data.__getitem__(key)

    def __iadd__(self, other):
        """Inplace add -- modifies self

        This throws an exception if the regions being added intersect."""
        if isinstance(other, Interval):
            if self.intersects(other):
                raise IntervalError("Tried to add overlapping interval "
                                    "to this set")
            self.tree.insert_interval(other)
        else:
            for x in other:
                self.__iadd__(x)
            other = [other]
        self._add_intervals(other)
        return self

    def __isub__(self, other):
        """Inplace subtract -- modifies self

        Removes an interval from the set.  Must exist exactly
        as provided -- cannot remove a subset of an existing interval."""
        i = self.tree.find(other.start, other.end)
        if i is None:
            raise IntervalError("interval " + str(other) + " not in tree")
        self.tree.delete(i)
        return self

    def __add__(self, other):
        """Add -- returns a new object"""
        """Add -- returns a new object

        This throws an exception if the regions being added intersect."""
        new = IntervalSet(self)
        new += IntervalSet(other)
        return new

    def __and__(self, other):
        """
        Compute a new IntervalSet from the intersection of two others
        """Compute a new IntervalSet from the intersection of two others

        Output intervals are built as subsets of the intervals in the
        first argument (self).
        """
        first argument (self)."""
        # If we were given a set, intersect with each interval in that set
        if isinstance(other, IntervalSet):
            out = IntervalSet()
            for interval in other.data:
                out += self & interval
            return out

        if not isinstance(other, Interval):
            raise TypeError("can't intersect with that type")

        out = IntervalSet()

        if not isinstance(other, IntervalSet):
            for i in self.intersection(other):
                out.tree.insert(rbtree.RBNode(i))
        else:
            for x in other:
                for i in self.intersection(x):
                    out.tree.insert(rbtree.RBNode(i))

        for this in self.data:
            # If there's any overlap, add the overlapping region
            if (this.end > other.start and this.start < other.end):
                out += this.subset(max(this.start, other.start),
                                   min(this.end, other.end))
        return out

    def _add_intervals(self, iterable):
        """Add each Interval from an iterable to this set"""
        for element in iter(iterable):
            self._add_single_interval(element)

    def intersection(self, interval):
        """
        Compute a sequence of intervals that correspond to the
        intersection between `self` and the provided interval.
        Returns a generator that yields each of these intervals
        in turn.

        Output intervals are built as subsets of the intervals in the
        first argument (self).
        """
        if not isinstance(interval, Interval):
            raise TypeError("bad type")
        for n in self.tree.intersect(interval.start, interval.end):
            i = n.obj
            if i:
                if i.start >= interval.start and i.end <= interval.end:
                    yield i
                elif i.start > interval.end:
                    break
                else:
                    subset = i.subset(max(i.start, interval.start),
                                      min(i.end, interval.end))
                    yield subset

    def intersects(self, other):
        ### PROBABLY WRONG
        """Return True if this IntervalSet intersects another interval"""
        node = self.tree.find_left(other.start, other.end)
        if node is None:
            return False
        for n in self.tree.inorder(node):
            if n.obj:
                if n.obj.intersects(other):
                    return True
                if n.obj > other:
                    break
        return False
    def _add_single_interval(self, interval):
        """Add one Interval to this set"""
        if (not isinstance(interval, Interval)):
            raise TypeError("can only add Intervals")
        for existing in self.data:
            if existing.intersects(interval):
                raise IntervalError("Tried to add overlapping interval "
                                    "to this set")
        bisect.insort(self.data, interval)
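The overlap clipping that __and__ performs reduces to a two-line computation;
a standalone sketch with plain (start, end) tuples:

    def overlap(a, b):
        # Overlapping region of two half-open ranges, or None if disjoint.
        start = max(a[0], b[0])
        end = min(a[1], b[1])
        return (start, end) if start < end else None

    print overlap((0, 10), (5, 20))    # (5, 10)
    print overlap((0, 10), (15, 20))   # None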
nilmdb/iteratorizer.py
@@ -1,72 +0,0 @@
import Queue
import threading
import sys

# This file provides a class that will convert a function that
# takes a callback into a generator that returns an iterator.

# Based partially on http://stackoverflow.com/questions/9968592/

class IteratorizerThread(threading.Thread):
    def __init__(self, queue, function):
        """
        function: function to execute, which takes the
                  callback (provided by this class) as an argument
        """
        threading.Thread.__init__(self)
        self.function = function
        self.queue = queue
        self.die = False

    def callback(self, data):
        if self.die:
            raise Exception("should die")
        self.queue.put((1, data))

    def run(self):
        try:
            result = self.function(self.callback)
        except:
            if sys is not None: # can be None during unclean shutdown
                self.queue.put((2, sys.exc_info()))
        else:
            self.queue.put((0, result))

class Iteratorizer(object):
    def __init__(self, function):
        """
        function: function to execute, which takes the
                  callback (provided by this class) as an argument
        """
        self.function = function
        self.queue = Queue.Queue(maxsize = 1)
        self.thread = IteratorizerThread(self.queue, self.function)
        self.thread.daemon = True
        self.thread.start()

    def __del__(self):
        # If we get garbage collected, try to get rid of the
        # thread too by asking it to raise an exception, then
        # draining the queue until it's gone.
        self.thread.die = True
        while self.thread.isAlive():
            try:
                self.queue.get(True, 0.01)
            except: # pragma: no cover
                pass

    def __iter__(self):
        return self

    def next(self):
        (typ, data) = self.queue.get()
        if typ == 0:
            # function returned
            self.retval = data
            raise StopIteration
        elif typ == 1:
            # data available
            return data
        else:
            # exception
            raise data[0], data[1], data[2]
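A sketch of how this adapter gets used (the producer function is made up; in
httpclient.py the role is played by curl's perform/WRITEFUNCTION):

    def producer(callback):
        # Stands in for a library that only reports data via a callback.
        for chunk in [ "a", "b", "c" ]:
            callback(chunk)
        return "done"

    it = Iteratorizer(producer)
    print [ chunk for chunk in it ]   # ['a', 'b', 'c']
    print it.retval                   # 'done', the producer's return value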
nilmdb/layout.pyx
@@ -1,219 +0,0 @@
# cython: profile=False

import tables
import time
import sys
import inspect
import cStringIO
import numpy as np

cdef enum:
    max_value_count = 64

cimport cython
cimport libc.stdlib
cimport libc.stdio
cimport libc.string

class ParserError(Exception):
    def __init__(self, line, message):
        self.message = "line " + str(line) + ": " + message
        Exception.__init__(self, self.message)

class FormatterError(Exception):
    pass

class Layout:
    """Represents a NILM database layout"""

    def __init__(self, typestring):
        """Initialize this Layout object to handle the specified
        type string"""
        try:
            [ datatype, count ] = typestring.split("_")
        except:
            raise KeyError("invalid layout string")

        try:
            self.count = int(count)
        except ValueError:
            raise KeyError("invalid count")
        if self.count < 1 or self.count > max_value_count:
            raise KeyError("invalid count")

        if datatype == 'uint16':
            self.parse = self.parse_uint16
            self.format = self.format_uint16
        elif datatype == 'float32' or datatype == 'float64':
            self.parse = self.parse_float64
            self.format = self.format_float64
        else:
            raise KeyError("invalid type")

        self.datatype = datatype

    # Parsers
    def parse_float64(self, char *text):
        cdef int n
        cdef double ts
        # Return doubles even in float32 case, since they're going into
        # a Python array which would upconvert to double anyway.
        result = []
        cdef char *end
        ts = libc.stdlib.strtod(text, &end)
        if end == text:
            raise ValueError("bad timestamp")
        result.append(ts)
        for n in range(self.count):
            text = end
            result.append(libc.stdlib.strtod(text, &end))
            if end == text:
                raise ValueError("wrong number of values")
        n = 0
        while end[n] == ' ':
            n += 1
        if end[n] != '\n' and end[n] != '#' and end[n] != '\0':
            raise ValueError("extra data on line")
        return (ts, result)

    def parse_uint16(self, char *text):
        cdef int n
        cdef double ts
        cdef int v
        result = []
        cdef char *end
        ts = libc.stdlib.strtod(text, &end)
        if end == text:
            raise ValueError("bad timestamp")
        result.append(ts)
        for n in range(self.count):
            text = end
            v = libc.stdlib.strtol(text, &end, 10)
            if v < 0 or v > 65535:
                raise ValueError("value out of range")
            result.append(v)
            if end == text:
                raise ValueError("wrong number of values")
        n = 0
        while end[n] == ' ':
            n += 1
        if end[n] != '\n' and end[n] != '#' and end[n] != '\0':
            raise ValueError("extra data on line")
        return (ts, result)

    # Formatters
    def format_float64(self, d):
        n = len(d) - 1
        if n != self.count:
            raise ValueError("wrong number of values for layout type: "
                             "got %d, wanted %d" % (n, self.count))
        s = "%.6f" % d[0]
        for i in range(n):
            s += " %f" % d[i+1]
        return s + "\n"

    def format_uint16(self, d):
        n = len(d) - 1
        if n != self.count:
            raise ValueError("wrong number of values for layout type: "
                             "got %d, wanted %d" % (n, self.count))
        s = "%.6f" % d[0]
        for i in range(n):
            s += " %d" % d[i+1]
        return s + "\n"

    # PyTables description
    def description(self):
        """Return the PyTables description of this layout"""
        desc = {}
        desc['timestamp'] = tables.Col.from_type('float64', pos=0)
        for n in range(self.count):
            desc['c' + str(n+1)] = tables.Col.from_type(self.datatype, pos=n+1)
        return tables.Description(desc)

# Get a layout by name
def get_named(typestring):
    try:
        return Layout(typestring)
    except KeyError:
        compat = { "PrepData": "float32_8",
                   "RawData": "uint16_6",
                   "RawNotchedData": "uint16_9" }
        return Layout(compat[typestring])

class Parser(object):
    """Object that parses and stores ASCII data for inclusion into the
    database"""

    def __init__(self, layout):
        if issubclass(layout.__class__, Layout):
            self.layout = layout
        else:
            try:
                self.layout = get_named(layout)
            except KeyError:
                raise TypeError("unknown layout")

        self.data = []
        self.min_timestamp = None
        self.max_timestamp = None

    def parse(self, textdata):
        """
        Parse the data, provided as lines of text, using the current
        layout, into an internal data structure suitable for a
        pytables 'table.append(parser.data)'.
        """
        cdef double last_ts = 0, ts
        cdef int n = 0, i
        cdef char *line

        indata = cStringIO.StringIO(textdata)
        # Assume any parsing error is a real error.
        # In the future we might want to skip completely empty lines,
        # or partial lines right before EOF?
        try:
            self.data = []
            for pyline in indata:
                line = pyline
                n += 1
                if line[0] == '#':
                    continue
                (ts, row) = self.layout.parse(line)
                if ts < last_ts:
                    raise ValueError("timestamp is not "
                                     "monotonically increasing")
                last_ts = ts
                self.data.append(row)
        except (ValueError, IndexError, TypeError) as e:
            raise ParserError(n, "error: " + e.message)

        # Mark timestamp ranges
        if len(self.data):
            self.min_timestamp = self.data[0][0]
            self.max_timestamp = self.data[-1][0]

class Formatter(object):
    """Object that formats database data into ASCII"""

    def __init__(self, layout):
        if issubclass(layout.__class__, Layout):
            self.layout = layout
        else:
            try:
                self.layout = get_named(layout)
            except KeyError:
                raise TypeError("unknown layout")

    def format(self, data):
        """
        Format raw data from the database, using the current layout,
        as lines of ASCII text.
        """
        text = cStringIO.StringIO()
        try:
            for row in data:
                text.write(self.layout.format(row))
        except (ValueError, IndexError, TypeError) as e:
            raise FormatterError("formatting error: " + e.message)
        return text.getvalue()
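For orientation, the ASCII line format these parsers and formatters handle is
a timestamp followed by `count` values; e.g. one (made-up) float32_8 row:

    1332856800.000000 1.0 2.0 3.0 4.0 5.0 6.0 7.0 8.0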
nilmdb/nilmdb.py
@@ -1,496 +0,0 @@
# -*- coding: utf-8 -*-

"""NilmDB

Object that represents a NILM database file.

Manages both the SQL database and the PyTables storage backend.
"""

# Need absolute_import so that "import nilmdb" won't pull in nilmdb.py,
# but will pull the nilmdb module instead.
from __future__ import absolute_import
import nilmdb
from nilmdb.printf import *

import sqlite3
import tables
import time
import sys
import os
import errno
import bisect

import pyximport
pyximport.install()
from nilmdb.interval import Interval, DBInterval, IntervalSet, IntervalError

# Note about performance and transactions:
#
# Committing a transaction in the default sync mode (PRAGMA synchronous=FULL)
# takes about 125msec.  sqlite3 will commit transactions at 3 times:
#    1: explicit con.commit()
#    2: between a series of DML commands and non-DML commands, e.g.
#       after a series of INSERT, SELECT, but before a CREATE TABLE or PRAGMA.
#    3: at the end of an explicit transaction, e.g. "with self.con as con:"
#
# To speed up testing, or if this transaction speed becomes an issue,
# the sync=False option to NilmDB.__init__ will set PRAGMA synchronous=OFF.

# Don't touch old entries -- just add new ones.
_sql_schema_updates = {
    0: """
        -- All streams
        CREATE TABLE streams(
            id INTEGER PRIMARY KEY,       -- stream ID
            path TEXT UNIQUE NOT NULL,    -- path, e.g. '/newton/prep'
            layout TEXT NOT NULL          -- layout name, e.g. float32_8
        );

        -- Individual timestamped ranges in those streams.
        -- For a given start_time and end_time, this tells us that the
        -- data is stored between start_pos and end_pos.
        -- Times are stored as μs since Unix epoch
        -- Positions are opaque: PyTables rows, file offsets, etc.
        --
        -- Note: end_pos points to the row _after_ end_time, so end_pos-1
        -- is the last valid row.
        CREATE TABLE ranges(
            stream_id INTEGER NOT NULL,
            start_time INTEGER NOT NULL,
            end_time INTEGER NOT NULL,
            start_pos INTEGER NOT NULL,
            end_pos INTEGER NOT NULL
        );
        CREATE INDEX _ranges_index ON ranges (stream_id, start_time, end_time);
        """,

    1: """
        -- Generic dictionary-type metadata that can be associated with a stream
        CREATE TABLE metadata(
            stream_id INTEGER NOT NULL,
            key TEXT NOT NULL,
            value TEXT
        );
        """,
}
|
||||
class NilmDBError(Exception):
|
||||
"""Base exception for NilmDB errors"""
|
||||
def __init__(self, message = "Unspecified error"):
|
||||
Exception.__init__(self, self.__class__.__name__ + ": " + message)
|
||||
|
||||
class StreamError(NilmDBError):
|
||||
pass
|
||||
|
||||
class OverlapError(NilmDBError):
|
||||
pass
|
||||
|
||||
# Helper that lets us pass a Pytables table into bisect
|
||||
class BisectableTable(object):
|
||||
def __init__(self, table):
|
||||
self.table = table
|
||||
def __getitem__(self, index):
|
||||
return self.table[index][0]
|
||||
|
||||
class NilmDB(object):
|
||||
verbose = 0
|
||||
|
||||
def __init__(self, basepath, sync=True, max_results=None):
|
||||
# set up path
|
||||
self.basepath = os.path.abspath(basepath.rstrip('/'))
|
||||
|
||||
# Create the database path if it doesn't exist
|
||||
try:
|
||||
os.makedirs(self.basepath)
|
||||
except OSError as e:
|
||||
if e.errno != errno.EEXIST:
|
||||
raise IOError("can't create tree " + self.basepath)
|
||||
|
||||
# Our HD5 file goes inside it
|
||||
h5filename = os.path.abspath(self.basepath + "/data.h5")
|
||||
self.h5file = tables.openFile(h5filename, "a", "NILM Database")
|
||||
|
||||
# SQLite database too
|
||||
sqlfilename = os.path.abspath(self.basepath + "/data.sql")
|
||||
# We use check_same_thread = False, assuming that the rest
|
||||
# of the code (e.g. Server) will be smart and not access this
|
||||
# database from multiple threads simultaneously. That requirement
|
||||
# may be relaxed later.
|
||||
self.con = sqlite3.connect(sqlfilename, check_same_thread = False)
|
||||
self._sql_schema_update()
|
||||
|
||||
# See big comment at top about the performance implications of this
|
||||
if sync:
|
||||
self.con.execute("PRAGMA synchronous=FULL")
|
||||
else:
|
||||
self.con.execute("PRAGMA synchronous=OFF")
|
||||
|
||||
# Approximate largest number of elements that we want to send
|
||||
# in a single reply (for stream_intervals, stream_extract)
|
||||
if max_results:
|
||||
self.max_results = max_results
|
||||
else:
|
||||
self.max_results = 16384
|
||||
|
||||
self.opened = True
|
||||
|
||||
# Cached intervals
|
||||
self._cached_iset = {}
|
||||
|
||||
    def __del__(self):
        if "opened" in self.__dict__: # pragma: no cover
            fprintf(sys.stderr,
                    "error: NilmDB.close() wasn't called, path %s",
                    self.basepath)

    def get_basepath(self):
        return self.basepath

    def close(self):
        if self.con:
            self.con.commit()
            self.con.close()
        self.h5file.close()
        del self.opened

    def _sql_schema_update(self):
        cur = self.con.cursor()
        version = cur.execute("PRAGMA user_version").fetchone()[0]
        oldversion = version

        while version in _sql_schema_updates:
            cur.executescript(_sql_schema_updates[version])
            version = version + 1
            if self.verbose: # pragma: no cover
                printf("Schema updated to %d\n", version)

        if version != oldversion:
            with self.con:
                cur.execute("PRAGMA user_version = {v:d}".format(v=version))

    def _get_intervals(self, stream_id):
        """
        Return a mutable IntervalSet corresponding to the given stream ID.
        """
        # Load from database if not cached
        if stream_id not in self._cached_iset:
            iset = IntervalSet()
            result = self.con.execute("SELECT start_time, end_time, "
                                      "start_pos, end_pos "
                                      "FROM ranges "
                                      "WHERE stream_id=?", (stream_id,))
            try:
                for (start_time, end_time, start_pos, end_pos) in result:
                    iset += DBInterval(start_time, end_time,
                                       start_time, end_time,
                                       start_pos, end_pos)
            except IntervalError as e: # pragma: no cover
                raise NilmDBError("unexpected overlap in ranges table!")
            self._cached_iset[stream_id] = iset
        # Return cached value
        return self._cached_iset[stream_id]

    # TODO: Split add_interval into two pieces, one to add
    # and one to flush to disk?
    # Need to think about this.  Basic problem is that we can't
    # mess with intervals once they're in the IntervalSet,
    # without mucking with bxinterval internals.

    # Maybe add a separate optimization step?
    # Join intervals that have a fairly small gap between them
    def _add_interval(self, stream_id, interval, start_pos, end_pos):
        """
        Add interval to the internal interval cache, and to the database.
        Note: arguments must be ints (not numpy.int64, etc)
        """
        # Ensure this stream's intervals are cached, and add the new
        # interval to that cache.
        iset = self._get_intervals(stream_id)
        try:
            iset += DBInterval(interval.start, interval.end,
                               interval.start, interval.end,
                               start_pos, end_pos)
        except IntervalError as e: # pragma: no cover
            raise NilmDBError("new interval overlaps existing data")

        # Insert into the database
        self.con.execute("INSERT INTO ranges "
                         "(stream_id,start_time,end_time,start_pos,end_pos) "
                         "VALUES (?,?,?,?,?)",
                         (stream_id, interval.start, interval.end,
                          int(start_pos), int(end_pos)))
        self.con.commit()

    def stream_list(self, path = None, layout = None):
        """Return list of [path, layout] lists of all streams
        in the database.

        If path is specified, include only streams with a path that
        matches the given string.

        If layout is specified, include only streams with a layout
        that matches the given string.
        """
        where = "WHERE 1=1"
        params = ()
        if layout:
            where += " AND layout=?"
            params += (layout,)
        if path:
            where += " AND path=?"
            params += (path,)
        result = self.con.execute("SELECT path, layout "
                                  "FROM streams " + where, params).fetchall()

        return sorted(list(x) for x in result)

    def stream_intervals(self, path, start = None, end = None):
        """
        Returns (intervals, restart) tuple.

        intervals is a list of [start,end] timestamps of all intervals
        that exist for path, between start and end.

        restart, if nonzero, means that there were too many results to
        return in a single request.  The data is complete from the
        starting timestamp to the point at which it was truncated,
        and a new request with a start time of 'restart' will fetch
        the next block of data.
        """
        stream_id = self._stream_id(path)
        intervals = self._get_intervals(stream_id)
        requested = Interval(start or 0, end or 1e12)
        result = []
        for n, i in enumerate(intervals.intersection(requested)):
            if n >= self.max_results:
                restart = i.start
                break
            result.append([i.start, i.end])
        else:
            restart = 0
        return (result, restart)
    def stream_create(self, path, layout_name):
        """Create a new table in the database.

        path: path to the data (e.g. '/newton/prep').
              Paths must contain at least two elements, e.g.:
                /newton/prep
                /newton/raw
                /newton/upstairs/prep
                /newton/upstairs/raw

        layout_name: string for nilmdb.layout.get_named(), e.g. 'float32_8'
        """
        if path[0] != '/':
            raise ValueError("paths must start with /")
        [ group, node ] = path.rsplit("/", 1)
        if group == '':
            raise ValueError("invalid path")

        # Make the group structure, one element at a time
        group_path = group.lstrip('/').split("/")
        for i in range(len(group_path)):
            parent = "/" + "/".join(group_path[0:i])
            child = group_path[i]
            try:
                self.h5file.createGroup(parent, child)
            except tables.NodeError:
                pass

        # Get description
        try:
            desc = nilmdb.layout.get_named(layout_name).description()
        except KeyError:
            raise ValueError("no such layout")

        # Estimated table size (for PyTables optimization purposes): assume
        # 3 months worth of data at 8 KHz.  It's OK if this is wrong.
        exp_rows = 8000 * 60*60*24*30*3

        # Create the table
        table = self.h5file.createTable(group, node,
                                        description = desc,
                                        expectedrows = exp_rows)

        # Insert into SQL database once PyTables is happy
        with self.con as con:
            con.execute("INSERT INTO streams (path, layout) VALUES (?,?)",
                        (path, layout_name))

    def _stream_id(self, path):
        """Return unique stream ID"""
        result = self.con.execute("SELECT id FROM streams WHERE path=?",
                                  (path,)).fetchone()
        if result is None:
            raise StreamError("No stream at path " + path)
        return result[0]

    def stream_set_metadata(self, path, data):
        """Set stream metadata from a dictionary, e.g.
           { 'description': 'Downstairs lighting',
             'v_scaling': 123.45 }
        This replaces all existing metadata.
        """
        stream_id = self._stream_id(path)
        with self.con as con:
            con.execute("DELETE FROM metadata "
                        "WHERE stream_id=?", (stream_id,))
            for key in data:
                if data[key] != '':
                    con.execute("INSERT INTO metadata VALUES (?, ?, ?)",
                                (stream_id, key, data[key]))

    def stream_get_metadata(self, path):
        """Return stream metadata as a dictionary."""
        stream_id = self._stream_id(path)
        result = self.con.execute("SELECT metadata.key, metadata.value "
                                  "FROM metadata "
                                  "WHERE metadata.stream_id=?", (stream_id,))
        data = {}
        for (key, value) in result:
            data[key] = value
        return data
    def stream_update_metadata(self, path, newdata):
        """Update stream metadata from a dictionary"""
        data = self.stream_get_metadata(path)
        data.update(newdata)
        self.stream_set_metadata(path, data)

    def stream_insert(self, path, parser, old_timestamp = None):
        """Insert new data into the database.
        path: Path at which to add the data
        parser: nilmdb.layout.Parser instance full of data to insert
        """
        if (not parser.min_timestamp or not parser.max_timestamp or
            not len(parser.data)):
            raise StreamError("no data provided")

        # If we were provided with an old timestamp, the expectation
        # is that the client has a contiguous block of time it is sending,
        # but it's doing it over multiple calls to stream_insert.
        # old_timestamp is the max_timestamp of the previous insert.
        # To make things continuous, use that as our starting timestamp
        # instead of what the parser found.
        if old_timestamp:
            min_timestamp = old_timestamp
        else:
            min_timestamp = parser.min_timestamp

        # First check for basic overlap using timestamp info given.
        stream_id = self._stream_id(path)
        iset = self._get_intervals(stream_id)
        interval = Interval(min_timestamp, parser.max_timestamp)
        if iset.intersects(interval):
            raise OverlapError("new data overlaps existing data: "
                               + str(iset & interval))

        # Insert the data into pytables
        table = self.h5file.getNode(path)
        row_start = table.nrows
        table.append(parser.data)
        row_end = table.nrows
        table.flush()

        # Insert the record into the sql database.
        # Casts are to convert from numpy.int64.
        self._add_interval(stream_id, interval, int(row_start), int(row_end))

        # And that's all
        return "ok"

    def _find_start(self, table, interval):
        """
        Given a DBInterval, find the row in the database that
        corresponds to the start time.  Return the first database
        position with a timestamp (first element) greater than or
        equal to 'start'.
        """
        # Optimization for the common case where an interval wasn't truncated
        if interval.start == interval.db_start:
            return interval.db_startpos
        return bisect.bisect_left(BisectableTable(table),
                                  interval.start,
                                  interval.db_startpos,
                                  interval.db_endpos)

    def _find_end(self, table, interval):
        """
        Given a DBInterval, find the row in the database that follows
        the end time.  Return the first database position after the
        row with timestamp (first element) greater than or equal
        to 'end'.
        """
        # Optimization for the common case where an interval wasn't truncated
        if interval.end == interval.db_end:
            return interval.db_endpos
        # Note that we still use bisect_left here, because we don't
        # want to include the given timestamp in the results.  This is
        # so that queries like 1:00 -> 2:00 and 2:00 -> 3:00 return
        # non-overlapping data.
        return bisect.bisect_left(BisectableTable(table),
                                  interval.end,
                                  interval.db_startpos,
                                  interval.db_endpos)
    def stream_extract(self, path, start = None, end = None, count = False):
        """
        Returns (data, restart) tuple.

        data is a list of raw data from the database, suitable for
        passing to e.g. nilmdb.layout.Formatter to translate into
        textual form.

        restart, if nonzero, means that there were too many results to
        return in a single request.  The data is complete from the
        starting timestamp to the point at which it was truncated,
        and a new request with a start time of 'restart' will fetch
        the next block of data.

        count, if true, means to not return raw data, but just the count
        of rows that would have been returned.  This is much faster
        than actually fetching the data.  It is not limited by
        max_results.
        """
        table = self.h5file.getNode(path)
        stream_id = self._stream_id(path)
        intervals = self._get_intervals(stream_id)
        requested = Interval(start or 0, end or 1e12)
        result = []
        matched = 0
        remaining = self.max_results
        restart = 0
        for interval in intervals.intersection(requested):
            # Reading single rows from the table is too slow, so
            # we use two bisections to find both the starting and
            # ending row for this particular interval, then
            # read the entire range as one slice.
            row_start = self._find_start(table, interval)
            row_end = self._find_end(table, interval)

            if count:
                matched += row_end - row_start
                continue

            # Shorten it if we'll hit the maximum number of results
            row_max = row_start + remaining
            if row_max < row_end:
                row_end = row_max
                restart = table[row_max][0]

            # Gather these results up
            result.extend(table[row_start:row_end])

            # Count them
            remaining -= row_end - row_start

            if restart:
                break

        if count:
            return matched
        return (result, restart)
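A sketch (not from the original sources) of how the NilmDB methods above fit together from a caller's point of view: chunked inserts chained with old_timestamp, interval listing paged with restart, and a fast row count. The path, and the first_chunk_text/second_chunk_text ASCII payloads, are hypothetical; "db" is an open NilmDB handle as in the earlier sketch.

    import nilmdb.layout

    db.stream_create("/newton/prep", "float32_8")

    # Chunked insert: pass the previous max_timestamp back in as
    # old_timestamp so the two chunks form one contiguous interval.
    p1 = nilmdb.layout.Parser("float32_8")
    p1.parse(first_chunk_text)
    db.stream_insert("/newton/prep", p1)
    p2 = nilmdb.layout.Parser("float32_8")
    p2.parse(second_chunk_text)
    db.stream_insert("/newton/prep", p2, old_timestamp = p1.max_timestamp)

    # Page through intervals using the restart mechanism.
    start, end = None, None
    while True:
        (ivals, restart) = db.stream_intervals("/newton/prep", start, end)
        for (s, e) in ivals:
            print s, e
        if restart == 0:
            break
        start = restart

    # Row count only; not limited by max_results.
    print db.stream_extract("/newton/prep", count = True)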
@@ -1,9 +0,0 @@
"""printf, fprintf, sprintf"""

from __future__ import print_function

def printf(_str, *args):
    print(_str % args, end='')

def fprintf(_file, _str, *args):
    print(_str % args, end='', file=_file)

def sprintf(_str, *args):
    return (_str % args)
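For reference, a quick sketch of these helpers in use; they are thin printf-style wrappers over Python's % formatting:

    import sys
    from nilmdb.printf import printf, fprintf, sprintf

    printf("%d rows\n", 42)                 # newline comes from the format string
    fprintf(sys.stderr, "error: %s\n", "detail")
    line = sprintf("%.6f ", 1234567890.5)   # returns "1234567890.500000 "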
392  nilmdb/rbtree.py
@@ -1,392 +0,0 @@
"""Red-black tree, where keys are stored as start/end timestamps."""

import sys

class RBNode(object):
    """One node of the Red/Black tree.  obj points to any object,
    'start' and 'end' are timestamps that represent the key."""
    def __init__(self, obj = None, start = None, end = None):
        """If given an object but no start/end times, get the
        start/end times from the object.

        If given start/end times, obj can be anything, including None."""
        self.obj = obj
        if start is None:
            start = obj.start
        if end is None:
            end = obj.end
        self.start = start
        self.end = end
        self.red = False
        self.left = None
        self.right = None

    def __str__(self):
        if self.red:
            color = "R"
        else:
            color = "B"
        return ("[node "
                + str(self.start) + " -> " + str(self.end) + " "
                + color + "]")

class RBTree(object):
    """Red/Black tree"""

    # Init
    def __init__(self):
        self.nil = RBNode(start = sys.float_info.min,
                          end = sys.float_info.min)
        self.nil.left = self.nil
        self.nil.right = self.nil
        self.nil.parent = self.nil
        self.nil.nil = True

        self.root = RBNode(start = sys.float_info.max,
                           end = sys.float_info.max)
        self.root.left = self.nil
        self.root.right = self.nil
        self.root.parent = self.nil

    # Rotations and basic operations
    def __rotate_left(self, x):
        y = x.right
        x.right = y.left
        if y.left is not self.nil:
            y.left.parent = x
        y.parent = x.parent
        if x is x.parent.left:
            x.parent.left = y
        else:
            x.parent.right = y
        y.left = x
        x.parent = y

    def __rotate_right(self, y):
        x = y.left
        y.left = x.right
        if x.right is not self.nil:
            x.right.parent = y
        x.parent = y.parent
        if y is y.parent.left:
            y.parent.left = x
        else:
            y.parent.right = x
        x.right = y
        y.parent = x

    def __successor(self, x):
        """Returns the successor of RBNode x"""
        y = x.right
        if y is not self.nil:
            while y.left is not self.nil:
                y = y.left
        else:
            y = x.parent
            while x is y.right:
                x = y
                y = y.parent
            if y is self.root:
                return self.nil
        return y

    def _predecessor(self, x):
        """Returns the predecessor of RBNode x"""
        y = x.left
        if y is not self.nil:
            while y.right is not self.nil:
                y = y.right
        else:
            y = x.parent
            while x is y.left:
                if y is self.root:
                    y = self.nil
                    break
                x = y
                y = y.parent
        return y
    # Insertion
    def insert(self, z):
        """Insert RBNode z into RBTree and rebalance as necessary"""
        z.left = self.nil
        z.right = self.nil
        y = self.root
        x = self.root.left
        while x is not self.nil:
            y = x
            if (x.start > z.start or (x.start == z.start and x.end > z.end)):
                x = x.left
            else:
                x = x.right
        z.parent = y
        if (y is self.root or
            (y.start > z.start or (y.start == z.start and y.end > z.end))):
            y.left = z
        else:
            y.right = z
        # relabel/rebalance
        self.__insert_fixup(z)

    def __insert_fixup(self, x):
        """Rebalance/fix RBTree after a simple insertion of RBNode x"""
        x.red = True
        while x.parent.red:
            if x.parent is x.parent.parent.left:
                y = x.parent.parent.right
                if y.red:
                    x.parent.red = False
                    y.red = False
                    x.parent.parent.red = True
                    x = x.parent.parent
                else:
                    if x is x.parent.right:
                        x = x.parent
                        self.__rotate_left(x)
                    x.parent.red = False
                    x.parent.parent.red = True
                    self.__rotate_right(x.parent.parent)
            else: # same as above, left/right switched
                y = x.parent.parent.left
                if y.red:
                    x.parent.red = False
                    y.red = False
                    x.parent.parent.red = True
                    x = x.parent.parent
                else:
                    if x is x.parent.left:
                        x = x.parent
                        self.__rotate_right(x)
                    x.parent.red = False
                    x.parent.parent.red = True
                    self.__rotate_left(x.parent.parent)
        self.root.left.red = False

    # Deletion
    def delete(self, z):
        if z.left is None or z.right is None:
            raise AttributeError("you can only delete a node object "
                                 + "from the tree; use find() to get one")
        if z.left is self.nil or z.right is self.nil:
            y = z
        else:
            y = self.__successor(z)
        if y.left is self.nil:
            x = y.right
        else:
            x = y.left
        x.parent = y.parent
        if x.parent is self.root:
            self.root.left = x
        else:
            if y is y.parent.left:
                y.parent.left = x
            else:
                y.parent.right = x
        if y is not z:
            # y is the node to splice out, x is its child
            y.left = z.left
            y.right = z.right
            y.parent = z.parent
            z.left.parent = y
            z.right.parent = y
            if z is z.parent.left:
                z.parent.left = y
            else:
                z.parent.right = y
            if not y.red:
                y.red = z.red
                self.__delete_fixup(x)
            else:
                y.red = z.red
        else:
            if not y.red:
                self.__delete_fixup(x)

    def __delete_fixup(self, x):
        """Rebalance/fix RBTree after a deletion.  RBNode x is the
        child of the spliced out node."""
        rootLeft = self.root.left
        while not x.red and x is not rootLeft:
            if x is x.parent.left:
                w = x.parent.right
                if w.red:
                    w.red = False
                    x.parent.red = True
                    self.__rotate_left(x.parent)
                    w = x.parent.right
                if not w.right.red and not w.left.red:
                    w.red = True
                    x = x.parent
                else:
                    if not w.right.red:
                        w.left.red = False
                        w.red = True
                        self.__rotate_right(w)
                        w = x.parent.right
                    w.red = x.parent.red
                    x.parent.red = False
                    w.right.red = False
                    self.__rotate_left(x.parent)
                    x = rootLeft # exit loop
            else: # same as above, left/right switched
                w = x.parent.left
                if w.red:
                    w.red = False
                    x.parent.red = True
                    self.__rotate_right(x.parent)
                    w = x.parent.left
                if not w.left.red and not w.right.red:
                    w.red = True
                    x = x.parent
                else:
                    if not w.left.red:
                        w.right.red = False
                        w.red = True
                        self.__rotate_left(w)
                        w = x.parent.left
                    w.red = x.parent.red
                    x.parent.red = False
                    w.left.red = False
                    self.__rotate_right(x.parent)
                    x = rootLeft # exit loop
        x.red = False
    # Rendering
    def __render_dot_node(self, node, max_depth = 20):
        """Render a single node and its children into a dot graph fragment"""
        from printf import sprintf
        if max_depth == 0:
            return ""
        if node is self.nil:
            return ""
        def c(red):
            if red:
                return 'color="#ff0000", style=filled, fillcolor="#ffc0c0"'
            else:
                return 'color="#000000", style=filled, fillcolor="#c0c0c0"'
        s = sprintf("%d [label=\"%g\\n%g\", %s];\n",
                    id(node),
                    node.start, node.end,
                    c(node.red))

        if node.left is self.nil:
            s += sprintf("L%d [label=\"-\", %s];\n", id(node), c(False))
            s += sprintf("%d -> L%d [label=L];\n", id(node), id(node))
        else:
            s += sprintf("%d -> %d [label=L];\n", id(node), id(node.left))
        if node.right is self.nil:
            s += sprintf("R%d [label=\"-\", %s];\n", id(node), c(False))
            s += sprintf("%d -> R%d [label=R];\n", id(node), id(node))
        else:
            s += sprintf("%d -> %d [label=R];\n", id(node), id(node.right))
        s += self.__render_dot_node(node.left, max_depth-1)
        s += self.__render_dot_node(node.right, max_depth-1)
        return s

    def render_dot(self, title = "RBTree"):
        """Render the entire RBTree as a dot graph"""
        return ("digraph rbtree {\n"
                + self.__render_dot_node(self.root.left)
                + "}\n")

    def render_dot_live(self, title = "RBTree"):
        """Render the entire RBTree as a dot graph, live GTK view"""
        import gtk
        import gtk.gdk
        sys.path.append("/usr/share/xdot")
        import xdot
        xdot.Pen.highlighted = lambda pen: pen
        s = ("digraph rbtree {\n"
             + self.__render_dot_node(self.root)
             + "}\n")
        window = xdot.DotWindow()
        window.set_dotcode(s)
        window.set_title(title + " - any key to close")
        window.connect('destroy', gtk.main_quit)
        def quit(widget, event):
            if not event.is_modifier:
                window.destroy()
                gtk.main_quit()
        window.widget.connect('key-press-event', quit)
        gtk.main()
    # Walking, searching
    def __iter__(self):
        return self.inorder(self.root.left)

    def inorder(self, x = None):
        """Generator that performs an inorder walk for the tree
        starting at RBNode x"""
        if x is None:
            x = self.root.left
        while x.left is not self.nil:
            x = x.left
        while x is not self.nil:
            yield x
            x = self.__successor(x)

    def __find_all(self, start, end, x):
        """Find node with the specified (start,end) key.
        Also returns the largest node less than or equal to key,
        and the smallest node greater than or equal to key."""
        if x is None:
            x = self.root.left
        largest = self.nil
        smallest = self.nil
        while x is not self.nil:
            if start < x.start:
                smallest = x
                x = x.left           # start <
            elif start == x.start:
                if end < x.end:
                    smallest = x
                    x = x.left       # start =, end <
                elif end == x.end:   # found it
                    smallest = x
                    largest = x
                    break
                else:
                    largest = x
                    x = x.right      # start =, end >
            else:
                largest = x
                x = x.right          # start >
        return (x, smallest, largest)

    def find(self, start, end, x = None):
        """Find node with the key == (start,end), or None"""
        y = self.__find_all(start, end, x)[0]
        return y if y is not self.nil else None

    def find_right(self, start, end, x = None):
        """Find node with the smallest key >= (start,end), or None"""
        y = self.__find_all(start, end, x)[1]
        return y if y is not self.nil else None

    def find_left(self, start, end, x = None):
        """Find node with the largest key <= (start,end), or None"""
        y = self.__find_all(start, end, x)[2]
        return y if y is not self.nil else None

    # Intersections
    def intersect(self, start, end):
        """Generator that returns nodes that overlap the given
        (start,end) range.

        NOTE: this assumes non-overlapping intervals."""
        # Start with the leftmost node before the starting point
        n = self.find_left(start, start)
        # If we didn't find one, look for the leftmost node before the
        # ending point instead.
        if n is None:
            n = self.find_left(end, end)
        # If we still didn't find it, there are no intervals that intersect.
        if n is None:
            return

        # Yield this node and all successors until we pass the requested
        # endpoint.  (A sketch of the missing walk; the original stub
        # ended here with a bare generator.)
        while n is not None and n is not self.nil and n.start < end:
            if n.end > start:
                yield n
            n = self.__successor(n)
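A small, self-contained sketch of the tree in use (not from the original file): keys are (start, end) pairs, and intersect() walks the non-overlapping intervals that touch a query range, given the walk sketched above.

    from nilmdb.rbtree import RBTree, RBNode

    t = RBTree()
    for (s, e) in [ (0, 10), (10, 20), (30, 40) ]:
        t.insert(RBNode(start = s, end = e))

    print t.find(10, 20)          # the exact [10 -> 20] node, or None
    for node in t.intersect(5, 35):
        print node                 # [0 -> 10], [10 -> 20], [30 -> 40]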
@@ -1,69 +0,0 @@
import Queue
import threading
import sys

# This file provides a class that will wrap an object and serialize
# all calls to its methods.  All calls to that object will be queued
# and executed from a single thread, regardless of which thread makes
# the call.

# Based partially on http://stackoverflow.com/questions/2642515/

class SerializerThread(threading.Thread):
    """Thread that retrieves call information from the queue, makes the
    call, and returns the results."""
    def __init__(self, call_queue):
        threading.Thread.__init__(self)
        self.call_queue = call_queue

    def run(self):
        while True:
            result_queue, func, args, kwargs = self.call_queue.get()
            # Terminate if result_queue is None
            if result_queue is None:
                return
            try:
                result = func(*args, **kwargs) # wrapped
            except:
                result_queue.put((sys.exc_info(), None))
            else:
                result_queue.put((None, result))

class WrapCall(object):
    """Wrap a callable using the given queues"""

    def __init__(self, call_queue, result_queue, func):
        self.call_queue = call_queue
        self.result_queue = result_queue
        self.func = func

    def __call__(self, *args, **kwargs):
        self.call_queue.put((self.result_queue, self.func, args, kwargs))
        ( exc_info, result ) = self.result_queue.get()
        if exc_info is None:
            return result
        else:
            raise exc_info[0], exc_info[1], exc_info[2]

class WrapObject(object):
    """Wrap all calls to methods in a target object with WrapCall"""

    def __init__(self, target):
        self.__wrap_target = target
        self.__wrap_call_queue = Queue.Queue()
        self.__wrap_serializer = SerializerThread(self.__wrap_call_queue)
        self.__wrap_serializer.daemon = True
        self.__wrap_serializer.start()

    def __getattr__(self, key):
        """Wrap methods of self.__wrap_target in a WrapCall instance"""
        func = getattr(self.__wrap_target, key)
        if not callable(func):
            raise TypeError("Can't serialize attribute %r (type: %s)"
                            % (key, type(func)))
        result_queue = Queue.Queue()
        return WrapCall(self.__wrap_call_queue, result_queue, func)

    def __del__(self):
        self.__wrap_call_queue.put((None, None, None, None))
        self.__wrap_serializer.join()
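A quick usage sketch (not in the original file): any object can be wrapped, and every method call then executes on the serializer's single worker thread, which is how the server shares one NilmDB handle across request threads. Counter is a hypothetical target object.

    import nilmdb.serializer

    class Counter(object):
        def __init__(self):
            self.n = 0
        def bump(self):
            self.n += 1
            return self.n

    c = nilmdb.serializer.WrapObject(Counter())
    print c.bump()   # runs on the worker thread, returns 1
    print c.bump()   # returns 2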
403  nilmdb/server.py
@@ -1,403 +0,0 @@
"""CherryPy-based server for accessing NILM database via HTTP"""

# Need absolute_import so that "import nilmdb" won't pull in nilmdb.py,
# but will pull the nilmdb module instead.
from __future__ import absolute_import
import nilmdb

from nilmdb.printf import *

import cherrypy
import sys
import time
import os
import simplejson as json

try:
    import cherrypy
    cherrypy.tools.json_out
except: # pragma: no cover
    sys.stderr.write("Cherrypy 3.2+ required\n")
    sys.exit(1)

class NilmApp(object):
    def __init__(self, db):
        self.db = db

version = "1.1"

class Root(NilmApp):
    """Root application for NILM database"""

    def __init__(self, db, version):
        super(Root, self).__init__(db)
        self.server_version = version

    # /
    @cherrypy.expose
    def index(self):
        raise cherrypy.NotFound()

    # /favicon.ico
    @cherrypy.expose
    def favicon_ico(self):
        raise cherrypy.NotFound()

    # /version
    @cherrypy.expose
    @cherrypy.tools.json_out()
    def version(self):
        return self.server_version

    # /dbpath
    @cherrypy.expose
    @cherrypy.tools.json_out()
    def dbpath(self):
        return self.db.get_basepath()

    # /dbsize
    @cherrypy.expose
    @cherrypy.tools.json_out()
    def dbsize(self):
        return nilmdb.du.du(self.db.get_basepath())
class Stream(NilmApp):
    """Stream-specific operations"""

    # /stream/list
    # /stream/list?layout=PrepData
    # /stream/list?path=/newton/prep
    @cherrypy.expose
    @cherrypy.tools.json_out()
    def list(self, path = None, layout = None):
        """List all streams in the database.  With optional path or
        layout parameter, just list streams that match the given path
        or layout"""
        return self.db.stream_list(path, layout)

    # /stream/create?path=/newton/prep&layout=PrepData
    @cherrypy.expose
    @cherrypy.tools.json_out()
    def create(self, path, layout):
        """Create a new stream in the database.  Provide path
        and one of the nilmdb.layout.layouts keys.
        """
        try:
            return self.db.stream_create(path, layout)
        except Exception as e:
            message = sprintf("%s: %s", type(e).__name__, e.message)
            raise cherrypy.HTTPError("400 Bad Request", message)

    # /stream/get_metadata?path=/newton/prep
    # /stream/get_metadata?path=/newton/prep&key=foo&key=bar
    @cherrypy.expose
    @cherrypy.tools.json_out()
    def get_metadata(self, path, key=None):
        """Get metadata for the named stream.  If optional
        key parameters are specified, only return metadata
        matching the given keys."""
        try:
            data = self.db.stream_get_metadata(path)
        except nilmdb.nilmdb.StreamError as e:
            raise cherrypy.HTTPError("404 Not Found", e.message)
        if key is None:          # If no keys specified, return them all
            key = data.keys()
        elif not isinstance(key, list):
            key = [ key ]
        result = {}
        for k in key:
            if k in data:
                result[k] = data[k]
            else:                # Return "None" for keys with no matching value
                result[k] = None
        return result

    # /stream/set_metadata?path=/newton/prep&data=<json>
    @cherrypy.expose
    @cherrypy.tools.json_out()
    def set_metadata(self, path, data):
        """Set metadata for the named stream, replacing any
        existing metadata.  Data should be a json-encoded
        dictionary"""
        try:
            data_dict = json.loads(data)
            self.db.stream_set_metadata(path, data_dict)
        except Exception as e:
            message = sprintf("%s: %s", type(e).__name__, e.message)
            raise cherrypy.HTTPError("400 Bad Request", message)
        return "ok"

    # /stream/update_metadata?path=/newton/prep&data=<json>
    @cherrypy.expose
    @cherrypy.tools.json_out()
    def update_metadata(self, path, data):
        """Update metadata for the named stream.  Data
        should be a json-encoded dictionary"""
        try:
            data_dict = json.loads(data)
            self.db.stream_update_metadata(path, data_dict)
        except Exception as e:
            message = sprintf("%s: %s", type(e).__name__, e.message)
            raise cherrypy.HTTPError("400 Bad Request", message)
        return "ok"

    # /stream/insert?path=/newton/prep
    @cherrypy.expose
    @cherrypy.tools.json_out()
    #@cherrypy.tools.disable_prb()
    def insert(self, path, old_timestamp = None):
        """
        Insert new data into the database.  Provide textual data
        (matching the path's layout) as a HTTP PUT.

        old_timestamp is used when making multiple, split-up insertions
        for a larger contiguous block of data.  The first insert
        will return the maximum timestamp that it saw, and the second
        insert should provide this timestamp as an argument.  This is
        used to extend the previous database interval rather than
        start a new one.
        """

        # Important that we always read the input before throwing any
        # errors, to keep lengths happy for persistent connections.
        # However, CherryPy 3.2.2 has a bug where this fails for GET
        # requests, so catch that.  (issue #1134)
        try:
            body = cherrypy.request.body.read()
        except TypeError:
            raise cherrypy.HTTPError("400 Bad Request", "No request body")

        # Check path and get layout
        streams = self.db.stream_list(path = path)
        if len(streams) != 1:
            raise cherrypy.HTTPError("404 Not Found", "No such stream")
        layout = streams[0][1]

        # Parse the input data
        try:
            parser = nilmdb.layout.Parser(layout)
            parser.parse(body)
        except nilmdb.layout.ParserError as e:
            raise cherrypy.HTTPError("400 Bad Request",
                                     "Error parsing input data: " +
                                     e.message)

        # Now do the nilmdb insert, passing it the parser full of data.
        try:
            if old_timestamp:
                old_timestamp = float(old_timestamp)
            result = self.db.stream_insert(path, parser, old_timestamp)
        except nilmdb.nilmdb.NilmDBError as e:
            raise cherrypy.HTTPError("400 Bad Request", e.message)

        # Return the maximum timestamp that we saw.  The client will
        # return this back to us as the old_timestamp parameter, if
        # it has more data to send.
        return ("ok", parser.max_timestamp)
    # /stream/intervals?path=/newton/prep
    # /stream/intervals?path=/newton/prep&start=1234567890.0&end=1234567899.0
    @cherrypy.expose
    def intervals(self, path, start = None, end = None):
        """
        Get intervals from backend database.  Streams the resulting
        intervals as JSON strings separated by newlines.  This may
        make multiple requests to the nilmdb backend to avoid causing
        it to block for too long.
        """
        if start is not None:
            start = float(start)
        if end is not None:
            end = float(end)

        if start is not None and end is not None:
            if end < start:
                raise cherrypy.HTTPError("400 Bad Request",
                                         "end before start")

        streams = self.db.stream_list(path = path)
        if len(streams) != 1:
            raise cherrypy.HTTPError("404 Not Found", "No such stream")

        def content(start, end):
            # Note: disable response.stream below to get better debug info
            # from tracebacks in this subfunction.
            while True:
                (intervals, restart) = self.db.stream_intervals(path, start, end)
                response = ''.join([ json.dumps(i) + "\n" for i in intervals ])
                yield response
                if restart == 0:
                    break
                start = restart
        return content(start, end)
    intervals._cp_config = { 'response.stream': True } # chunked HTTP response

    # /stream/extract?path=/newton/prep&start=1234567890.0&end=1234567899.0
    @cherrypy.expose
    def extract(self, path, start = None, end = None, count = False):
        """
        Extract data from backend database.  Streams the resulting
        entries as ASCII text lines separated by newlines.  This may
        make multiple requests to the nilmdb backend to avoid causing
        it to block for too long.

        Add count=True to return a count rather than actual data.
        """
        if start is not None:
            start = float(start)
        if end is not None:
            end = float(end)

        # Check parameters
        if start is not None and end is not None:
            if end < start:
                raise cherrypy.HTTPError("400 Bad Request",
                                         "end before start")

        # Check path and get layout
        streams = self.db.stream_list(path = path)
        if len(streams) != 1:
            raise cherrypy.HTTPError("404 Not Found", "No such stream")
        layout = streams[0][1]

        # Get formatter
        formatter = nilmdb.layout.Formatter(layout)

        def content(start, end, count):
            # Note: disable response.stream below to get better debug info
            # from tracebacks in this subfunction.
            if count:
                matched = self.db.stream_extract(path, start, end, count)
                yield sprintf("%d\n", matched)
                return

            while True:
                (data, restart) = self.db.stream_extract(path, start, end)

                # Format the data and yield it
                yield formatter.format(data)

                if restart == 0:
                    return
                start = restart
        return content(start, end, count)
    extract._cp_config = { 'response.stream': True } # chunked HTTP response
class Exiter(object):
    """App that exits the server, for testing"""
    @cherrypy.expose
    def index(self):
        cherrypy.response.headers['Content-Type'] = 'text/plain'
        def content():
            yield 'Exiting by request'
            raise SystemExit
        return content()
    index._cp_config = { 'response.stream': True }

class Server(object):
    def __init__(self, db, host = '127.0.0.1', port = 8080,
                 stoppable = False,       # whether /exit URL exists
                 embedded = True,         # hide diagnostics and output, etc
                 fast_shutdown = False,   # don't wait for clients to disconn.
                 force_traceback = False  # include traceback in all errors
                 ):
        self.version = version

        # Need to wrap DB object in a serializer because we'll call
        # into it from separate threads.
        self.embedded = embedded
        self.db = nilmdb.serializer.WrapObject(db)
        cherrypy.config.update({
            'server.socket_host': host,
            'server.socket_port': port,
            'engine.autoreload_on': False,
            'server.max_request_body_size': 4*1024*1024,
            'error_page.default': self.json_error_page,
            })
        if self.embedded:
            cherrypy.config.update({ 'environment': 'embedded' })

        # Send tracebacks in error responses.  They're hidden by the
        # error_page function for client errors (code 400-499).
        cherrypy.config.update({ 'request.show_tracebacks' : True })
        self.force_traceback = force_traceback

        cherrypy.tree.apps = {}
        cherrypy.tree.mount(Root(self.db, self.version), "/")
        cherrypy.tree.mount(Stream(self.db), "/stream")
        if stoppable:
            cherrypy.tree.mount(Exiter(), "/exit")

        # Shutdowns normally wait for clients to disconnect.  To speed
        # up tests, set fast_shutdown = True
        if fast_shutdown:
            # Setting timeout to 0 triggers os._exit(70) at shutdown, grr...
            cherrypy.server.shutdown_timeout = 0.01
        else:
            cherrypy.server.shutdown_timeout = 5

    def json_error_page(self, status, message, traceback, version):
        """Return a custom error page in JSON so the client can parse it"""
        errordata = { "status" : status,
                      "message" : message,
                      "traceback" : traceback }
        # Don't send a traceback if the error was 400-499 (client's fault)
        try:
            code = int(status.split()[0])
            if not self.force_traceback:
                if code >= 400 and code <= 499:
                    errordata["traceback"] = ""
        except Exception as e: # pragma: no cover
            pass
        # Override the response type, which was previously set to text/html
        cherrypy.serving.response.headers['Content-Type'] = (
            "application/json;charset=utf-8" )
        # Undo the HTML escaping that cherrypy's get_error_page function
        # applies (cherrypy issue 1135)
        for k, v in errordata.iteritems():
            v = v.replace("&lt;", "<")
            v = v.replace("&gt;", ">")
            v = v.replace("&amp;", "&")
            errordata[k] = v
        return json.dumps(errordata, separators=(',',':'))

    def start(self, blocking = False, event = None):

        if not self.embedded: # pragma: no cover
            # Handle signals nicely
            if hasattr(cherrypy.engine, "signal_handler"):
                cherrypy.engine.signal_handler.subscribe()
            if hasattr(cherrypy.engine, "console_control_handler"):
                cherrypy.engine.console_control_handler.subscribe()

        # Cherrypy stupidly calls os._exit(70) when it can't bind the
        # port.  At least try to print a reasonable error and continue
        # in this case, rather than just dying silently (as we would
        # otherwise do in embedded mode)
        real_exit = os._exit
        def fake_exit(code): # pragma: no cover
            if code == os.EX_SOFTWARE:
                fprintf(sys.stderr, "error: CherryPy called os._exit!\n")
            else:
                real_exit(code)
        os._exit = fake_exit
        cherrypy.engine.start()
        os._exit = real_exit

        if event is not None:
            event.set()
        if blocking:
            try:
                cherrypy.engine.wait(cherrypy.engine.states.EXITING,
                                     interval = 0.1, channel = 'main')
            except (KeyboardInterrupt, IOError): # pragma: no cover
                cherrypy.engine.log('Keyboard Interrupt: shutting down bus')
                cherrypy.engine.exit()
            except SystemExit: # pragma: no cover
                cherrypy.engine.log('SystemExit raised: shutting down bus')
                cherrypy.engine.exit()
                raise

    def stop(self):
        cherrypy.engine.exit()
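A client-side sketch (not part of the original sources) of talking to the endpoints above with the Python 2 standard library; it assumes a Server started on the default 127.0.0.1:8080 and an existing /newton/prep stream:

    import urllib2

    base = "http://127.0.0.1:8080"
    print urllib2.urlopen(base + "/version").read()
    print urllib2.urlopen(base + "/stream/list").read()

    # Streamed interval listing: one JSON [start, end] pair per line.
    resp = urllib2.urlopen(base + "/stream/intervals?path=/newton/prep")
    for line in resp:
        print line.strip()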
46  nilmdb/test_fileinterval.py  Normal file
@@ -0,0 +1,46 @@
from nilmdb import Interval, IntervalSet, IntervalError, FileInterval
from datetime import datetime
from nose.tools import assert_raises

from test_interval import iset

def fiset(string):
    """Like iset, but builds with FileIntervals instead of Intervals"""
    iset = IntervalSet()
    for i, c in enumerate(string):
        day = datetime.strptime("{0:04d}".format(i+2000), "%Y")
        if (c == "["):
            start = day
        elif (c == "|"):
            iset += FileInterval(start, day, "test.dat")
            start = day
        elif (c == "]"):
            iset += FileInterval(start, day, "test.dat")
            del start
    return iset

def test_fileinterval_vs_interval():
    """Test FileInterval/Interval inheritance"""

    i = iset("[--]")
    f = fiset("[--]")

    # check types
    assert(isinstance(i[0], Interval))
    assert(not isinstance(i[0], FileInterval))
    assert(isinstance(f[0], Interval))
    assert(isinstance(f[0], FileInterval))

    # when doing an intersection, result should be a subset of the first arg
    u = (i & f)
    assert(isinstance(u[0], Interval))
    assert(not isinstance(u[0], FileInterval))
    u = (f & i)
    assert(isinstance(u[0], Interval))
    assert(isinstance(u[0], FileInterval))

    # they're still the same though
    assert(i == f == u)

    # just for coverage
    assert_raises(IntervalError, fiset("[]")[0].subset, f[0].start, f[0].end)
189  nilmdb/test_interval.py  Normal file
@@ -0,0 +1,189 @@
from nilmdb import Interval, IntervalSet, IntervalError
from datetime import datetime
from nose.tools import assert_raises
import itertools

def test_interval():
    """Test the Interval class"""
    d1 = datetime.strptime("19801205","%Y%m%d")
    d2 = datetime.strptime("19900216","%Y%m%d")
    d3 = datetime.strptime("20111205","%Y%m%d")

    # basic construction
    i = Interval(d1, d1)
    i = Interval(d1, d3)
    assert(i.start == d1)
    assert(i.end == d3)

    # assignment should work
    i.start = d2
    try:
        i.end = d1
        raise Exception("should have died there")
    except IntervalError:
        pass
    i.start = d1
    i.end = d2

    # end before start
    assert_raises(IntervalError, Interval, d3, d1)

    # wrong type
    assert_raises(IntervalError, Interval, 1, 2)

    # compare
    assert(Interval(d1, d2) == Interval(d1, d2))
    assert(Interval(d1, d2) < Interval(d1, d3))
    assert(Interval(d1, d3) > Interval(d1, d2))
    assert(Interval(d1, d2) < Interval(d2, d3))
    assert(Interval(d1, d3) < Interval(d2, d3))
    assert(Interval(d2, d2) > Interval(d1, d3))
    assert(Interval(d3, d3) == Interval(d3, d3))
    assert_raises(TypeError, cmp, i, 123)

    # subset
    assert(Interval(d1, d3).subset(d1, d2) == Interval(d1, d2))
    assert_raises(IntervalError, Interval(d2, d3).subset, d1, d2)

    # append
    assert(Interval(d1, d2).is_adjacent(Interval(d2,d3)))
    assert(Interval(d2, d3).is_adjacent(Interval(d1,d2)))
    assert(not Interval(d2, d3).is_adjacent(Interval(d1,d3)))
    assert_raises(TypeError, Interval(d1, d2).is_adjacent, 1)

    # misc
    assert(repr(i) == repr(eval(repr(i).replace("datetime.",""))))
    assert(str(i) == "[1980-12-05 00:00:00 -> 1990-02-16 00:00:00]")

def test_interval_intersect():
    """Test Interval intersections"""
    dates = [ datetime.strptime(year, "%y")
              for year in [ "00", "01", "02", "03" ] ]
    perm = list(itertools.permutations(dates, 2))
    prod = list(itertools.product(perm, perm))
    should_intersect = {
        False: [4, 5, 8, 20, 48, 56, 60, 96, 97, 100],
        True: [0, 1, 2, 12, 13, 14, 16, 17, 24, 25, 26, 28, 29,
               32, 49, 50, 52, 53, 61, 62, 64, 65, 68, 98, 101, 104]}
    for i,((a,b),(c,d)) in enumerate(prod):
        try:
            i1 = Interval(a, b)
            i2 = Interval(c, d)
            assert(i1.intersects(i2) == i2.intersects(i1))
            assert(i in should_intersect[i1.intersects(i2)])
        except IntervalError:
            assert(i not in should_intersect[True] and
                   i not in should_intersect[False])
    assert_raises(TypeError, i1.intersects, 1234)

def test_intervalset_construct():
    """Test interval set construction"""
    dates = [ datetime.strptime(year, "%y")
              for year in [ "00", "01", "02", "03" ] ]

    a = Interval(dates[0], dates[1])
    b = Interval(dates[1], dates[2])
    c = Interval(dates[0], dates[2])
    d = Interval(dates[2], dates[3])

    iseta = IntervalSet(a)
    isetb = IntervalSet([a, b])
    isetc = IntervalSet([a])
    assert(iseta != isetb)
    assert(iseta == isetc)
    assert(iseta != 3)
    assert(IntervalSet(a) != IntervalSet(b))

    print iseta == None
    assert_raises(TypeError, cmp, iseta, isetb)
    assert_raises(IntervalError, IntervalSet, [a, b, c])
    assert_raises(TypeError, IntervalSet, [1, 2])

    iset = IntervalSet(isetb)   # test iterator
    assert(iset == isetb)
    assert(len(iset) == 2)
    assert(len(IntervalSet()) == 0)

    # Test adding
    iset = IntervalSet(a)
    iset += IntervalSet(b)
    assert(iset == IntervalSet([a, b]))
    iset = IntervalSet(a)
    iset += b
    assert(iset == IntervalSet([a, b]))
    iset = IntervalSet(a) + IntervalSet(b)
    assert(iset == IntervalSet([a, b]))
    iset = IntervalSet(b) + a
    assert(iset == IntervalSet([a, b]))

    # A set consisting of [0-1],[1-2] should match a set consisting of [0-2]
    assert(IntervalSet([a,b]) == IntervalSet([c]))
    # Etc
    assert(IntervalSet([a,d]) != IntervalSet([c]))
    assert(IntervalSet([c]) != IntervalSet([a,d]))
    assert(IntervalSet([c,d]) != IntervalSet([b,d]))

    # misc
    assert(repr(iset) == repr(eval(repr(iset).replace("datetime.",""))))

def iset(string):
    """Build an IntervalSet from a string, for testing purposes

    Each character is a year
    [ = interval start
    | = interval end + adjacent start
    ] = interval end
    anything else is ignored
    """
    iset = IntervalSet()
    for i, c in enumerate(string):
        day = datetime.strptime("{0:04d}".format(i+2000), "%Y")
        if (c == "["):
            start = day
        elif (c == "|"):
            iset += Interval(start, day)
            start = day
        elif (c == "]"):
            iset += Interval(start, day)
            del start
    return iset

def test_intervalset_iset():
    """Test basic iset construction"""
    assert(iset(" [----] ") ==
           iset(" [-|--] "))

    assert(iset("[] [--] ") +
           iset(" [] [--]") ==
           iset("[|] [-----]"))

def test_intervalset_intersect():
    """Test intersection (&)"""
    assert_raises(TypeError, iset("[--]").__and__, 1234)

    assert(iset("[---------]") &
           iset(" [---] ") ==
           iset(" [---] "))

    assert(iset(" [---] ") &
           iset("[---------]") ==
           iset(" [---] "))

    assert(iset(" [-----]") &
           iset(" [-----] ") ==
           iset(" [--] "))

    assert(iset(" [---]") &
           iset(" [--] ") ==
           iset(" "))

    assert(iset(" [-|---]") &
           iset(" [-----|-] ") ==
           iset(" [----] "))

    assert(iset(" [-|-] ") &
           iset(" [-|--|--] ") ==
           iset(" [---] "))

    assert(iset(" [----][--]") &
           iset("[-] [--] []") ==
           iset(" [] [-] []"))
@@ -1,21 +0,0 @@
# -*- coding: utf-8 -*-

# Simple timer to time a block of code, for optimization debugging
# use like:
#   with nilmdb.Timer("flush"):
#       foo.flush()

import contextlib
import time

@contextlib.contextmanager
def Timer(name = None, tosyslog = False):
    start = time.time()
    yield
    elapsed = int((time.time() - start) * 1000)
    msg = (name or 'elapsed') + ": " + str(elapsed) + " ms"
    if tosyslog: # pragma: no cover
        import syslog
        syslog.syslog(msg)
    else:
        print msg
@@ -1,108 +0,0 @@
"""File-like objects that add timestamps to the input lines"""

from __future__ import absolute_import
from nilmdb.printf import *

import time
import os
import datetime_tz

class Timestamper(object):
    """A file-like object that adds timestamps to lines of an input file."""
    def __init__(self, file, ts_iter):
        """file: filename, or another file-like object
        ts_iter: iterator that returns a timestamp string for
        each line of the file"""
        if isinstance(file, basestring):
            self.file = open(file, "r")
        else:
            self.file = file
        self.ts_iter = ts_iter

    def close(self):
        self.file.close()

    def readline(self, *args):
        while True:
            line = self.file.readline(*args)
            if not line:
                return ""
            if line[0] == '#':
                continue
            break
        try:
            return self.ts_iter.next() + line
        except StopIteration:
            return ""

    def readlines(self, size = None):
        out = ""
        while True:
            line = self.readline()
            out += line
            if not line or (size and len(out) >= size):
                break
        return out

    def __iter__(self):
        return self

    def next(self):
        result = self.readline()
        if not result:
            raise StopIteration
        return result

class TimestamperRate(Timestamper):
    """Timestamper that uses a start time and a fixed rate"""
    def __init__(self, file, start, rate, end = None):
        """
        file: file name or object

        start: Unix timestamp for the first value

        rate: 1/rate is added to the timestamp for each line

        end: If specified, raise StopIteration before outputting a value
        greater than this."""
        def iterator(start, rate, end):
            n = 0
            rate = float(rate)
            while True:
                now = start + n / rate
                if end and now >= end:
                    raise StopIteration
                yield sprintf("%.6f ", now)
                n += 1
        # Handle case where we're passed a datetime or datetime_tz object
        if "totimestamp" in dir(start):
            start = start.totimestamp()
        Timestamper.__init__(self, file, iterator(start, rate, end))
        self.start = start
        self.rate = rate
    def __str__(self):
        start = datetime_tz.datetime_tz.fromtimestamp(self.start)
        start = start.strftime("%a, %d %b %Y %H:%M:%S %Z")
        return sprintf("TimestamperRate(..., start=\"%s\", rate=%g)",
                       str(start), self.rate)

class TimestamperNow(Timestamper):
    """Timestamper that uses current time"""
    def __init__(self, file):
        def iterator():
            while True:
                now = datetime_tz.datetime_tz.utcnow().totimestamp()
                yield sprintf("%.6f ", now)
        Timestamper.__init__(self, file, iterator())
    def __str__(self):
        return "TimestamperNow(...)"

class TimestamperNull(Timestamper):
    """Timestamper that adds nothing to each line"""
    def __init__(self, file):
        def iterator():
            while True:
                yield ""
        Timestamper.__init__(self, file, iterator())
    def __str__(self):
        return "TimestamperNull(...)"
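A minimal usage sketch of the classes above (hedged: the import path and the
sample file name are assumptions, not part of the diff):

    from nilmdb.timestamper import TimestamperRate

    # Prepend a 120 Hz timestamp sequence to each non-comment line
    ts = TimestamperRate("data.txt", start = 1332496830.0, rate = 120)
    for line in ts:
        print line,    # e.g. "1332496830.000000 <original line>"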
@@ -1,6 +0,0 @@
#!/usr/bin/python

import nilmdb
import sys

nilmdb.cmdline.Cmdline(sys.argv[1:]).run()
5 pytables-test/Makefile Normal file
@@ -0,0 +1,5 @@
all:
	time python test-indexed-read.py

clean:
	rm -f *pyc
2 pytables-test/README.jim Normal file
@@ -0,0 +1,2 @@
New version from:
http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=641485#15
12 pytables-test/TODO.txt Normal file
@@ -0,0 +1,12 @@
- Make CherryPy server that can handle simple GET/POST,
  and a httplib client that can talk to that server.
  Steps:
  - Make server handle GET
  - Make client send request, get response
  - Add request streaming to server
  - Add request streaming to client
  - Make server handle POST
  - Make client send request, get response
  - Add request streaming to server
  - Add request streaming to client
- Integrate into a server process that also keeps database open.
3 pytables-test/indexing-notes Normal file
@@ -0,0 +1,3 @@
Indexing time64 doesn't seem to work -- needed to do "time >= 1243052015" even though the actual database times
should be something like 1243052015.847000. Let's switch to just using a 64-bit integer counting e.g.
microseconds since 1970-01-01
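A one-line sketch of the proposed fix (hedged: the helper name is ours):
storing int64 microseconds instead of a float time64 makes indexed
comparisons exact.

    def us_since_epoch(t):
        """Float Unix timestamp -> int64 microseconds since 1970-01-01."""
        return long(round(t * 1e6))

    print us_since_epoch(1243052015.847000)    # -> 1243052015847000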
3 pytables-test/sample-query Normal file
@@ -0,0 +1,3 @@
timestamp > 1243052015
took 394.5 minutes in vitables
(2340 rows matched)
53 pytables-test/server.py Executable file
@@ -0,0 +1,53 @@
import sys
import tables
import nilmdb

try:
    import cherrypy
    cherrypy.tools.json_out
except:
    sys.stderr.write("Cherrypy 3.2+ required\n")
    sys.exit(1)

class NilmApp:
    def __init__(self, db):
        self.db = db

class Root(NilmApp):
    """NILM Database"""

    server_version = "1.0"

    @cherrypy.expose
    def index(self):
        raise cherrypy.NotFound()

    @cherrypy.expose
    def favicon_ico(self):
        raise cherrypy.NotFound()

    @cherrypy.expose
    @cherrypy.tools.json_out()
    def version(self):
        return self.server_version

class Stream(NilmApp):
    """Stream operations"""

    @cherrypy.expose
    @cherrypy.tools.json_out()
    def list(self):
        return

cherrypy.config.update({
    'server.socket_host': '127.0.0.1',
    'server.socket_port': 12380
})

db = nilmdb.nilmdb()
cherrypy.tree.mount(Root(db), "/")
cherrypy.tree.mount(Stream(db), "/stream")

if __name__ == "__main__":
    cherrypy.engine.start()
    cherrypy.engine.block()
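A matching httplib client sketch for this server (hedged: assumes the server
above is already running on 127.0.0.1:12380):

    import httplib
    import json

    conn = httplib.HTTPConnection("127.0.0.1", 12380)
    conn.request("GET", "/version")        # handled by Root.version()
    reply = conn.getresponse().read()      # JSON, via cherrypy.tools.json_out
    print json.loads(reply)                # -> 1.0
    conn.close()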
16 pytables-test/speed-pytables.py Normal file
@@ -0,0 +1,16 @@
import tables
import numpy

class RawSample(tables.IsDescription):
    timestamp = tables.UInt64Col()
    voltage = tables.UInt16Col(shape = 3)
    current = tables.UInt16Col(shape = 3)

h5file = tables.openFile("test.h5", mode = "w", title = "Test")
group = h5file.createGroup("/", "raw", "Raw Data")
table = h5file.createTable(group, "nilm1", RawSample, "NILM 1")

print repr(h5file)

# write rows
54 pytables-test/test-indexing.py Normal file
@@ -0,0 +1,54 @@
#!/usr/bin/python

from tables import *
import re
import time

# A class to describe our data
class PrepData(IsDescription):
    timestamp = Int64Col()
    p1 = Float32Col()
    q1 = Float32Col()
    p3 = Float32Col()
    q3 = Float32Col()
    p5 = Float32Col()
    q5 = Float32Col()
    p7 = Float32Col()
    q7 = Float32Col()

filename = "test.h5"
h5file = openFile(filename, mode = "w", title = "NILM Test")

group = h5file.createGroup("/", "newton", "Newton school")
table = h5file.createTable(group, "prep", PrepData, "Prep Data", expectedrows = 120 * 86400 * 90)

table.cols.timestamp.createIndex()

for i in range(0, 80):
    # Open file
    data = open("data/alldata")
    count = 0
    oldtime = time.time()
    prep = table.row
    for line in data:
        count = count + 1
        if count % 1000000 == 0:
            print str(i) + ": " + str((time.time() - oldtime)) + ", total " + str(count/1000000) + "m lines"
            oldtime = time.time()
        v = re.split('\s+', line)
        prep['timestamp'] = int(v[0]) + 500000000 * i
        prep['p1'] = v[1]
        prep['q1'] = v[2]
        prep['p3'] = v[3]
        prep['q3'] = v[4]
        prep['p5'] = v[5]
        prep['q5'] = v[6]
        prep['p7'] = v[7]
        prep['q7'] = v[8]
        prep.append()
    data.close()

h5file.close()
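Once the index is built, the payoff is in-kernel selections like the one in
sample-query above; a hedged sketch against the table this script creates
(PyTables 2.x API):

    h5file = openFile("test.h5", mode = "r")
    table = h5file.root.newton.prep
    # index-assisted in-kernel query on the Int64 timestamp column
    matches = [ row['p1'] for row in table.where('timestamp > 1243052015') ]
    print len(matches)
    h5file.close()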
54 pytables-test/test-write.py Normal file
@@ -0,0 +1,54 @@
#!/usr/bin/python

from tables import *
import re
import time

# A class to describe our data
class PrepData(IsDescription):
    timestamp = Time64Col()
    p1 = Float32Col()
    q1 = Float32Col()
    p3 = Float32Col()
    q3 = Float32Col()
    p5 = Float32Col()
    q5 = Float32Col()
    p7 = Float32Col()
    q7 = Float32Col()

filename = "test.h5"
h5file = openFile(filename, mode = "w", title = "NILM Test")

group = h5file.createGroup("/", "newton", "Newton school")
table = h5file.createTable(group, "prep", PrepData, "Prep Data")

table.cols.timestamp.createIndex()

for i in range(0, 80):
    # Open file
    data = open("data/alldata")
    count = 0
    oldtime = time.time()
    prep = table.row
    for line in data:
        count = count + 1
        if count % 1000000 == 0:
            print str(i) + ": " + str((time.time() - oldtime)) + ", total " + str(count/1000000) + "m lines"
            oldtime = time.time()
        v = re.split('\s+', line)
        prep['timestamp'] = float(v[0]) / 1000.0 + 500000 * i
        prep['p1'] = v[1]
        prep['q1'] = v[2]
        prep['p3'] = v[3]
        prep['q3'] = v[4]
        prep['p5'] = v[5]
        prep['q5'] = v[6]
        prep['p7'] = v[7]
        prep['q7'] = v[8]
        prep.append()
    data.close()

h5file.close()
32 runserver.py
@@ -1,32 +0,0 @@
#!/usr/bin/python

import nilmdb
import argparse

parser = argparse.ArgumentParser(description='Run the NILM server')
parser.add_argument('-p', '--port', help='Port number', type=int, default=12380)
parser.add_argument('-y', '--yappi', help='Run with yappi profiler',
                    action='store_true')
args = parser.parse_args()

# Start web app on a custom port
db = nilmdb.NilmDB("db")
server = nilmdb.Server(db, host = "127.0.0.1",
                       port = args.port,
                       embedded = False)

if args.yappi:
    print "Running in yappi"
    try:
        import yappi
        yappi.start()
        server.start(blocking = True)
    finally:
        yappi.stop()
        print "Try: yappi.print_stats(sort_type=yappi.SORTTYPE_TTOT,limit=50)"
        from IPython import embed
        embed()
else:
    server.start(blocking = True)
db.close()
27 setup.cfg
@@ -1,24 +1,9 @@
[nosetests]
# note: the value doesn't matter, that's why they're empty here
nocapture=
nologcapture= # comment to see cherrypy logs on failure
with-coverage=
cover-inclusive=
nocapture=1
with-coverage=1
cover-inclusive=1
cover-package=nilmdb
cover-erase=
##cover-html= # this works, puts html output in cover/ dir
##cover-branches= # need nose 1.1.3 for this
stop=
cover-erase=1
stop=1
verbosity=2
#tests=tests/test_cmdline.py
#tests=tests/test_layout.py
#tests=tests/test_rbtree.py
tests=tests/test_interval.py
#tests=tests/test_client.py
#tests=tests/test_timestamper.py
#tests=tests/test_serializer.py
#tests=tests/test_iteratorizer.py
#tests=tests/test_client.py:TestClient.test_client_nilmdb
#with-profile=
#profile-sort=time
##profile-restrict=10 # doesn't work right, treated as string or something
9 setup.py Executable file
@@ -0,0 +1,9 @@
#!/usr/bin/python

from distutils.core import setup

setup(name = 'nilmdb',
      version = '1.0',
      scripts = [ 'bin/nilm-test.py' ],
      packages = [ 'nilmdb' ],
      )
5 test/Makefile Normal file
@@ -0,0 +1,5 @@
all:
	python speed-readascii.py

clean:
	rm -f *pyc
4 test/printf.py Normal file
@@ -0,0 +1,4 @@
from __future__ import print_function
def printf(str, *args):
    print(str % args, end='')
67 test/speed-readascii.py Normal file
@@ -0,0 +1,67 @@
#!/usr/bin/python

from printf import printf
import time
import re
import numpy as np
import itertools

class Timer():
    def __init__(self, arg):
        self.arg = arg
    def __enter__(self): self.start = time.time()
    def __exit__(self, *args): printf("%s: %f lines/sec\n", self.arg, 1e6 / (time.time() - self.start))

def test_split():
    for n, line in enumerate(open('1m.raw', 'r')):
        out = [0]*6
        tmp = [ int(i) for i in line.partition('#')[0].split() ]
        out[0:len(tmp)] = tmp
        if (n % 100000 == 0):
            printf("line %d = %s\n", n, str(out))

def test_split2():
    for n, line in enumerate(open('1m.raw', 'r')):
        out = [0]*6
        tmp = [ int(i,10) for i in line.partition('#')[0].split() ]
        out[0:len(tmp)] = tmp
        if (n % 100000 == 0):
            printf("line %d = %s\n", n, str(out))

def test_regex():
    for n, line in enumerate(open('1m.raw', 'r')):
        out = [0]*6
        tmp = [ int(x) for x in re.findall('(\d+)\s+',line.partition('#')[0]) ]
        out[0:len(tmp)] = tmp
        if (n % 100000 == 0):
            printf("line %d = %s\n", n, str(out))

def test_bigregex():
    regex = re.compile('^(?:\s*)' + '(?:(\d+)\s+)?' * 6)
    for n, line in enumerate(open('1m.raw', 'r')):
        out = [ int(x or 0) for x in re.match(regex, line).groups() ]
        if (n % 100000 == 0):
            printf("line %d = %s\n", n, str(out))

def test_numpy():
    out = np.genfromtxt(open('1m.raw', 'r'),
                        dtype = np.dtype('i2,i2,i2,i2,i2,i2'))

with Timer("numpy"):
    test_numpy() # 106k/sec

with Timer("regex"):
    test_regex() # 121k/sec

with Timer("split"):
    test_split() # 219k/sec

with Timer("split2"):
    test_split2() # 328k/sec

with Timer("bigregex"):
    test_bigregex() # 130k/sec

# The "int" operation takes quite a while -- int(x,10) is twice as fast
# Perl does about 500k/sec
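The int() claim in the closing comment is easy to spot-check (hedged:
absolute numbers depend on the machine):

    import timeit
    print timeit.timeit("int('12345')", number = 1000000)      # guesses base
    print timeit.timeit("int('12345', 10)", number = 1000000)  # explicit base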
74 test/speed-readbinary.py Executable file
@@ -0,0 +1,74 @@
#!/usr/bin/python

from printf import printf
import time
import re
import numpy as np
import itertools
import struct
import array
import os
import mmap

class Timer():
    def __init__(self, arg):
        self.arg = arg
    def __enter__(self): self.start = time.time()
    def __exit__(self, *args): printf("%s: %f klines/sec\n", self.arg, 1e3 / (time.time() - self.start))

def test_struct1():
    """read with struct.unpack"""
    f = open('1m.bin', 'rb')
    f.seek(0,os.SEEK_END)
    filesize = f.tell()
    f.seek(0,os.SEEK_SET)
    packer = struct.Struct('!dHHHHHH')
    items = filesize / packer.size
    for n in xrange(items):
        s = f.read(packer.size)
        out = packer.unpack(s)
        if (n % 100000 == 0):
            printf("line %d = %s\n", n, str(out))

def test_struct2():
    """read with struct.unpack, convert to string"""
    f = open('1m.bin', 'rb')
    f.seek(0,os.SEEK_END)
    filesize = f.tell()
    f.seek(0,os.SEEK_SET)
    packer = struct.Struct('!dHHHHHH')
    items = filesize / packer.size
    for n in xrange(items):
        s = f.read(packer.size)
        out = packer.unpack(s)
        x = str(out)
        if (n % 100000 == 0):
            printf("line %d = %s\n", n, str(out))

def test_mmap():
    """struct.unpack with mmap"""
    with open('1m.bin', 'rb') as f:
        f.seek(0,os.SEEK_END)
        filesize = f.tell()
        f.seek(0,os.SEEK_SET)
        m = mmap.mmap(f.fileno(), filesize, access=mmap.ACCESS_READ)
        packer = struct.Struct('!dHHHHHH')
        items = filesize / packer.size
        for n in xrange(items):
            out = packer.unpack(m[packer.size*n : packer.size*(n+1)])
            if (n % 100000 == 0):
                printf("line %d = %s\n", n, str(out))

with Timer("mmap"):
    test_mmap() # 1600k

with Timer("struct1"):
    test_struct1() # 1460k

with Timer("struct2"):
    test_struct2() # 210k

# Reading from the file is again much quicker than converting to string
# Use mmap, it's good
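Besides sequential speed, mmap gives constant-time random access: record n
lives at a fixed offset, so a lookup is just a slice (hedged sketch, same
'!dHHHHHH' layout as above):

    import mmap
    import struct

    packer = struct.Struct('!dHHHHHH')
    with open('1m.bin', 'rb') as f:
        m = mmap.mmap(f.fileno(), 0, access = mmap.ACCESS_READ)
        n = 123456
        print packer.unpack(m[packer.size*n : packer.size*(n+1)])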
76 test/speed-writebinary.py Normal file
@@ -0,0 +1,76 @@
#!/usr/bin/python

from printf import printf
import time
import re
import numpy as np
import itertools
import struct
import array

class Timer():
    def __init__(self, arg):
        self.arg = arg
    def __enter__(self): self.start = time.time()
    def __exit__(self, *args): printf("%s: %f klines/sec\n", self.arg, 1e3 / (time.time() - self.start))

def read_ascii():
    for n in xrange(1000000):
        yield (1234, 2345, 3456, 4576, 5678, 6789)
#    for n, line in enumerate(open('1m.raw', 'r')):
#        out = [0]*6
#        tmp = [ int(i,10) for i in line.partition('#')[0].split() ]
#        out[0:len(tmp)] = tmp
#        if (n % 100000 == 0):
#            printf("line %d = %s\n", n, str(out))
#        yield out

def test_struct1():
    """write with struct.pack"""
    f = open('1m.bin', 'wb')
    for out in read_ascii():
        s = struct.pack('!HHHHHH', *out)
        f.write(s)

def test_struct2():
    """use constant format string"""
    f = open('1m.bin', 'wb')
    packer = struct.Struct('!HHHHHH')
    for out in read_ascii():
        f.write(packer.pack(*out))
    f.close()
    printf("size was %d\n", packer.size)

def test_struct3():
    """like struct1, with timestamp"""
    f = open('1m.bin', 'wb')
    for out in read_ascii():
        s = struct.pack('!dHHHHHH', time.time(), *out)
        f.write(s)

def test_struct4():
    """like struct2, with timestamp"""
    f = open('1m.bin', 'wb')
    packer = struct.Struct('!dHHHHHH')
    for out in read_ascii():
        f.write(packer.pack(time.time(), *out))
    f.close()
    printf("size was %d\n", packer.size)

#raise Exception('done')

with Timer("struct1"):
    test_struct1() # 1089k

with Timer("struct2"):
    test_struct2() # 1249k

with Timer("struct3"):
    test_struct3() # 845k

with Timer("struct4"):
    test_struct4() # 922k

# This seems fast enough for writing new data, since it's faster than
# we read ascii data anyway. Use e.g. struct4
11 test/test-struct-pack.py Normal file
@@ -0,0 +1,11 @@
#!/usr/bin/python

import struct
import mmap

f = open("test.dat", "rb+")
mm = mmap.mmap(f.fileno(),3)

print len(mm)
print "first 3 bytes: " + mm[0:3]
1 test/test.dat Normal file
@@ -0,0 +1 @@
7 test/todo.md Normal file
@@ -0,0 +1,7 @@
- Have a class representing the file contents
  - Looks like an array
  - len(), get(), index
  - some form of bisect search
  - get_extents = return [0].timestamp, [-1].timestamp
- Can append? Sure, why not. Just write to the file, extend mmap accordingly.
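A hedged sketch of the class these notes describe, read-only and array-like
over fixed-size records (the '!dHHHHHH' layout and the class name are
assumptions carried over from the speed tests):

    import mmap
    import struct

    class RecordFile(object):
        packer = struct.Struct('!dHHHHHH')
        def __init__(self, path):
            self.f = open(path, 'rb')
            self.m = mmap.mmap(self.f.fileno(), 0, access = mmap.ACCESS_READ)
        def __len__(self):
            return len(self.m) // self.packer.size
        def __getitem__(self, n):
            s = self.packer.size
            return self.packer.unpack(self.m[n*s : (n+1)*s])
        def get_extents(self):
            # first and last timestamps, as suggested above
            return self[0][0], self[len(self) - 1][0]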
@@ -1,419 +0,0 @@
#-----------------------------------------------
#aplotter.py - ascii art function plotter
#Copyright (c) 2006, Imri Goldberg
#All rights reserved.
#
#Redistribution and use in source and binary forms,
#with or without modification, are permitted provided
#that the following conditions are met:
#
# * Redistributions of source code must retain the
#   above copyright notice, this list of conditions
#   and the following disclaimer.
# * Redistributions in binary form must reproduce the
#   above copyright notice, this list of conditions
#   and the following disclaimer in the documentation
#   and/or other materials provided with the distribution.
# * Neither the name of the <ORGANIZATION> nor the names of
#   its contributors may be used to endorse or promote products
#   derived from this software without specific prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
#AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
#IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
#ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
#LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
#DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
#SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
#CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
#OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#-----------------------------------------------

import math

EPSILON = 0.000001

def transposed(mat):
    result = []
    for i in xrange(len(mat[0])):
        result.append([x[i] for x in mat])
    return result

def y_reversed(mat):
    result = []
    for i in range(len(mat)):
        result.append(list(reversed(mat[i])))
    return result

def sign(x):
    if 0 < x:
        return 1
    if 0 == x:
        return 0
    return -1

class Plotter(object):

    class PlotData(object):
        def __init__(self, x_size, y_size, min_x, max_x, min_y, max_y, x_mod, y_mod):
            self.x_size = x_size
            self.y_size = y_size
            self.min_x = min_x
            self.max_x = max_x
            self.min_y = min_y
            self.max_y = max_y
            self.x_mod = x_mod
            self.y_mod = y_mod

            self.x_step = float(max_x - min_x)/float(self.x_size)
            self.y_step = float(max_y - min_y)/float(self.y_size)
            self.inv_x_step = 1/self.x_step
            self.inv_y_step = 1/self.y_step

            self.ratio = self.y_step / self.x_step
        def __repr__(self):
            s = "size: %s, bl: %s, tr: %s, step: %s" % ((self.x_size, self.y_size), (self.min_x, self.min_y), (self.max_x, self.max_y),
                                                        (self.x_step, self.y_step))
            return s

    def __init__(self, **kwargs):
        self.x_size = kwargs.get("x_size", 80)
        self.y_size = kwargs.get("y_size", 20)

        self.will_draw_axes = kwargs.get("draw_axes", True)

        self.new_line = kwargs.get("newline", "\n")

        self.dot = kwargs.get("dot", "*")

        self.plot_slope = kwargs.get("plot_slope", True)

        self.x_margin = kwargs.get("x_margin", 0.05)
        self.y_margin = kwargs.get("y_margin", 0.1)

        self.will_plot_labels = kwargs.get("plot_labels", True)

    @staticmethod
    def get_symbol_by_slope(slope, default_symbol):
        draw_symbol = default_symbol
        if slope > math.tan(3*math.pi/8):
            draw_symbol = "|"
        elif slope > math.tan(math.pi/8) and slope < math.tan(3*math.pi/8):
            draw_symbol = "/"
        elif abs(slope) < math.tan(math.pi/8):
            draw_symbol = "-"
        elif slope < math.tan(-math.pi/8) and slope > math.tan(-3*math.pi/8):
            draw_symbol = "\\"
        elif slope < math.tan(-3*math.pi/8):
            draw_symbol = "|"
        return draw_symbol

    def plot_labels(self, output_buffer, plot_data):
        if plot_data.y_size < 2:
            return

        margin_factor = 1

        do_plot_x_label = True
        do_plot_y_label = True

        x_str = "%+g"
        if plot_data.x_size < 16:
            do_plot_x_label = False
        elif plot_data.x_size < 23:
            x_str = "%+.2g"

        y_str = "%+g"
        if plot_data.x_size < 8:
            do_plot_y_label = False
        elif plot_data.x_size < 11:
            y_str = "%+.2g"

        act_min_x = (plot_data.min_x + plot_data.x_mod*margin_factor)
        act_max_x = (plot_data.max_x - plot_data.x_mod*margin_factor)
        act_min_y = (plot_data.min_y + plot_data.y_mod*margin_factor)
        act_max_y = (plot_data.max_y - plot_data.y_mod*margin_factor)

        if abs(act_min_x) < 1:
            min_x_str = "%+.2g" % act_min_x
        else:
            min_x_str = x_str % act_min_x

        if abs(act_max_x) < 1:
            max_x_str = "%+.2g" % act_max_x
        else:
            max_x_str = x_str % act_max_x

        if abs(act_min_y) < 1:
            min_y_str = "%+.2g" % act_min_y
        else:
            min_y_str = y_str % act_min_y

        if abs(act_max_y) < 1:
            max_y_str = "%+.2g" % act_max_y
        else:
            max_y_str = y_str % act_max_y

        min_x_coord = self.get_coord(act_min_x,plot_data.min_x,plot_data.x_step)
        max_x_coord = self.get_coord(act_max_x,plot_data.min_x,plot_data.x_step)
        min_y_coord = self.get_coord(act_min_y,plot_data.min_y,plot_data.y_step)
        max_y_coord = self.get_coord(act_max_y,plot_data.min_y,plot_data.y_step)

        #print plot_data

        y_zero_coord = self.get_coord(0, plot_data.min_y, plot_data.y_step)

        #if plot_data.min_x < 0 and plot_data.max_x > 0:
        x_zero_coord = self.get_coord(0, plot_data.min_x, plot_data.x_step)
        #else:
        #    pass

        output_buffer[x_zero_coord][min_y_coord] = "+"
        output_buffer[x_zero_coord][max_y_coord] = "+"
        output_buffer[min_x_coord][y_zero_coord] = "+"
        output_buffer[max_x_coord][y_zero_coord] = "+"

        if do_plot_x_label:
            for i,c in enumerate(min_x_str):
                output_buffer[min_x_coord+i][y_zero_coord-1] = c
            for i,c in enumerate(max_x_str):
                output_buffer[max_x_coord+i-len(max_x_str)][y_zero_coord-1] = c

        if do_plot_y_label:
            for i,c in enumerate(max_y_str):
                output_buffer[x_zero_coord+i][max_y_coord] = c
            for i,c in enumerate(min_y_str):
                output_buffer[x_zero_coord+i][min_y_coord] = c

    def plot_data(self, xy_seq, output_buffer, plot_data):
        if self.plot_slope:
            xy_seq = list(xy_seq)
            #sort according to the x coord
            xy_seq.sort(key = lambda c: c[0])
            prev_p = xy_seq[0]
            e_xy_seq = enumerate(xy_seq)
            e_xy_seq.next()
            for i,(x,y) in e_xy_seq:
                draw_symbol = self.dot
                line_drawn = self.plot_line(prev_p, (x,y), output_buffer, plot_data)
                prev_p = (x,y)
                if not line_drawn:
                    if i > 0 and i < len(xy_seq)-1:
                        px,py = xy_seq[i-1]
                        nx,ny = xy_seq[i+1]

                        if abs(nx-px) > EPSILON:
                            slope = (1.0/plot_data.ratio)*(ny-py)/(nx-px)
                            draw_symbol = self.get_symbol_by_slope(slope, draw_symbol)
                    if x < plot_data.min_x or x >= plot_data.max_x or y < plot_data.min_y or y >= plot_data.max_y:
                        continue

                    x_coord = self.get_coord(x, plot_data.min_x, plot_data.x_step)
                    y_coord = self.get_coord(y, plot_data.min_y, plot_data.y_step)
                    if x_coord >= 0 and x_coord < len(output_buffer) and y_coord >= 0 and y_coord < len(output_buffer[0]):
                        if self.will_draw_axes:
                            if y_coord == self.get_coord(0, plot_data.min_y, plot_data.y_step) and draw_symbol == "-":
                                draw_symbol = "="
                        output_buffer[x_coord][y_coord] = draw_symbol
        else:
            for x,y in xy_seq:
                if x < plot_data.min_x or x >= plot_data.max_x or y < plot_data.min_y or y >= plot_data.max_y:
                    continue
                x_coord = self.get_coord(x, plot_data.min_x, plot_data.x_step)
                y_coord = self.get_coord(y, plot_data.min_y, plot_data.y_step)
                if x_coord >= 0 and x_coord < len(output_buffer) and y_coord > 0 and y_coord < len(output_buffer[0]):
                    output_buffer[x_coord][y_coord] = self.dot

    def plot_line(self, start, end, output_buffer, plot_data):
        start_coord = self.get_coord(start[0], plot_data.min_x, plot_data.x_step), self.get_coord(start[1], plot_data.min_y, plot_data.y_step)
        end_coord = self.get_coord(end[0], plot_data.min_x, plot_data.x_step), self.get_coord(end[1], plot_data.min_y, plot_data.y_step)

        x0,y0 = start_coord
        x1,y1 = end_coord
        if (x0,y0) == (x1,y1):
            return True

        clipped_line = clip_line(start, end, (plot_data.min_x, plot_data.min_y), (plot_data.max_x, plot_data.max_y))
        if clipped_line != None:
            start,end = clipped_line
        else:
            return False
        start_coord = self.get_coord(start[0], plot_data.min_x, plot_data.x_step), self.get_coord(start[1], plot_data.min_y, plot_data.y_step)
        end_coord = self.get_coord(end[0], plot_data.min_x, plot_data.x_step), self.get_coord(end[1], plot_data.min_y, plot_data.y_step)

        x0,y0 = start_coord
        x1,y1 = end_coord
        if (x0,y0) == (x1,y1):
            return True
        x_zero_coord = self.get_coord(0, plot_data.min_x, plot_data.x_step)
        y_zero_coord = self.get_coord(0, plot_data.min_y, plot_data.y_step)

        if start[0]-end[0] == 0:
            draw_symbol = "|"
        else:
            slope = (1.0/plot_data.ratio)*(end[1]-start[1])/(end[0]-start[0])
            draw_symbol = self.get_symbol_by_slope(slope, self.dot)
        try:
            delta = x1-x0, y1-y0
            if abs(delta[0]) > abs(delta[1]):
                s = sign(delta[0])
                slope = float(delta[1])/delta[0]
                for i in range(0,abs(int(delta[0]))):
                    cur_draw_symbol = draw_symbol
                    x = i*s
                    cur_y = int(y0+slope*x)
                    if self.will_draw_axes and cur_y == y_zero_coord and draw_symbol == "-":
                        cur_draw_symbol = "="
                    output_buffer[x0+x][cur_y] = cur_draw_symbol
            else:
                s = sign(delta[1])
                slope = float(delta[0])/delta[1]
                for i in range(0,abs(int(delta[1]))):
                    y = i*s
                    cur_draw_symbol = draw_symbol
                    cur_y = y0+y
                    if self.will_draw_axes and cur_y == y_zero_coord and draw_symbol == "-":
                        cur_draw_symbol = "="
                    output_buffer[int(x0+slope*y)][cur_y] = cur_draw_symbol
        except:
            print start, end
            print start_coord, end_coord
            print plot_data
            raise

        return False

    def plot_single(self, seq, min_x = None, max_x = None, min_y = None, max_y = None):
        return self.plot_double(range(len(seq)),seq, min_x, max_x, min_y, max_y)

    def plot_double(self, x_seq, y_seq, min_x = None, max_x = None, min_y = None, max_y = None):
        if min_x == None:
            min_x = min(x_seq)
        if max_x == None:
            max_x = max(x_seq)
        if min_y == None:
            min_y = min(y_seq)
        if max_y == None:
            max_y = max(y_seq)

        if max_y == min_y:
            max_y += 1

        x_mod = (max_x-min_x)*self.x_margin
        y_mod = (max_y-min_y)*self.y_margin
        min_x -= x_mod
        max_x += x_mod
        min_y -= y_mod
        max_y += y_mod

        plot_data = self.PlotData(self.x_size, self.y_size, min_x, max_x, min_y, max_y, x_mod, y_mod)

        output_buffer = [[" "]*self.y_size for i in range(self.x_size)]

        if self.will_draw_axes:
            self.draw_axes(output_buffer, plot_data)

        self.plot_data(zip(x_seq, y_seq), output_buffer, plot_data)

        if self.will_plot_labels:
            self.plot_labels(output_buffer, plot_data)

        trans_result = transposed(y_reversed(output_buffer))

        result = self.new_line.join(["".join(row) for row in trans_result])
        return result

    def draw_axes(self, output_buffer, plot_data):
        draw_x = False
        draw_y = False

        if plot_data.min_x <= 0 and plot_data.max_x > 0:
            draw_y = True
            zero_x = self.get_coord(0, plot_data.min_x, plot_data.x_step)
            for y in xrange(plot_data.y_size):
                output_buffer[zero_x][y] = "|"

        if plot_data.min_y <= 0 and plot_data.max_y > 0:
            draw_x = True
            zero_y = self.get_coord(0, plot_data.min_y, plot_data.y_step)
            for x in xrange(plot_data.x_size):
                output_buffer[x][zero_y] = "-"

        if draw_x and draw_y:
            output_buffer[zero_x][zero_y] = "+"

    @staticmethod
    def get_coord(val, min, step):
        result = int((val - min)/step)
        return result

def clip_line(line_pt_1, line_pt_2, rect_bottom_left, rect_top_right):
    ts = [0.0,1.0]
    if line_pt_1[0] == line_pt_2[0]:
        return ((line_pt_1[0], max(min(line_pt_1[1], line_pt_2[1]), rect_bottom_left[1])),
                (line_pt_1[0], min(max(line_pt_1[1], line_pt_2[1]), rect_top_right[1])))
    if line_pt_1[1] == line_pt_2[1]:
        return ((max(min(line_pt_1[0], line_pt_2[0]), rect_bottom_left[0]), line_pt_1[1]),
                (min(max(line_pt_1[0], line_pt_2[0]), rect_top_right[0]), line_pt_1[1]))

    if ((rect_bottom_left[0] <= line_pt_1[0] and line_pt_1[0] < rect_top_right[0]) and
        (rect_bottom_left[1] <= line_pt_1[1] and line_pt_1[1] < rect_top_right[1]) and
        (rect_bottom_left[0] <= line_pt_2[0] and line_pt_2[0] < rect_top_right[0]) and
        (rect_bottom_left[1] <= line_pt_2[1] and line_pt_2[1] < rect_top_right[1])):
        return line_pt_1, line_pt_2

    ts.append( float(rect_bottom_left[0]-line_pt_1[0])/(line_pt_2[0]-line_pt_1[0]) )
    ts.append( float(rect_top_right[0]-line_pt_1[0])/(line_pt_2[0]-line_pt_1[0]) )
    ts.append( float(rect_bottom_left[1]-line_pt_1[1])/(line_pt_2[1]-line_pt_1[1]) )
    ts.append( float(rect_top_right[1]-line_pt_1[1])/(line_pt_2[1]-line_pt_1[1]) )

    ts.sort()
    if ts[2] < 0 or ts[2] >= 1 or ts[3] < 0 or ts[3] >= 1:
        return None
    result = [(pt_1 + t*(pt_2-pt_1)) for t in (ts[2],ts[3]) for (pt_1, pt_2) in zip(line_pt_1, line_pt_2)]
    return (result[0],result[1]), (result[2], result[3])

def plot(*args,**flags):
    limit_flags_names = set(["min_x","min_y","max_x","max_y"])
    limit_flags = dict([(n,flags[n]) for n in limit_flags_names & set(flags)])
    setting_flags = dict([(n,flags[n]) for n in set(flags) - limit_flags_names])

    if len(args) == 1:
        p = Plotter(**setting_flags)
        print p.plot_single(args[0],**limit_flags)
    elif len(args) == 2:
        p = Plotter(**setting_flags)
        print p.plot_double(args[0],args[1],**limit_flags)
    else:
        raise NotImplementedError("can't draw multiple graphs yet")

__all__ = ["Plotter","plot"]
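For reference, typical use of the module above (hedged: assumes it is
importable as aplotter):

    from aplotter import plot
    plot([x*x for x in xrange(16)])    # prints an ASCII-art parabola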
@@ -1,124 +0,0 @@
# path: /newton/prep
# layout: PrepData
# start: Fri, 23 Mar 2012 10:00:30.000000 +0000
# end: Fri, 23 Mar 2012 10:00:31.000000 +0000
1332496830.000000 251774.000000 224241.000000 5688.100098 1915.530029 9329.219727 4183.709961 1212.349976 2641.790039
[... 119 further rows of the same sample prep data, one every 8.333 ms, through 1332496830.991667 ...]
@@ -1,119 +0,0 @@
1332496830.008333 259567.000000 222698.000000 6207.600098 678.671997 9380.230469 4575.580078 2830.610107 2688.629883
[... further rows of the same sample prep data, one every 8.333 ms ...]
|
||||
1332496830.858333 259503.000000 222061.000000 6589.950195 659.935974 9596.919922 3598.100098 1702.489990 3036.600098
|
||||
1332496830.866667 265495.000000 222843.000000 5541.850098 1728.430054 8459.959961 4492.000000 2231.969971 2430.620117
|
||||
1332496830.875000 260929.000000 224996.000000 4000.949951 3745.989990 6983.790039 5430.859863 1855.260010 2533.379883
|
||||
1332496830.883333 252716.000000 224335.000000 5086.560059 3401.149902 7597.970215 5196.120117 1755.719971 3079.760010
|
||||
1332496830.891667 254110.000000 223111.000000 6822.189941 1229.079956 9164.339844 3761.229980 1679.390015 3584.879883
|
||||
1332496830.900000 259969.000000 224693.000000 6183.950195 1538.500000 9222.080078 3139.169922 949.901978 3180.800049
|
||||
1332496830.908333 259078.000000 226913.000000 4388.890137 3694.820068 8195.019531 3933.000000 426.079987 2388.449951
|
||||
1332496830.916667 254563.000000 224760.000000 5168.439941 4020.939941 8450.269531 4758.910156 1458.900024 2286.429932
|
||||
1332496830.925000 258059.000000 221217.000000 6883.459961 1649.530029 9232.780273 4457.649902 3057.820068 3031.949951
|
||||
1332496830.933333 264667.000000 221177.000000 6218.509766 1645.729980 8657.179688 3663.500000 2528.280029 3978.340088
|
||||
1332496830.941667 262925.000000 224382.000000 4627.500000 3635.929932 7892.799805 3431.320068 604.508972 3901.370117
|
||||
1332496830.950000 254708.000000 225448.000000 4408.250000 4461.040039 8197.169922 3953.750000 -44.534599 3154.870117
|
||||
1332496830.958333 253702.000000 224635.000000 5825.770020 2577.050049 9590.049805 4569.250000 1460.270020 2785.169922
|
||||
1332496830.966667 260206.000000 224140.000000 5387.979980 1951.160034 8789.509766 5131.660156 2706.379883 2972.479980
|
||||
1332496830.975000 261240.000000 224737.000000 3860.810059 3418.310059 7414.529785 5284.520020 2271.379883 3183.149902
|
||||
1332496830.983333 256140.000000 223252.000000 3850.010010 3957.139893 7262.649902 4964.640137 1499.510010 3453.129883
|
||||
1332496830.991667 256116.000000 221349.000000 5594.479980 2054.399902 8835.129883 3662.010010 1485.510010 3613.010010
|
@@ -1 +0,0 @@
1332496830.008333 259567.000000 222698.000000 6207.600098 678.671997 9380.230469 4575.580078 2830.610107 2688.629883
@@ -1,2 +0,0 @@
1332496830.008333 259567.000000 222698.000000 6207.600098 678.671997 9380.230469 4575.580078 2830.610107 2688.629883
1332496830.016667 263073.000000 223304.000000 4961.640137 2197.120117 7687.310059 4861.859863 2732.780029 3008.540039
@@ -1,124 +0,0 @@
# path: /newton/prep
# layout: PrepData
# start: Fri, 23 Mar 2012 10:00:30.000000 +0000
# end: Fri, 23 Mar 2012 10:00:31.000000 +0000
# (120 rows of 8-column prep data at 120 Hz covering the interval above, omitted)
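Aside (not part of the diff): the dump format above is just "#"-prefixed "key: value" headers followed by whitespace-separated float columns, so it can be read back with a few lines of Python. This is a sketch only; the helper name read_extract_dump is hypothetical.

def read_extract_dump(path):
    # Parse an extract dump like the one above into header metadata
    # plus a list of float rows.
    headers = {}
    rows = []
    with open(path) as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            if line.startswith("#"):
                # e.g. "# layout: PrepData" -> headers["layout"] = "PrepData"
                key, sep, value = line[1:].partition(":")
                headers[key.strip()] = value.strip()
            else:
                rows.append([float(field) for field in line.split()])
    return headers, rows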
@@ -1,120 +0,0 @@
# (120 rows of 8-column prep data at 120 Hz, identical to the data rows of the previous file but without the "#" header, omitted)
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -1,250 +0,0 @@
import nilmdb
from nilmdb.printf import *
from nilmdb.client import ClientError, ServerError

import datetime_tz

from nose.tools import *
from nose.tools import assert_raises
import itertools
import distutils.version
import os
import sys
import threading
import cStringIO
import simplejson as json
import unittest
import warnings

from test_helpers import *

testdb = "tests/client-testdb"

def setup_module():
    global test_server, test_db
    # Clear out DB
    recursive_unlink(testdb)

    # Start web app on a custom port
    test_db = nilmdb.NilmDB(testdb, sync = False)
    test_server = nilmdb.Server(test_db, host = "127.0.0.1",
                                port = 12380, stoppable = False,
                                fast_shutdown = True,
                                force_traceback = False)
    test_server.start(blocking = False)

def teardown_module():
    global test_server, test_db
    # Close web app
    test_server.stop()
    test_db.close()

class TestClient(object):

    def test_client_1_basic(self):
        # Test a fake host
        client = nilmdb.Client(url = "http://localhost:1/")
        with assert_raises(nilmdb.client.ServerError):
            client.version()

        # Trigger same error with a PUT request
        client = nilmdb.Client(url = "http://localhost:1/")
        with assert_raises(nilmdb.client.ServerError):
            client.version()

        # Then a fake URL on a real host
        client = nilmdb.Client(url = "http://localhost:12380/fake/")
        with assert_raises(nilmdb.client.ClientError):
            client.version()

        # Now a real URL with no http:// prefix
        client = nilmdb.Client(url = "localhost:12380")
        version = client.version()

        # Now use the real URL
        client = nilmdb.Client(url = "http://localhost:12380/")
        version = client.version()
        eq_(distutils.version.StrictVersion(version),
            distutils.version.StrictVersion(test_server.version))

    def test_client_2_nilmdb(self):
        # Basic stream tests, like those in test_nilmdb:test_stream
        client = nilmdb.Client(url = "http://localhost:12380/")

        # Database starts empty
        eq_(client.stream_list(), [])

        # Bad path
        with assert_raises(ClientError):
            client.stream_create("foo/bar/baz", "PrepData")
        with assert_raises(ClientError):
            client.stream_create("/foo", "PrepData")
        # Bad layout type
        with assert_raises(ClientError):
            client.stream_create("/newton/prep", "NoSuchLayout")
        client.stream_create("/newton/prep", "PrepData")
        client.stream_create("/newton/raw", "RawData")
        client.stream_create("/newton/zzz/rawnotch", "RawNotchedData")

        # Verify we got 3 streams
        eq_(client.stream_list(), [ ["/newton/prep", "PrepData"],
                                    ["/newton/raw", "RawData"],
                                    ["/newton/zzz/rawnotch", "RawNotchedData"]
                                  ])
        # Match just one type or one path
        eq_(client.stream_list(layout="RawData"), [ ["/newton/raw", "RawData"] ])
        eq_(client.stream_list(path="/newton/raw"), [ ["/newton/raw", "RawData"] ])

        # Set / get metadata
        eq_(client.stream_get_metadata("/newton/prep"), {})
        eq_(client.stream_get_metadata("/newton/raw"), {})
        meta1 = { "description": "The Data",
                  "v_scale": "1.234" }
        meta2 = { "description": "The Data" }
        meta3 = { "v_scale": "1.234" }
        client.stream_set_metadata("/newton/prep", meta1)
        client.stream_update_metadata("/newton/prep", {})
        client.stream_update_metadata("/newton/raw", meta2)
        client.stream_update_metadata("/newton/raw", meta3)
        eq_(client.stream_get_metadata("/newton/prep"), meta1)
        eq_(client.stream_get_metadata("/newton/raw"), meta1)
        eq_(client.stream_get_metadata("/newton/raw", [ "description" ] ), meta2)
        eq_(client.stream_get_metadata("/newton/raw", [ "description",
                                                        "v_scale" ] ), meta1)

        # missing key
        eq_(client.stream_get_metadata("/newton/raw", "descr"),
            { "descr": None })
        eq_(client.stream_get_metadata("/newton/raw", [ "descr" ]),
            { "descr": None })

        # test wrong types (list instead of dict)
        with assert_raises(ClientError):
            client.stream_set_metadata("/newton/prep", [1,2,3])
        with assert_raises(ClientError):
            client.stream_update_metadata("/newton/prep", [1,2,3])

    def test_client_3_insert(self):
        client = nilmdb.Client(url = "http://localhost:12380/")

        datetime_tz.localtz_set("America/New_York")

        testfile = "tests/data/prep-20120323T1000"
        start = datetime_tz.datetime_tz.smartparse("20120323T1000")
        rate = 120

        # First try a nonexistent path
        data = nilmdb.timestamper.TimestamperRate(testfile, start, 120)
        with assert_raises(ClientError) as e:
            result = client.stream_insert("/newton/no-such-path", data)
        in_("404 Not Found", str(e.exception))

        # Now try reversed timestamps
        data = nilmdb.timestamper.TimestamperRate(testfile, start, 120)
        data = reversed(list(data))
        with assert_raises(ClientError) as e:
            result = client.stream_insert("/newton/prep", data)
        in_("400 Bad Request", str(e.exception))
        in_("timestamp is not monotonically increasing", str(e.exception))

        # Now try empty data (no server request made)
        empty = cStringIO.StringIO("")
        data = nilmdb.timestamper.TimestamperRate(empty, start, 120)
        result = client.stream_insert("/newton/prep", data)
        eq_(result, None)

        # Try forcing a server request with empty data
        with assert_raises(ClientError) as e:
            client.http.put("stream/insert", "", { "path": "/newton/prep" })
        in_("400 Bad Request", str(e.exception))
        in_("no data provided", str(e.exception))

        # Now do the real load
        data = nilmdb.timestamper.TimestamperRate(testfile, start, 120)
        result = client.stream_insert("/newton/prep", data)
        eq_(result[0], "ok")

        # Try some overlapping data -- just insert it again
        data = nilmdb.timestamper.TimestamperRate(testfile, start, 120)
        with assert_raises(ClientError) as e:
            result = client.stream_insert("/newton/prep", data)
        in_("400 Bad Request", str(e.exception))
        in_("OverlapError", str(e.exception))

    def test_client_4_extract(self):
        # Misc tests for extract.  Most of them are in test_cmdline.
        client = nilmdb.Client(url = "http://localhost:12380/")

        for x in client.stream_extract("/newton/prep", 123, 123):
            raise Exception("shouldn't be any data for this request")

    def test_client_5_generators(self):
        # A lot of the client functionality is already tested by test_cmdline,
        # but this gets a bit more coverage that cmdline misses.
        client = nilmdb.Client(url = "http://localhost:12380/")

        # Trigger a client error in generator
        start = datetime_tz.datetime_tz.smartparse("20120323T2000")
        end = datetime_tz.datetime_tz.smartparse("20120323T1000")
        for function in [ client.stream_intervals, client.stream_extract ]:
            with assert_raises(ClientError) as e:
                function("/newton/prep",
                         start.totimestamp(),
                         end.totimestamp()).next()
            in_("400 Bad Request", str(e.exception))
            in_("end before start", str(e.exception))

        # Trigger a curl error in generator
        with assert_raises(ServerError) as e:
            client.http.get_gen("http://nosuchurl/").next()

        # Trigger a curl error in generator
        with assert_raises(ServerError) as e:
            client.http.get_gen("http://nosuchurl/").next()

        # Check non-json version of string output
        eq_(json.loads(client.http.get("/stream/list",retjson=False)),
            client.http.get("/stream/list",retjson=True))

        # Check non-json version of generator output
        for (a, b) in itertools.izip(
            client.http.get_gen("/stream/list",retjson=False),
            client.http.get_gen("/stream/list",retjson=True)):
            eq_(json.loads(a), b)

        # Check PUT with generator out
        with assert_raises(ClientError) as e:
            client.http.put_gen("stream/insert", "",
                                { "path": "/newton/prep" }).next()
        in_("400 Bad Request", str(e.exception))
        in_("no data provided", str(e.exception))

        # Check 404 for missing streams
        for function in [ client.stream_intervals, client.stream_extract ]:
            with assert_raises(ClientError) as e:
                function("/no/such/stream").next()
            in_("404 Not Found", str(e.exception))
            in_("No such stream", str(e.exception))

    def test_client_6_chunked(self):
        # Make sure that /stream/intervals and /stream/extract
        # properly return streaming, chunked response.  Pokes around
        # in client.http internals a bit to look at the response
        # headers.

        client = nilmdb.Client(url = "http://localhost:12380/")

        # Use a warning rather than returning a test failure, so that we can
        # still disable chunked responses for debugging.
        x = client.http.get("stream/intervals", { "path": "/newton/prep" },
                            retjson=False)
        eq_(x.count('\n'), 2)
        if "transfer-encoding: chunked" not in client.http._headers.lower():
            warnings.warn("Non-chunked HTTP response for /stream/intervals")

        x = client.http.get("stream/extract",
                            { "path": "/newton/prep",
                              "start": "123",
                              "end": "123" }, retjson=False)
        if "transfer-encoding: chunked" not in client.http._headers.lower():
            warnings.warn("Non-chunked HTTP response for /stream/extract")
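Aside (not part of the diff): distilled from the tests above, a typical client session uses the same handful of calls. This is a sketch under the test fixtures, not an API reference.

import nilmdb
import datetime_tz

client = nilmdb.Client(url = "http://localhost:12380/")
client.stream_create("/newton/prep", "PrepData")

# Timestamp a fixture file at 120 Hz and insert it, as test_client_3 does.
start = datetime_tz.datetime_tz.smartparse("20120323T1000")
data = nilmdb.timestamper.TimestamperRate("tests/data/prep-20120323T1000",
                                          start, 120)
client.stream_insert("/newton/prep", data)

# Stream the data back out, as test_client_4 and _5 do.
for chunk in client.stream_extract("/newton/prep",
                                   start.totimestamp(),
                                   start.totimestamp() + 120):
    pass  # each chunk is a block of timestamped text rows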
@@ -1,458 +0,0 @@
|
||||
import nilmdb
|
||||
from nilmdb.printf import *
|
||||
import nilmdb.cmdline
|
||||
|
||||
from nose.tools import *
|
||||
from nose.tools import assert_raises
|
||||
import itertools
|
||||
import datetime_tz
|
||||
import os
|
||||
import shutil
|
||||
import sys
|
||||
import threading
|
||||
import urllib2
|
||||
from urllib2 import urlopen, HTTPError
|
||||
import Queue
|
||||
import cStringIO
|
||||
import shlex
|
||||
|
||||
from test_helpers import *
|
||||
|
||||
testdb = "tests/cmdline-testdb"
|
||||
|
||||
def server_start(max_results = None):
|
||||
global test_server, test_db
|
||||
# Start web app on a custom port
|
||||
test_db = nilmdb.NilmDB(testdb, sync = False, max_results = max_results)
|
||||
test_server = nilmdb.Server(test_db, host = "127.0.0.1",
|
||||
port = 12380, stoppable = False,
|
||||
fast_shutdown = True,
|
||||
force_traceback = False)
|
||||
test_server.start(blocking = False)
|
||||
|
||||
def server_stop():
|
||||
global test_server, test_db
|
||||
# Close web app
|
||||
test_server.stop()
|
||||
test_db.close()
|
||||
|
||||
def setup_module():
|
||||
global test_server, test_db
|
||||
# Clear out DB
|
||||
recursive_unlink(testdb)
|
||||
server_start()
|
||||
|
||||
def teardown_module():
|
||||
server_stop()
|
||||
|
||||
class TestCmdline(object):
|
||||
|
||||
def run(self, arg_string, infile=None, outfile=None):
|
||||
"""Run a cmdline client with the specified argument string,
|
||||
passing the given input. Returns a tuple with the output and
|
||||
exit code"""
|
||||
class stdio_wrapper:
|
||||
def __init__(self, stdin, stdout, stderr):
|
||||
self.io = (stdin, stdout, stderr)
|
||||
def __enter__(self):
|
||||
self.saved = ( sys.stdin, sys.stdout, sys.stderr )
|
||||
( sys.stdin, sys.stdout, sys.stderr ) = self.io
|
||||
def __exit__(self, type, value, traceback):
|
||||
( sys.stdin, sys.stdout, sys.stderr ) = self.saved
|
||||
# Empty input if none provided
|
||||
if infile is None:
|
||||
infile = cStringIO.StringIO("")
|
||||
# Capture stderr
|
||||
errfile = cStringIO.StringIO()
|
||||
if outfile is None:
|
||||
# If no output file, capture stdout with stderr
|
||||
outfile = errfile
|
||||
with stdio_wrapper(infile, outfile, errfile) as s:
|
||||
try:
|
||||
nilmdb.cmdline.Cmdline(shlex.split(arg_string)).run()
|
||||
sys.exit(0)
|
||||
except SystemExit as e:
|
||||
exitcode = e.code
|
||||
captured = outfile.getvalue()
|
||||
self.captured = captured
|
||||
self.exitcode = exitcode
|
||||
|
||||
def ok(self, arg_string, infile = None):
|
||||
self.run(arg_string, infile)
|
||||
if self.exitcode != 0:
|
||||
self.dump()
|
||||
eq_(self.exitcode, 0)
|
||||
|
||||
def fail(self, arg_string, infile = None, exitcode = None):
|
||||
self.run(arg_string, infile)
|
||||
if exitcode is not None and self.exitcode != exitcode:
|
||||
self.dump()
|
||||
eq_(self.exitcode, exitcode)
|
||||
if self.exitcode == 0:
|
||||
self.dump()
|
||||
ne_(self.exitcode, 0)
|
||||
|
||||
def contain(self, checkstring):
|
||||
in_(checkstring, self.captured)
|
||||
|
||||
def match(self, checkstring):
|
||||
eq_(checkstring, self.captured)
|
||||
|
||||
def matchfile(self, file):
|
||||
# Captured data should match file contents exactly
|
||||
with open(file) as f:
|
||||
contents = f.read()
|
||||
if contents != self.captured:
|
||||
#print contents[1:1000] + "\n"
|
||||
#print self.captured[1:1000] + "\n"
|
||||
raise AssertionError("captured data doesn't match " + file)
|
||||
|
||||
def matchfilecount(self, file):
|
||||
# Last line of captured data should match the number of
|
||||
# non-commented lines in file
|
||||
count = 0
|
||||
with open(file) as f:
|
||||
for line in f:
|
||||
if line[0] != '#':
|
||||
count += 1
|
||||
eq_(self.captured.splitlines()[-1], sprintf("%d", count))
|
||||
|
||||
def dump(self):
|
||||
printf("-----dump start-----\n%s-----dump end-----\n", self.captured)
|
||||
|
||||
def test_cmdline_01_basic(self):
|
||||
|
||||
# help
|
||||
self.ok("--help")
|
||||
self.contain("usage:")
|
||||
|
||||
# fail for no args
|
||||
self.fail("")
|
||||
|
||||
# fail for no such option
|
||||
self.fail("--nosuchoption")
|
||||
|
||||
# fail for bad command
|
||||
self.fail("badcommand")
|
||||
|
||||
# try some URL constructions
|
||||
self.fail("--url http://nosuchurl/ info")
|
||||
self.contain("Couldn't resolve host 'nosuchurl'")
|
||||
|
||||
self.fail("--url nosuchurl info")
|
||||
self.contain("Couldn't resolve host 'nosuchurl'")
|
||||
|
||||
self.fail("-u nosuchurl/foo info")
|
||||
self.contain("Couldn't resolve host 'nosuchurl'")
|
||||
|
||||
self.fail("-u localhost:0 info")
|
||||
self.contain("couldn't connect to host")
|
||||
|
||||
self.ok("-u localhost:12380 info")
|
||||
self.ok("info")
|
||||
|
||||
# Duplicated arguments should fail, but this isn't implemented
|
||||
# due to it being kind of a pain with argparse.
|
||||
if 0:
|
||||
self.fail("-u url1 -u url2 info")
|
||||
self.contain("duplicated argument")
|
||||
|
||||
self.fail("list --detail --detail")
|
||||
self.contain("duplicated argument")
|
||||
|
||||
self.fail("list --detail --path path1 --path path2")
|
||||
self.contain("duplicated argument")
|
||||
|
||||
self.fail("extract --start 2000-01-01 --start 2001-01-02")
|
||||
self.contain("duplicated argument")
|
||||
|
||||
def test_cmdline_02_info(self):
|
||||
self.ok("info")
|
||||
self.contain("Server URL: http://localhost:12380/")
|
||||
self.contain("Server version: " + test_server.version)
|
||||
self.contain("Server database path")
|
||||
self.contain("Server database size")
|
||||
|
||||
def test_cmdline_03_createlist(self):
|
||||
# Basic stream tests, like those in test_client.
|
||||
|
||||
# No streams
|
||||
self.ok("list")
|
||||
self.match("")
|
||||
|
||||
# Bad paths
|
||||
self.fail("create foo/bar/baz PrepData")
|
||||
self.contain("paths must start with /")
|
||||
|
||||
self.fail("create /foo PrepData")
|
||||
self.contain("invalid path")
|
||||
|
||||
# Bad layout type
|
||||
self.fail("create /newton/prep NoSuchLayout")
|
||||
self.contain("no such layout")
|
||||
|
||||
# Create a few streams
|
||||
self.ok("create /newton/prep PrepData")
|
||||
self.ok("create /newton/raw RawData")
|
||||
self.ok("create /newton/zzz/rawnotch RawNotchedData")
|
||||
|
||||
# Verify we got those 3 streams
|
||||
self.ok("list")
|
||||
self.match("/newton/prep PrepData\n"
|
||||
"/newton/raw RawData\n"
|
||||
"/newton/zzz/rawnotch RawNotchedData\n")
|
||||
|
||||
# Match just one type or one path
|
||||
self.ok("list --path /newton/raw")
|
||||
self.match("/newton/raw RawData\n")
|
||||
|
||||
self.ok("list --layout RawData")
|
||||
self.match("/newton/raw RawData\n")
|
||||
|
||||
# Wildcard matches
|
||||
self.ok("list --layout Raw*")
|
||||
self.match("/newton/raw RawData\n"
|
||||
"/newton/zzz/rawnotch RawNotchedData\n")
|
||||
|
||||
self.ok("list --path *zzz* --layout Raw*")
|
||||
self.match("/newton/zzz/rawnotch RawNotchedData\n")
|
||||
|
||||
self.ok("list --path *zzz* --layout Prep*")
|
||||
self.match("")
|
||||
|
||||
def test_cmdline_04_metadata(self):
|
||||
# Set / get metadata
|
||||
self.fail("metadata")
|
||||
self.fail("metadata --get")
|
||||
|
||||
self.ok("metadata /newton/prep")
|
||||
self.match("")
|
||||
|
||||
self.ok("metadata /newton/raw --get")
|
||||
self.match("")
|
||||
|
||||
self.ok("metadata /newton/prep --set "
|
||||
"'description=The Data' "
|
||||
"v_scale=1.234")
|
||||
self.ok("metadata /newton/raw --update "
|
||||
"'description=The Data'")
|
||||
self.ok("metadata /newton/raw --update "
|
||||
"v_scale=1.234")
|
||||
|
||||
# various parsing tests
|
||||
self.ok("metadata /newton/raw --update foo=")
|
||||
self.fail("metadata /newton/raw --update =bar")
|
||||
self.fail("metadata /newton/raw --update foo==bar")
|
||||
self.fail("metadata /newton/raw --update foo;bar")
|
||||
|
||||
# errors
|
||||
self.fail("metadata /newton/nosuchstream foo=bar")
|
||||
self.contain("unrecognized arguments")
|
||||
self.fail("metadata /newton/nosuchstream")
|
||||
self.contain("No stream at path")
|
||||
self.fail("metadata /newton/nosuchstream --set foo=bar")
|
||||
self.contain("No stream at path")
|
||||
|
||||
self.ok("metadata /newton/prep")
|
||||
self.match("description=The Data\nv_scale=1.234\n")
|
||||
|
||||
self.ok("metadata /newton/prep --get")
|
||||
self.match("description=The Data\nv_scale=1.234\n")
|
||||
|
||||
self.ok("metadata /newton/prep --get descr")
|
||||
self.match("descr=\n")
|
||||
|
||||
self.ok("metadata /newton/prep --get description")
|
||||
self.match("description=The Data\n")
|
||||
|
||||
self.ok("metadata /newton/prep --get description v_scale")
|
||||
self.match("description=The Data\nv_scale=1.234\n")
|
||||
|
||||
self.ok("metadata /newton/prep --set "
|
||||
"'description=The Data'")
|
||||
|
||||
self.ok("metadata /newton/prep --get")
|
||||
self.match("description=The Data\n")
|
||||
|
||||
self.fail("metadata /newton/nosuchpath")
|
||||
self.contain("No stream at path /newton/nosuchpath")
|
||||
|
||||
def test_cmdline_05_parsetime(self):
|
||||
os.environ['TZ'] = "America/New_York"
|
||||
cmd = nilmdb.cmdline.Cmdline(None)
|
||||
test = datetime_tz.datetime_tz.now()
|
||||
eq_(cmd.parse_time(str(test)), test)
|
||||
test = datetime_tz.datetime_tz.smartparse("20120405 1400-0400")
|
||||
eq_(cmd.parse_time("hi there 20120405 1400-0400 testing! 123"), test)
|
||||
eq_(cmd.parse_time("20120405 1800 UTC"), test)
|
||||
eq_(cmd.parse_time("20120405 1400-0400 UTC"), test)
|
||||
with assert_raises(ValueError):
|
||||
print cmd.parse_time("20120405 1400-9999")
|
||||
with assert_raises(ValueError):
|
||||
print cmd.parse_time("hello")
|
||||
with assert_raises(ValueError):
|
||||
print cmd.parse_time("-")
|
||||
with assert_raises(ValueError):
|
||||
print cmd.parse_time("")
|
||||
with assert_raises(ValueError):
|
||||
print cmd.parse_time("14:00")
|
||||
eq_(cmd.parse_time("snapshot-20120405-140000.raw.gz"), test)
|
||||
eq_(cmd.parse_time("prep-20120405T1400"), test)
|
||||
|
||||
def test_cmdline_06_insert(self):
|
||||
self.ok("insert --help")
|
||||
|
||||
self.fail("insert /foo/bar baz qwer")
|
||||
self.contain("Error getting stream info")
|
||||
|
||||
self.fail("insert /newton/prep baz qwer")
|
||||
self.match("Error opening input file baz\n")
|
||||
|
||||
self.fail("insert /newton/prep")
|
||||
self.contain("Error extracting time")
|
||||
|
||||
self.fail("insert --start 19801205 /newton/prep 1 2 3 4")
|
||||
self.contain("--start can only be used with one input file")
|
||||
|
||||
self.fail("insert /newton/prep "
|
||||
"tests/data/prep-20120323T1000")
|
||||
|
||||
# insert pre-timestamped data, from stdin
|
||||
os.environ['TZ'] = "UTC"
|
||||
with open("tests/data/prep-20120323T1004-timestamped") as input:
|
||||
self.ok("insert --none /newton/prep", input)
|
||||
|
||||
# insert data with normal timestamper from filename
|
||||
os.environ['TZ'] = "UTC"
|
||||
self.ok("insert --rate 120 /newton/prep "
|
||||
"tests/data/prep-20120323T1000 "
|
||||
"tests/data/prep-20120323T1002")
|
||||
|
||||
# overlap
|
||||
os.environ['TZ'] = "UTC"
|
||||
self.fail("insert --rate 120 /newton/prep "
|
||||
"tests/data/prep-20120323T1004")
|
||||
self.contain("overlap")
|
||||
|
||||
# Just to help test more situations -- stop and restart
|
||||
# the server now. This tests nilmdb's interval caching,
|
||||
# at the very least.
|
||||
server_stop()
|
||||
server_start()
|
||||
|
||||
# still an overlap if we specify a different start
|
||||
os.environ['TZ'] = "America/New_York"
|
||||
self.fail("insert --rate 120 --start '03/23/2012 06:05:00' /newton/prep"
|
||||
" tests/data/prep-20120323T1004")
|
||||
self.contain("overlap")
|
||||
|
||||
# wrong format
|
||||
os.environ['TZ'] = "UTC"
|
||||
self.fail("insert --rate 120 /newton/raw "
|
||||
"tests/data/prep-20120323T1004")
|
||||
self.contain("Error parsing input data")
|
||||
|
||||
# empty data does nothing
|
||||
self.ok("insert --rate 120 --start '03/23/2012 06:05:00' /newton/prep "
|
||||
"/dev/null")
|
||||
|
||||
# bad start time
|
||||
self.fail("insert --rate 120 --start 'whatever' /newton/prep /dev/null")
|
||||
|
||||
def test_cmdline_07_detail(self):
|
||||
# Just count the number of lines, it's probably fine
|
||||
self.ok("list --detail")
|
||||
eq_(self.captured.count('\n'), 11)
|
||||
|
||||
self.ok("list --detail --path *prep")
|
||||
eq_(self.captured.count('\n'), 7)
|
||||
|
||||
self.ok("list --detail --path *prep --start='23 Mar 2012 10:02'")
|
||||
eq_(self.captured.count('\n'), 5)
|
||||
|
||||
self.ok("list --detail --path *prep --start='23 Mar 2012 10:05'")
|
||||
eq_(self.captured.count('\n'), 3)
|
||||
|
||||
self.ok("list --detail --path *prep --start='23 Mar 2012 10:05:15'")
|
||||
eq_(self.captured.count('\n'), 2)
|
||||
self.contain("10:05:15.000")
|
||||
|
||||
self.ok("list --detail --path *prep --start='23 Mar 2012 10:05:15.50'")
|
||||
eq_(self.captured.count('\n'), 2)
|
||||
self.contain("10:05:15.500")
|
||||
|
||||
self.ok("list --detail --path *prep --start='23 Mar 2012 19:05:15.50'")
|
||||
eq_(self.captured.count('\n'), 2)
|
||||
self.contain("no intervals")
|
||||
|
||||
self.ok("list --detail --path *prep --start='23 Mar 2012 10:05:15.50'"
|
||||
+ " --end='23 Mar 2012 10:05:15.50'")
|
||||
eq_(self.captured.count('\n'), 2)
|
||||
self.contain("10:05:15.500")
|
||||
|
||||
self.ok("list --detail")
|
||||
eq_(self.captured.count('\n'), 11)
|
||||
|
||||
def test_cmdline_08_extract(self):
|
||||
# nonexistent stream
|
||||
self.fail("extract /no/such/foo --start 2000-01-01 --end 2020-01-01")
|
||||
self.contain("Error getting stream info")
|
||||
|
||||
# empty ranges return an error
|
||||
self.fail("extract -a /newton/prep " +
|
||||
"--start '23 Mar 2012 10:00:30' " +
|
||||
"--end '23 Mar 2012 10:00:30'", exitcode = 2)
|
||||
self.contain("no data")
|
||||
self.fail("extract -a /newton/prep " +
|
||||
"--start '23 Mar 2012 10:00:30.000001' " +
|
||||
"--end '23 Mar 2012 10:00:30.000001'", exitcode = 2)
|
||||
self.contain("no data")
|
||||
self.fail("extract -a /newton/prep " +
|
||||
"--start '23 Mar 2022 10:00:30' " +
|
||||
"--end '23 Mar 2022 10:00:30'", exitcode = 2)
|
||||
self.contain("no data")
|
||||
|
||||
# but are ok if we're just counting results
|
||||
self.ok("extract --count /newton/prep " +
|
||||
"--start '23 Mar 2012 10:00:30' " +
|
||||
"--end '23 Mar 2012 10:00:30'")
|
||||
self.match("0\n")
|
||||
self.ok("extract -c /newton/prep " +
|
||||
"--start '23 Mar 2012 10:00:30.000001' " +
|
||||
"--end '23 Mar 2012 10:00:30.000001'")
|
||||
self.match("0\n")
|
||||
|
||||
# Check various dumps against stored copies of how they should appear
|
||||
def test(file, start, end, extra=""):
|
||||
self.ok("extract " + extra + " /newton/prep " +
|
||||
"--start '23 Mar 2012 " + start + "' " +
|
||||
"--end '23 Mar 2012 " + end + "'")
|
||||
self.matchfile("tests/data/extract-" + str(file))
|
||||
self.ok("extract --count " + extra + " /newton/prep " +
|
||||
"--start '23 Mar 2012 " + start + "' " +
|
||||
"--end '23 Mar 2012 " + end + "'")
|
||||
self.matchfilecount("tests/data/extract-" + str(file))
|
||||
test(1, "10:00:30", "10:00:31", extra="-a")
|
||||
test(1, "10:00:30.000000", "10:00:31", extra="-a")
|
||||
test(2, "10:00:30.000001", "10:00:31")
|
||||
test(2, "10:00:30.008333", "10:00:31")
|
||||
test(3, "10:00:30.008333", "10:00:30.008334")
|
||||
test(3, "10:00:30.008333", "10:00:30.016667")
|
||||
test(4, "10:00:30.008333", "10:00:30.025")
|
||||
test(5, "10:00:30", "10:00:31", extra="--annotate --bare")
|
||||
test(6, "10:00:30", "10:00:31", extra="-b")
|
||||
|
||||
# all data put in by tests
|
||||
self.ok("extract -a /newton/prep --start 2000-01-01 --end 2020-01-01")
|
||||
eq_(self.captured.count('\n'), 43204)
|
||||
self.ok("extract -c /newton/prep --start 2000-01-01 --end 2020-01-01")
|
||||
self.match("43200\n")

    def test_cmdline_09_truncated(self):
        # Test truncated responses by overriding the nilmdb max_results
        server_stop()
        server_start(max_results = 2)
        self.ok("list --detail")
        eq_(self.captured.count('\n'), 11)
        server_stop()
        server_start()
@@ -1,32 +0,0 @@
# Just some helpers for test functions

import shutil, os

def myrepr(x):
    if isinstance(x, basestring):
        return '"' + x + '"'
    else:
        return repr(x)

def eq_(a, b):
    if not a == b:
        raise AssertionError("%s != %s" % (myrepr(a), myrepr(b)))

def in_(a, b):
    if a not in b:
        raise AssertionError("%s not in %s" % (myrepr(a), myrepr(b)))

def ne_(a, b):
    if not a != b:
        raise AssertionError("unexpected %s == %s" % (myrepr(a), myrepr(b)))

def recursive_unlink(path):
    try:
        shutil.rmtree(path)
    except OSError:
        pass
    try:
        os.unlink(path)
    except OSError:
        pass
@@ -1,319 +0,0 @@
# -*- coding: utf-8 -*-

import nilmdb
from nilmdb.printf import *
import datetime_tz

from nose.tools import *
from nose.tools import assert_raises
import itertools

from nilmdb.interval import Interval, DBInterval, IntervalSet, IntervalError

from test_helpers import *
import unittest

def makeset(string):
    """Build an IntervalSet from a string, for testing purposes

    Each character is 1 second
    [ = interval start
    | = interval end + adjacent start
    ] = interval end
    . = zero-width interval (identical start and end)
    anything else is ignored
    """
    iset = IntervalSet()
    for i, c in enumerate(string):
        day = i + 10000
        if (c == "["):
            start = day
        elif (c == "|"):
            iset += Interval(start, day)
            start = day
        elif (c == "]"):
            iset += Interval(start, day)
            del start
        elif (c == "."):
            iset += Interval(day, day)
    return iset
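
# For example (illustration only, not part of the original file):
#   makeset("[--]")  ->  IntervalSet([Interval(10000, 10003)])
#   makeset("[|]")   ->  two adjacent one-second intervals, [10000-10001]
#                        and [10001-10002], which compare equal to the
#                        single interval built by makeset("[-]")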

class TestInterval:
    def test_interval(self):
        # Test Interval class
        os.environ['TZ'] = "America/New_York"
        datetime_tz._localtz = None
        (d1, d2, d3) = [ datetime_tz.datetime_tz.smartparse(x).totimestamp()
                         for x in [ "03/24/2012", "03/25/2012", "03/26/2012" ] ]

        # basic construction
        i = Interval(d1, d1)
        i = Interval(d1, d3)
        eq_(i.start, d1)
        eq_(i.end, d3)

        # assignment is allowed, but not verified
        i.start = d2
        #with assert_raises(IntervalError):
        #    i.end = d1
        i.start = d1
        i.end = d2

        # end before start
        with assert_raises(IntervalError):
            i = Interval(d3, d1)

        # compare
        assert(Interval(d1, d2) == Interval(d1, d2))
        assert(Interval(d1, d2) < Interval(d1, d3))
        assert(Interval(d1, d3) > Interval(d1, d2))
        assert(Interval(d1, d2) < Interval(d2, d3))
        assert(Interval(d1, d3) < Interval(d2, d3))
        assert(Interval(d2, d2) > Interval(d1, d3))
        assert(Interval(d3, d3) == Interval(d3, d3))
        with assert_raises(TypeError):  # was AttributeError, that's wrong
            x = (i == 123)

        # subset
        assert(Interval(d1, d3).subset(d1, d2) == Interval(d1, d2))
        with assert_raises(IntervalError):
            x = Interval(d2, d3).subset(d1, d2)

        # big integers and floats
        x = Interval(5000111222, 6000111222)
        eq_(str(x), "[5000111222.0 -> 6000111222.0]")
        x = Interval(123.45, 234.56)
        eq_(str(x), "[123.45 -> 234.56]")

        # misc
        i = Interval(d1, d2)
        eq_(repr(i), repr(eval(repr(i))))
        eq_(str(i), "[1332561600.0 -> 1332648000.0]")

    def test_interval_intersect(self):
        # Test Interval intersections
        dates = [ 100, 200, 300, 400 ]
        perm = list(itertools.permutations(dates, 2))
        prod = list(itertools.product(perm, perm))
        should_intersect = {
            False: [4, 5, 8, 20, 48, 56, 60, 96, 97, 100],
            True: [0, 1, 2, 12, 13, 14, 16, 17, 24, 25, 26, 28, 29,
                   32, 49, 50, 52, 53, 61, 62, 64, 65, 68, 98, 101, 104]
        }
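        # (Clarifying note, not in the original file: the indices above
        # refer to positions in `prod`.  The 4 dates give 12 ordered pairs,
        # so prod has 144 pair-of-pair combinations; pairs with start > end
        # raise IntervalError below and so appear in neither list.)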
        for i, ((a, b), (c, d)) in enumerate(prod):
            try:
                i1 = Interval(a, b)
                i2 = Interval(c, d)
                eq_(i1.intersects(i2), i2.intersects(i1))
                in_(i, should_intersect[i1.intersects(i2)])
            except IntervalError:
                assert(i not in should_intersect[True] and
                       i not in should_intersect[False])
        with assert_raises(AttributeError):
            x = i1.intersects(1234)

    def test_intervalset_construct(self):
        # Test IntervalSet construction
        dates = [ 100, 200, 300, 400 ]

        a = Interval(dates[0], dates[1])
        b = Interval(dates[1], dates[2])
        c = Interval(dates[0], dates[2])
        d = Interval(dates[2], dates[3])

        iseta = IntervalSet(a)
        isetb = IntervalSet([a, b])
        isetc = IntervalSet([a])
        ne_(iseta, isetb)
        eq_(iseta, isetc)
        with assert_raises(TypeError):
            x = iseta != 3
        ne_(IntervalSet(a), IntervalSet(b))

        # test iterator
        for interval in iseta:
            pass

        # overlap
        with assert_raises(IntervalError):
            x = IntervalSet([a, b, c])

        # bad types
        with assert_raises(Exception):
            x = IntervalSet([1, 2])

        iset = IntervalSet(isetb)   # test iterator
        eq_(iset, isetb)
        eq_(len(iset), 2)
        eq_(len(IntervalSet()), 0)

        # Test adding
        iset = IntervalSet(a)
        iset += IntervalSet(b)
        eq_(iset, IntervalSet([a, b]))
        iset = IntervalSet(a)
        iset += b
        eq_(iset, IntervalSet([a, b]))
        iset = IntervalSet(a) + IntervalSet(b)
        eq_(iset, IntervalSet([a, b]))
        iset = IntervalSet(b) + a
        eq_(iset, IntervalSet([a, b]))

        # A set consisting of [0-1],[1-2] should match a set consisting of [0-2]
        eq_(IntervalSet([a, b]), IntervalSet([c]))
        # Etc
        ne_(IntervalSet([a, d]), IntervalSet([c]))
        ne_(IntervalSet([c]), IntervalSet([a, d]))
        ne_(IntervalSet([c, d]), IntervalSet([b, d]))

        # misc
        eq_(repr(iset), repr(eval(repr(iset))))
        eq_(str(iset), "[[100.0 -> 200.0], [200.0 -> 300.0]]")

    def test_intervalset_geniset(self):
        # Test basic iset construction
        assert(makeset(" [----] ") ==
               makeset(" [-|--] "))

        assert(makeset("[] [--] ") +
               makeset(" [] [--]") ==
               makeset("[|] [-----]"))

        assert(makeset(" [-------]") ==
               makeset(" [-|-----|"))

    def test_intervalset_intersect(self):
        # Test intersection (&)
        with assert_raises(TypeError):  # was AttributeError
            x = makeset("[--]") & 1234

        assert(makeset("[---------]") &
               makeset(" [---] ") ==
               makeset(" [---] "))

        assert(makeset(" [---] ") &
               makeset("[---------]") ==
               makeset(" [---] "))

        assert(makeset(" [-----]") &
               makeset(" [-----] ") ==
               makeset(" [--] "))

        assert(makeset(" [--] [--]") &
               makeset(" [------] ") ==
               makeset(" [-] [-] "))

        assert(makeset(" [---]") &
               makeset(" [--] ") ==
               makeset(" "))

        assert(makeset(" [---]") &
               makeset(" [----] ") ==
               makeset(" . "))

        assert(makeset(" [-|---]") &
               makeset(" [-----|-] ") ==
               makeset(" [----] "))

        assert(makeset(" [-|-] ") &
               makeset(" [-|--|--] ") ==
               makeset(" [---] "))

        assert(makeset(" [----][--]") &
               makeset("[-] [--] []") ==
               makeset(" [] [-]. []"))

class TestIntervalDB:
    def test_dbinterval(self):
        # Test DBInterval class
        i = DBInterval(100, 200, 100, 200, 10000, 20000)
        eq_(i.start, 100)
        eq_(i.end, 200)
        eq_(i.db_start, 100)
        eq_(i.db_end, 200)
        eq_(i.db_startpos, 10000)
        eq_(i.db_endpos, 20000)
        eq_(repr(i), repr(eval(repr(i))))

        # end before start
        with assert_raises(IntervalError):
            i = DBInterval(200, 100, 100, 200, 10000, 20000)

        # db_start too late
        with assert_raises(IntervalError):
            i = DBInterval(100, 200, 150, 200, 10000, 20000)

        # db_end too soon
        with assert_raises(IntervalError):
            i = DBInterval(100, 200, 100, 150, 10000, 20000)

        # actual start, end can be a subset
        a = DBInterval(150, 200, 100, 200, 10000, 20000)
        b = DBInterval(100, 150, 100, 200, 10000, 20000)
        c = DBInterval(150, 150, 100, 200, 10000, 20000)

        # Make a set of DBIntervals
        iseta = IntervalSet([a, b])
        isetc = IntervalSet(c)
        assert(iseta.intersects(a))
        assert(iseta.intersects(b))

        # Test subset
        with assert_raises(IntervalError):
            x = a.subset(150, 250)

        # Subset of those IntervalSets should still contain DBIntervals
        for i in IntervalSet(iseta.intersection(Interval(125, 250))):
            assert(isinstance(i, DBInterval))

class TestIntervalTree:

    def test_interval_tree(self):
        import random
        random.seed(1234)

        # make a set of 500 intervals
        iset = IntervalSet()
        j = 500
        for i in random.sample(xrange(j), j):
            interval = Interval(i, i+1)
            iset += interval

        # remove about half of them
        for i in random.sample(xrange(j), j):
            if random.randint(0, 1):
                iset -= Interval(i, i+1)

        # try removing an interval that doesn't exist
        with assert_raises(IntervalError):
            iset -= Interval(1234, 5678)

        # show the graph
        if False:
            iset.tree.render_dot_live()

class TestIntervalSpeed:
    @unittest.skip("this is slow")
    def test_interval_speed(self):
        import yappi
        import time
        import aplotter
        import random

        print
        yappi.start()
        speeds = {}
        for j in [ 2**x for x in range(5, 18) ]:
            start = time.time()
            iset = IntervalSet()
            for i in random.sample(xrange(j), j):
                interval = Interval(i, i+1)
                iset += interval
            speed = (time.time() - start) * 1000000.0
            printf("%d: %g μs (%g μs each)\n", j, speed, speed/j)
            speeds[j] = speed
        aplotter.plot(speeds.keys(), speeds.values(), plot_slope=True)
        yappi.stop()
        yappi.print_stats(sort_type=yappi.SORTTYPE_TTOT, limit=10)
@@ -1,54 +0,0 @@
import nilmdb
from nilmdb.printf import *

import nose
from nose.tools import *
from nose.tools import assert_raises
import threading
import time

from test_helpers import *

import nilmdb.iteratorizer

def func_with_callback(a, b, callback):
    callback(a)
    callback(b)
    callback(a+b)
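
# (Sketch, not part of the original file.)  The usual way to turn a
# callback-style producer like func_with_callback into an iterator, which
# is the interface Iteratorizer exposes below, is a worker thread feeding
# a queue, roughly:
#
#     q = Queue.Queue()
#     def worker():
#         func_with_callback(1, 2, q.put)   # each callback value is queued
#         q.put(done_sentinel)              # then signal completion
#     threading.Thread(target = worker).start()
#     # ... and next() blocks on q.get() until the sentinel arrives.
#
# The tests below rely only on the public iterator behavior, not on any
# particular implementation.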

class TestIteratorizer(object):
    def test(self):

        # First try it with a normal callback
        self.result = ""
        def cb(x):
            self.result += str(x)
        func_with_callback(1, 2, cb)
        eq_(self.result, "123")

        # Now make it an iterator
        it = nilmdb.iteratorizer.Iteratorizer(lambda x:
                                              func_with_callback(1, 2, x))
        result = ""
        for i in it:
            result += str(i)
        eq_(result, "123")

        # Make sure things work when an exception occurs
        it = nilmdb.iteratorizer.Iteratorizer(lambda x:
                                              func_with_callback(1, "a", x))
        result = ""
        with assert_raises(TypeError) as e:
            for i in it:
                result += str(i)
        eq_(result, "1a")

        # Now try to trigger the case where we stop iterating
        # mid-generator, and expect the iteratorizer to clean up after
        # itself.  This doesn't have a particular result in the test,
        # but gains coverage.
        def foo():
            it = nilmdb.iteratorizer.Iteratorizer(lambda x:
                                                  func_with_callback(1, 2, x))
            it.next()
        foo()
@@ -1,250 +0,0 @@
# -*- coding: utf-8 -*-

import nilmdb

from nilmdb.printf import *

from nose.tools import *
from nose.tools import assert_raises
import distutils.version
import itertools
import os
import shutil
import sys
import cherrypy
import threading
import urllib2
from urllib2 import urlopen, HTTPError
import Queue
import cStringIO
import random
import unittest

from test_helpers import *

from nilmdb.layout import *

class TestLayouts(object):
    # Some nilmdb.layout tests.  Not complete, just fills in missing
    # coverage.
    def test_layouts(self):
        x = nilmdb.layout.get_named("PrepData").description()
        y = nilmdb.layout.get_named("float32_8").description()
        eq_(repr(x), repr(y))

    def test_parsing(self):
        self.real_t_parsing("PrepData", "RawData", "RawNotchedData")
        self.real_t_parsing("float32_8", "uint16_6", "uint16_9")
    def real_t_parsing(self, name_prep, name_raw, name_rawnotch):
        # invalid layouts
        with assert_raises(TypeError) as e:
            parser = Parser("NoSuchLayout")
        with assert_raises(TypeError) as e:
            parser = Parser("float32")

        # too little data
        parser = Parser(name_prep)
        data = ( "1234567890.000000 1.1 2.2 3.3 4.4 5.5\n" +
                 "1234567890.100000 1.1 2.2 3.3 4.4 5.5\n" )
        with assert_raises(ParserError) as e:
            parser.parse(data)
        in_("error", str(e.exception))

        # too much data
        parser = Parser(name_prep)
        data = ( "1234567890.000000 1.1 2.2 3.3 4.4 5.5 6.6 7.7 8.8 9.9\n" +
                 "1234567890.100000 1.1 2.2 3.3 4.4 5.5 6.6 7.7 8.8 9.9\n" )
        with assert_raises(ParserError) as e:
            parser.parse(data)
        in_("error", str(e.exception))

        # just right
        parser = Parser(name_prep)
        data = ( "1234567890.000000 1.1 2.2 3.3 4.4 5.5 6.6 7.7 8.8\n" +
                 "1234567890.100000 1.1 2.2 3.3 4.4 5.5 6.6 7.7 8.8\n" )
        parser.parse(data)
        eq_(parser.min_timestamp, 1234567890.0)
        eq_(parser.max_timestamp, 1234567890.1)
        eq_(parser.data, [[1234567890.0,1.1,2.2,3.3,4.4,5.5,6.6,7.7,8.8],
                          [1234567890.1,1.1,2.2,3.3,4.4,5.5,6.6,7.7,8.8]])

        # try RawData too, with clamping
        parser = Parser(name_raw)
        data = ( "1234567890.000000 1 2 3 4 5 6\n" +
                 "1234567890.100000 1 2 3 4 5 6\n" )
        parser.parse(data)
        eq_(parser.data, [[1234567890.0,1,2,3,4,5,6],
                          [1234567890.1,1,2,3,4,5,6]])

        # pass an instantiated class
        parser = Parser(get_named(name_rawnotch))
        data = ( "1234567890.000000 1 2 3 4 5 6 7 8 9\n" +
                 "1234567890.100000 1 2 3 4 5 6 7 8 9\n" )
        parser.parse(data)

        # non-monotonic
        parser = Parser(name_raw)
        data = ( "1234567890.100000 1 2 3 4 5 6\n" +
                 "1234567890.000000 1 2 3 4 5 6\n" )
        with assert_raises(ParserError) as e:
            parser.parse(data)
        in_("not monotonically increasing", str(e.exception))

        # RawData with values out of bounds
        parser = Parser(name_raw)
        data = ( "1234567890.000000 1 2 3 4 500000 6\n" +
                 "1234567890.100000 1 2 3 4 5 6\n" )
        with assert_raises(ParserError) as e:
            parser.parse(data)
        in_("value out of range", str(e.exception))

        # Empty data should work but is useless
        parser = Parser(name_raw)
        data = ""
        parser.parse(data)
        assert(parser.min_timestamp is None)
        assert(parser.max_timestamp is None)

    def test_formatting(self):
        self.real_t_formatting("PrepData", "RawData", "RawNotchedData")
        self.real_t_formatting("float32_8", "uint16_6", "uint16_9")
    def real_t_formatting(self, name_prep, name_raw, name_rawnotch):
        # invalid layout
        with assert_raises(TypeError) as e:
            formatter = Formatter("NoSuchLayout")

        # too little data
        formatter = Formatter(name_prep)
        data = [ [ 1234567890.000000, 1.1, 2.2, 3.3, 4.4, 5.5 ],
                 [ 1234567890.100000, 1.1, 2.2, 3.3, 4.4, 5.5 ] ]
        with assert_raises(FormatterError) as e:
            formatter.format(data)
        in_("error", str(e.exception))

        # too much data
        formatter = Formatter(name_prep)
        data = [ [ 1234567890.000000, 1, 2, 3, 4, 5, 6, 7, 8, 9 ],
                 [ 1234567890.100000, 1, 2, 3, 4, 5, 6, 7, 8, 9 ] ]
        with assert_raises(FormatterError) as e:
            formatter.format(data)
        in_("error", str(e.exception))

        # just right
        formatter = Formatter(name_prep)
        data = [ [ 1234567890.000000, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8 ],
                 [ 1234567890.100000, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8 ] ]
        text = formatter.format(data)
        eq_(text,
            "1234567890.000000 1.100000 2.200000 3.300000 4.400000 " +
            "5.500000 6.600000 7.700000 8.800000\n" +
            "1234567890.100000 1.100000 2.200000 3.300000 4.400000 " +
            "5.500000 6.600000 7.700000 8.800000\n")

        # try RawData too
        formatter = Formatter(name_raw)
        data = [ [ 1234567890.000000, 1, 2, 3, 4, 5, 6 ],
                 [ 1234567890.100000, 1, 2, 3, 4, 5, 6 ] ]
        text = formatter.format(data)
        eq_(text,
            "1234567890.000000 1 2 3 4 5 6\n" +
            "1234567890.100000 1 2 3 4 5 6\n")

        # pass an instantiated class
        formatter = Formatter(get_named(name_rawnotch))
        data = [ [ 1234567890.000000, 1, 2, 3, 4, 5, 6, 7, 8, 9 ],
                 [ 1234567890.100000, 1, 2, 3, 4, 5, 6, 7, 8, 9 ] ]
        text = formatter.format(data)
        eq_(text,
            "1234567890.000000 1 2 3 4 5 6 7 8 9\n" +
            "1234567890.100000 1 2 3 4 5 6 7 8 9\n")

        # Empty data should work but is useless
        formatter = Formatter(name_raw)
        data = []
        text = formatter.format(data)
        eq_(text, "")

    def test_roundtrip(self):
        self.real_t_roundtrip("PrepData", "RawData", "RawNotchedData")
        self.real_t_roundtrip("float32_8", "uint16_6", "uint16_9")
    def real_t_roundtrip(self, name_prep, name_raw, name_rawnotch):
        # Verify that textual data passed into the Parser, and then
        # back through the Formatter, then back into the Parser,
        # gives identical parsed representations
        random.seed(12345)
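        # (Note, not in the original file: comparing parsed data rather
        # than raw text is deliberate; format() is free to re-print floats
        # with a different precision than the input used, so
        # format(parse(text)) == text would be too strict a property.)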

        def do_roundtrip(layout, datagen):
            for i in range(100):
                rows = random.randint(1, 100)
                data = ""
                ts = 1234567890
                for r in range(rows):
                    ts += random.uniform(0, 1)
                    row = sprintf("%f", ts) + " "
                    row += " ".join(datagen())
                    row += "\n"
                    data += row
                parser1 = Parser(layout)
                formatter = Formatter(layout)
                parser2 = Parser(layout)
                parser1.parse(data)
                parser2.parse(formatter.format(parser1.data))
                eq_(parser1.data, parser2.data)

        def datagen():
            return [ sprintf("%f", random.uniform(-1000, 1000))
                     for x in range(8) ]
        do_roundtrip(name_prep, datagen)

        def datagen():
            return [ sprintf("%d", random.randint(0, 65535))
                     for x in range(6) ]
        do_roundtrip(name_raw, datagen)

        def datagen():
            return [ sprintf("%d", random.randint(0, 65535))
                     for x in range(9) ]
        do_roundtrip(name_rawnotch, datagen)

class TestLayoutSpeed:
    @unittest.skip("this is slow")
    def test_layout_speed(self):
        import time

        random.seed(54321)

        def do_speedtest(layout, datagen, rows = 5000, times = 100):
            # Build data once
            data = ""
            ts = 1234567890
            for r in range(rows):
                ts += random.uniform(0, 1)
                row = sprintf("%f", ts) + " "
                row += " ".join(datagen())
                row += "\n"
                data += row

            # Do lots of roundtrips
            start = time.time()
            for i in range(times):
                parser = Parser(layout)
                formatter = Formatter(layout)
                parser.parse(data)
                data = formatter.format(parser.data)
            elapsed = time.time() - start
            printf("roundtrip %s: %d ms, %.1f μs/row, %d rows/sec\n",
                   layout,
                   elapsed * 1e3,
                   (elapsed * 1e6) / (rows * times),
                   (rows * times) / elapsed)

        print ""
        def datagen():
            return [ sprintf("%f", random.uniform(-1000, 1000))
                     for x in range(10) ]
        do_speedtest("float32_10", datagen)

        def datagen():
            return [ sprintf("%d", random.randint(0, 65535))
                     for x in range(10) ]
        do_speedtest("uint16_10", datagen)
@@ -1,201 +0,0 @@
import nilmdb

from nose.tools import *
from nose.tools import assert_raises
import distutils.version
import simplejson as json
import itertools
import os
import shutil
import sys
import cherrypy
import threading
import urllib2
from urllib2 import urlopen, HTTPError
import Queue
import cStringIO

testdb = "tests/testdb"

#@atexit.register
#def cleanup():
#    os.unlink(testdb)

from test_helpers import *

class Test00Nilmdb(object):   # named 00 so it runs first
    def test_NilmDB(self):
        recursive_unlink(testdb)

        with assert_raises(IOError):
            nilmdb.NilmDB("/nonexistant-db/foo")

        db = nilmdb.NilmDB(testdb)
        db.close()
        db = nilmdb.NilmDB(testdb, sync=False)
        db.close()

        # test timer, just to get coverage
        capture = cStringIO.StringIO()
        old = sys.stdout
        sys.stdout = capture
        with nilmdb.Timer("test"):
            nilmdb.timer.time.sleep(0.01)
        sys.stdout = old
        in_("test: ", capture.getvalue())

    def test_stream(self):
        db = nilmdb.NilmDB(testdb, sync=False)
        eq_(db.stream_list(), [])

        # Bad path
        with assert_raises(ValueError):
            db.stream_create("foo/bar/baz", "PrepData")
        with assert_raises(ValueError):
            db.stream_create("/foo", "PrepData")
        # Bad layout type
        with assert_raises(ValueError):
            db.stream_create("/newton/prep", "NoSuchLayout")
        db.stream_create("/newton/prep", "PrepData")
        db.stream_create("/newton/raw", "RawData")
        db.stream_create("/newton/zzz/rawnotch", "RawNotchedData")

        # Verify we got 3 streams
        eq_(db.stream_list(), [ ["/newton/prep", "PrepData"],
                                ["/newton/raw", "RawData"],
                                ["/newton/zzz/rawnotch", "RawNotchedData"]
                              ])
        # Match just one type or one path
        eq_(db.stream_list(layout="RawData"), [ ["/newton/raw", "RawData"] ])
        eq_(db.stream_list(path="/newton/raw"), [ ["/newton/raw", "RawData"] ])

        # Verify that columns were made right
        eq_(len(db.h5file.getNode("/newton/prep").cols), 9)
        eq_(len(db.h5file.getNode("/newton/raw").cols), 7)
        eq_(len(db.h5file.getNode("/newton/zzz/rawnotch").cols), 10)
        assert(not db.h5file.getNode("/newton/prep").colindexed["timestamp"])
        assert(not db.h5file.getNode("/newton/prep").colindexed["c1"])

        # Set / get metadata
        eq_(db.stream_get_metadata("/newton/prep"), {})
        eq_(db.stream_get_metadata("/newton/raw"), {})
        meta1 = { "description": "The Data",
                  "v_scale": "1.234" }
        meta2 = { "description": "The Data" }
        meta3 = { "v_scale": "1.234" }
        db.stream_set_metadata("/newton/prep", meta1)
        db.stream_update_metadata("/newton/prep", {})
        db.stream_update_metadata("/newton/raw", meta2)
        db.stream_update_metadata("/newton/raw", meta3)
        eq_(db.stream_get_metadata("/newton/prep"), meta1)
        eq_(db.stream_get_metadata("/newton/raw"), meta1)

        db.close()

class TestBlockingServer(object):
    def setUp(self):
        self.db = nilmdb.NilmDB(testdb, sync=False)

    def tearDown(self):
        self.db.close()

    def test_blocking_server(self):
        # Start web app on a custom port
        self.server = nilmdb.Server(self.db, host = "127.0.0.1",
                                    port = 12380, stoppable = True)

        # Run it
        event = threading.Event()
        def run_server():
            self.server.start(blocking = True, event = event)
        thread = threading.Thread(target = run_server)
        thread.start()
        event.wait(timeout = 2)

        # Send request to exit.
        req = urlopen("http://127.0.0.1:12380/exit/", timeout = 1)

        # Wait for it
        thread.join()

def geturl(path):
    req = urlopen("http://127.0.0.1:12380" + path, timeout = 10)
    return req.read()

def getjson(path):
    return json.loads(geturl(path))

class TestServer(object):

    def setUp(self):
        # Start web app on a custom port
        self.db = nilmdb.NilmDB(testdb, sync=False)
        self.server = nilmdb.Server(self.db, host = "127.0.0.1",
                                    port = 12380, stoppable = False)
        self.server.start(blocking = False)

    def tearDown(self):
        # Close web app
        self.server.stop()
        self.db.close()

    def test_server(self):
        # Make sure we can't force an exit, and test other 404 errors
        for url in [ "/exit", "/", "/favicon.ico" ]:
            with assert_raises(HTTPError) as e:
                geturl(url)
            eq_(e.exception.code, 404)

        # Check version
        eq_(distutils.version.StrictVersion(getjson("/version")),
            distutils.version.StrictVersion(self.server.version))

    def test_stream_list(self):
        # Known streams that got populated by an earlier test (test_nilmdb)
        streams = getjson("/stream/list")

        eq_(streams, [
            ['/newton/prep', 'PrepData'],
            ['/newton/raw', 'RawData'],
            ['/newton/zzz/rawnotch', 'RawNotchedData'],
        ])

        streams = getjson("/stream/list?layout=RawData")
        eq_(streams, [['/newton/raw', 'RawData']])

        streams = getjson("/stream/list?layout=NoSuchLayout")
        eq_(streams, [])

    def test_stream_metadata(self):
        with assert_raises(HTTPError) as e:
            getjson("/stream/get_metadata?path=foo")
        eq_(e.exception.code, 404)

        data = getjson("/stream/get_metadata?path=/newton/prep")
        eq_(data, {'description': 'The Data', 'v_scale': '1.234'})

        data = getjson("/stream/get_metadata?path=/newton/prep"
                       "&key=v_scale")
        eq_(data, {'v_scale': '1.234'})

        data = getjson("/stream/get_metadata?path=/newton/prep"
                       "&key=v_scale&key=description")
        eq_(data, {'description': 'The Data', 'v_scale': '1.234'})

        data = getjson("/stream/get_metadata?path=/newton/prep"
                       "&key=v_scale&key=foo")
        eq_(data, {'foo': None, 'v_scale': '1.234'})

        data = getjson("/stream/get_metadata?path=/newton/prep"
                       "&key=foo")
        eq_(data, {'foo': None})

    def test_insert(self):
        # GET instead of POST (no body)
        # (actual POST test is done by client code)
        with assert_raises(HTTPError) as e:
            getjson("/stream/insert?path=/newton/prep")
        eq_(e.exception.code, 400)
@@ -1,27 +0,0 @@
import nilmdb
from nilmdb.printf import *

from nose.tools import *
from nose.tools import assert_raises
from cStringIO import StringIO
import sys

from test_helpers import *

class TestPrintf(object):
    def test_printf(self):
        old_stdout = sys.stdout
        sys.stdout = test1 = StringIO()
        test2 = StringIO()
        test3 = ""
        try:
            printf("hello, world: %d", 123)
            fprintf(test2, "hello too: %d", 123)
            test3 = sprintf("hello three: %d", 123)
        except:
            sys.stdout = old_stdout
            raise
        sys.stdout = old_stdout
        eq_(test1.getvalue(), "hello, world: 123")
        eq_(test2.getvalue(), "hello too: 123")
        eq_(test3, "hello three: 123")
@@ -1,75 +0,0 @@
# -*- coding: utf-8 -*-

import nilmdb
from nilmdb.printf import *

from nose.tools import *
from nose.tools import assert_raises

from nilmdb.rbtree import RBTree, RBNode

from test_helpers import *
import unittest

render = False

class TestRBTree:
    def test_rbtree(self):
        rb = RBTree()
        rb.insert(RBNode(None, 10000, 10001))
        rb.insert(RBNode(None, 10004, 10007))
        rb.insert(RBNode(None, 10001, 10002))
        s = rb.render_dot()
        # There was a typo that gave the RBTree a loop in this case.
        # Verify that the dot isn't too big.
        assert(len(s.splitlines()) < 30)
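
    # (Note, not in the original file: the in-order inserts below are the
    # worst case for an unbalanced binary search tree; red-black
    # rebalancing is what keeps these 500-node trees shallow.)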

    def test_rbtree_big(self):
        import random
        random.seed(1234)

        # make a set of 500 intervals, inserted in order
        rb = RBTree()
        j = 500
        for i in xrange(j):
            rb.insert(RBNode(None, i, i+1))

        # show the graph
        if render:
            rb.render_dot_live("in-order insert")

        # remove about half of them
        for i in random.sample(xrange(j), j):
            if random.randint(0, 1):
                rb.delete(rb.find(i, i+1))

        # show the graph
        if render:
            rb.render_dot_live("in-order insert, random delete")

        # make a set of 500 intervals, inserted at random
        rb = RBTree()
        j = 500
        for i in random.sample(xrange(j), j):
            rb.insert(RBNode(None, i, i+1))

        # show the graph
        if render:
            rb.render_dot_live("random insert")

        # remove about half of them
        for i in random.sample(xrange(j), j):
            if random.randint(0, 1):
                rb.delete(rb.find(i, i+1))

        # show the graph
        if render:
            rb.render_dot_live("random insert, random delete")

        # in-order insert of 250 more
        for i in xrange(250):
            rb.insert(RBNode(None, i+500, i+501))

        # show the graph
        if render:
            rb.render_dot_live("random insert, random delete, in-order insert")
@@ -1,72 +0,0 @@
import nilmdb
from nilmdb.printf import *

import nose
from nose.tools import *
from nose.tools import assert_raises
import threading
import time

from test_helpers import *

#raise nose.exc.SkipTest("Skip these")

class Foo(object):
    val = 0

    def fail(self):
        raise Exception("you asked me to do this")

    def test(self, debug = False):
        # purposely not thread-safe
        oldval = self.val
        newval = oldval + 1
        time.sleep(0.05)
        self.val = newval
        if debug:
            printf("[%s] value changed: %d -> %d\n",
                   threading.current_thread().name, oldval, newval)
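
# (Clarifying note, not part of the original file: Foo.test() is a
# deliberate read-modify-write race.  Twenty threads each read self.val,
# sleep 50 ms, then write back oldval + 1; run unserialized, they all read
# stale values and the final count falls short of 20.  WrapObject forces
# the calls to execute one at a time, so the serialized test reaches
# exactly 20.)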

class Base(object):

    def test_wrapping(self):
        self.foo.test()
        with assert_raises(Exception):
            self.foo.fail()

    def test_threaded(self):
        def func(foo):
            foo.test()
        threads = []
        for i in xrange(20):
            threads.append(threading.Thread(target = func, args = (self.foo,)))
        for t in threads:
            t.start()
        for t in threads:
            t.join()
        self.verify_result()

class TestUnserialized(Base):
    def setUp(self):
        self.foo = Foo()

    def verify_result(self):
        # This should have failed to increment properly
        assert(self.foo.val != 20)

class TestSerialized(Base):
    def setUp(self):
        self.realfoo = Foo()
        self.foo = nilmdb.serializer.WrapObject(self.realfoo)

    def tearDown(self):
        del self.foo

    def verify_result(self):
        # This should have worked
        eq_(self.realfoo.val, 20)

    def test_attribute(self):
        # Can't wrap attributes yet
        with assert_raises(TypeError):
            self.foo.val
@@ -1,91 +0,0 @@
import nilmdb
from nilmdb.printf import *

import datetime_tz

from nose.tools import *
from nose.tools import assert_raises
import os
import sys
import cStringIO

from test_helpers import *

class TestTimestamper(object):

    # Not a very comprehensive test, but it's good enough.

    def test_timestamper(self):
        def join(list):
            return "\n".join(list) + "\n"

        start = datetime_tz.datetime_tz.smartparse("03/24/2012").totimestamp()
        lines_in = [ "hello", "world", "hello world", "# commented out" ]
        lines_out = [ "1332561600.000000 hello",
                      "1332561600.000125 world",
                      "1332561600.000250 hello world" ]

        # full
        input = cStringIO.StringIO(join(lines_in))
        ts = nilmdb.timestamper.TimestamperRate(input, start, 8000)
        foo = ts.readlines()
        eq_(foo, join(lines_out))
        in_("TimestamperRate(..., start=", str(ts))

        # first 30 or so bytes means the first 2 lines
        input = cStringIO.StringIO(join(lines_in))
        ts = nilmdb.timestamper.TimestamperRate(input, start, 8000)
        foo = ts.readlines(30)
        eq_(foo, join(lines_out[0:2]))

        # stop iteration early
        input = cStringIO.StringIO(join(lines_in))
        ts = nilmdb.timestamper.TimestamperRate(input, start, 8000,
                                                1332561600.000200)
        foo = ""
        for line in ts:
            foo += line
        eq_(foo, join(lines_out[0:2]))

        # stop iteration early (readlines)
        input = cStringIO.StringIO(join(lines_in))
        ts = nilmdb.timestamper.TimestamperRate(input, start, 8000,
                                                1332561600.000200)
        foo = ts.readlines()
        eq_(foo, join(lines_out[0:2]))

        # stop iteration really early
        input = cStringIO.StringIO(join(lines_in))
        ts = nilmdb.timestamper.TimestamperRate(input, start, 8000,
                                                1332561600.000000)
        foo = ts.readlines()
        eq_(foo, "")

        # use iterator
        input = cStringIO.StringIO(join(lines_in))
        ts = nilmdb.timestamper.TimestamperRate(input, start, 8000)
        foo = ""
        for line in ts:
            foo += line
        eq_(foo, join(lines_out))

        # check that TimestamperNow gives similar result
        input = cStringIO.StringIO(join(lines_in))
        ts = nilmdb.timestamper.TimestamperNow(input)
        foo = ts.readlines()
        ne_(foo, join(lines_out))
        eq_(len(foo), len(join(lines_out)))
        eq_(str(ts), "TimestamperNow(...)")

        # Test passing a file (should be empty)
        ts = nilmdb.timestamper.TimestamperNow("/dev/null")
        for line in ts:
            raise AssertionError
        ts.close()

        # Test the null timestamper
        input = cStringIO.StringIO(join(lines_out))   # note: lines_out
        ts = nilmdb.timestamper.TimestamperNull(input)
        foo = ts.readlines()
        eq_(foo, join(lines_out))
        eq_(str(ts), "TimestamperNull(...)")
@@ -1,54 +0,0 @@
nosetests

32: 386 μs (12.0625 μs each)
64: 672.102 μs (10.5016 μs each)
128: 1510.86 μs (11.8036 μs each)
256: 2782.11 μs (10.8676 μs each)
512: 5591.87 μs (10.9216 μs each)
1024: 12812.1 μs (12.5119 μs each)
2048: 21835.1 μs (10.6617 μs each)
4096: 46059.1 μs (11.2449 μs each)
8192: 114127 μs (13.9315 μs each)
16384: 181217 μs (11.0606 μs each)
32768: 419649 μs (12.8067 μs each)
65536: 804320 μs (12.2729 μs each)
131072: 1.73534e+06 μs (13.2396 μs each)
262144: 3.74451e+06 μs (14.2842 μs each)
524288: 8.8694e+06 μs (16.917 μs each)
1048576: 1.69993e+07 μs (16.2118 μs each)
2097152: 3.29387e+07 μs (15.7064 μs each)

[aplotter ASCII plot: total insert time vs. set size, rising from 386 μs at n=32 to 3.29387e+07 μs at n=2097152]

name                                                           #n        tsub       ttot       tavg
..vl/lees/bucket/nilm/nilmdb/nilmdb/interval.py.__iadd__:184   4194272   10.025323  30.262723  0.000007
..evl/lees/bucket/nilm/nilmdb/nilmdb/interval.py.__init__:27   4194272   24.715377  24.715377  0.000006
../lees/bucket/nilm/nilmdb/nilmdb/interval.py.intersects:239   4194272   6.705053   12.577620  0.000003
..im/devl/lees/bucket/nilm/nilmdb/tests/aplotter.py.plot:404   1         0.000048   0.001412   0.001412
../lees/bucket/nilm/nilmdb/tests/aplotter.py.plot_double:311   1         0.000106   0.001346   0.001346
..vl/lees/bucket/nilm/nilmdb/tests/aplotter.py.plot_data:201   1         0.000098   0.000672   0.000672
..vl/lees/bucket/nilm/nilmdb/tests/aplotter.py.plot_line:241   16        0.000298   0.000496   0.000031
..jim/devl/lees/bucket/nilm/nilmdb/nilmdb/printf.py.printf:4   17        0.000252   0.000334   0.000020
..vl/lees/bucket/nilm/nilmdb/tests/aplotter.py.transposed:39   1         0.000229   0.000235   0.000235
..vl/lees/bucket/nilm/nilmdb/tests/aplotter.py.y_reversed:45   1         0.000151   0.000174   0.000174

name           tid              fname                                      ttot       scnt
_MainThread    47269783682784   ..b/python2.7/threading.py.setprofile:88   64.746000  1
20
timeit.sh
20
timeit.sh
@@ -1,20 +0,0 @@
./nilmtool.py create /bpnilm/2/raw RawData

if true; then
    time zcat /home/jim/bpnilm-data/snapshot-1-20110513-110002.raw.gz | ./nilmtool.py insert -s 20110513-110000 /bpnilm/2/raw
    time zcat /home/jim/bpnilm-data/snapshot-1-20110513-110002.raw.gz | ./nilmtool.py insert -s 20110513-120001 /bpnilm/2/raw
else
    for i in $(seq 2000 2050); do
        time zcat /home/jim/bpnilm-data/snapshot-1-20110513-110002.raw.gz | ./nilmtool.py insert -s ${i}0101-010001 /bpnilm/2/raw
        time zcat /home/jim/bpnilm-data/snapshot-1-20110513-110002.raw.gz | ./nilmtool.py insert -s ${i}0101-020002 /bpnilm/2/raw
        time zcat /home/jim/bpnilm-data/snapshot-1-20110513-110002.raw.gz | ./nilmtool.py insert -s ${i}0101-030003 /bpnilm/2/raw
        time zcat /home/jim/bpnilm-data/snapshot-1-20110513-110002.raw.gz | ./nilmtool.py insert -s ${i}0101-040004 /bpnilm/2/raw
        time zcat /home/jim/bpnilm-data/snapshot-1-20110513-110002.raw.gz | ./nilmtool.py insert -s ${i}0101-050005 /bpnilm/2/raw
        time zcat /home/jim/bpnilm-data/snapshot-1-20110513-110002.raw.gz | ./nilmtool.py insert -s ${i}0101-060006 /bpnilm/2/raw
        time zcat /home/jim/bpnilm-data/snapshot-1-20110513-110002.raw.gz | ./nilmtool.py insert -s ${i}0101-070007 /bpnilm/2/raw
        time zcat /home/jim/bpnilm-data/snapshot-1-20110513-110002.raw.gz | ./nilmtool.py insert -s ${i}0101-080008 /bpnilm/2/raw
        time zcat /home/jim/bpnilm-data/snapshot-1-20110513-110002.raw.gz | ./nilmtool.py insert -s ${i}0101-090009 /bpnilm/2/raw
        time zcat /home/jim/bpnilm-data/snapshot-1-20110513-110002.raw.gz | ./nilmtool.py insert -s ${i}0101-100010 /bpnilm/2/raw
    done
fi