Compare commits
nilmdb-0.2...nilmdb-1.2 (97 commits)

Commits (SHA1):
19d27c31bc
28310fe886
1ccc2bce7e
00237e30b2
521ff88f7c
64897a1dd1
41ce8480bb
204a6ecb15
5db3b186a4
fe640cf421
ca67c79fe4
8917bcd4bf
a75ec98673
e476338d61
d752b882f2
ade27773e6
0c1a1d2388
e3f335dfe5
7a191c0ebb
55bf11e393
e90dcd10f3
7d44f4eaa0
f541432d44
aa4e32f78a
2bc1416c00
68bbbf757d
3df96fdfdd
740ab76eaf
ce13a47fea
50a4a60786
14afa02db6
cc990d6ce4
0f5162e0c0
b26cd52f8c
236d925a1d
a4a4bc61ba
3d82888580
749b878904
f396e3934c
dd7594b5fa
4ac1beee6d
8c0ce736d8
8858c9426f
9123ccb583
5dce851bef
5b0441de6b
317c53ab6f
7db4411462
422317850e
965537d8cb
0dcdec5949
7fce305a1d
dfbbe23512
7761a91242
9b06e46bf1
171e6f1871
1431e41d16
a49c655816
30e3ffc0e9
db7211c3a9
c6d57cf5c3
ca5253ddee
e19da84b2e
3e8e3542fd
2f7365412d
bba9ad131e
ee24380d1f
bfcd91acf8
d97291d4d3
a61fbbcf45
5adc8fd0a7
251a486c28
1edb96a0bd
52e674a192
e241c13bf1
b53ff31212
2045e89f24
841b2dab5c
d634f7d3cf
1593e181a3
8e781506de
f6a2c7620a
6c30e5ab2f
810eac4e61
d9bb3ab7ab
21d0e90bd9
f071d749ce
d95c354595
9bcd8183f6
5c531d8273
3fe3e2ca95
f01e781469
e6180a5a81
a9d31b46ed
b01f23ed99
842bf21411
750d9e3c38
@@ -7,4 +7,4 @@
 exclude_lines =
     pragma: no cover
    if 0:
-omit = nilmdb/utils/datetime_tz*
+omit = nilmdb/utils/datetime_tz*,nilmdb/scripts,nilmdb/_version.py
.gitattributes (vendored, new file, 1 line)
@@ -0,0 +1 @@
+nilmdb/_version.py export-subst
.gitignore (vendored, 4 additions)
@@ -18,6 +18,10 @@ nilmdb/server/rbtree.so
 dist/
 nilmdb.egg-info/
 
+# This gets generated as needed by setup.py
+MANIFEST.in
+MANIFEST
+
 # Misc
 timeit*out
 
.pylintrc (new file, 250 lines)
@@ -0,0 +1,250 @@
+# -*- conf -*-
+[MASTER]
+
+# Specify a configuration file.
+#rcfile=
+
+# Python code to execute, usually for sys.path manipulation such as
+# pygtk.require().
+#init-hook=
+
+# Profiled execution.
+profile=no
+
+# Add files or directories to the blacklist. They should be base names, not
+# paths.
+ignore=datetime_tz
+
+# Pickle collected data for later comparisons.
+persistent=no
+
+# List of plugins (as comma separated values of python modules names) to load,
+# usually to register additional checkers.
+load-plugins=
+
+
+[MESSAGES CONTROL]
+
+# Enable the message, report, category or checker with the given id(s). You can
+# either give multiple identifier separated by comma (,) or put this option
+# multiple time.
+#enable=
+
+# Disable the message, report, category or checker with the given id(s). You
+# can either give multiple identifier separated by comma (,) or put this option
+# multiple time (only on the command line, not in the configuration file where
+# it should appear only once).
+disable=C0111,R0903,R0201,R0914,R0912,W0142,W0703,W0702
+
+
+[REPORTS]
+
+# Set the output format. Available formats are text, parseable, colorized, msvs
+# (visual studio) and html
+output-format=parseable
+
+# Include message's id in output
+include-ids=yes
+
+# Put messages in a separate file for each module / package specified on the
+# command line instead of printing them on stdout. Reports (if any) will be
+# written in a file name "pylint_global.[txt|html]".
+files-output=no
+
+# Tells whether to display a full report or only the messages
+reports=yes
+
+# Python expression which should return a note less than 10 (10 is the highest
+# note). You have access to the variables errors warning, statement which
+# respectively contain the number of errors / warnings messages and the total
+# number of statements analyzed. This is used by the global evaluation report
+# (RP0004).
+evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
+
+# Add a comment according to your evaluation note. This is used by the global
+# evaluation report (RP0004).
+comment=no
+
+
+[SIMILARITIES]
+
+# Minimum lines number of a similarity.
+min-similarity-lines=4
+
+# Ignore comments when computing similarities.
+ignore-comments=yes
+
+# Ignore docstrings when computing similarities.
+ignore-docstrings=yes
+
+
+[TYPECHECK]
+
+# Tells whether missing members accessed in mixin class should be ignored. A
+# mixin class is detected if its name ends with "mixin" (case insensitive).
+ignore-mixin-members=yes
+
+# List of classes names for which member attributes should not be checked
+# (useful for classes with attributes dynamically set).
+ignored-classes=SQLObject
+
+# When zope mode is activated, add a predefined set of Zope acquired attributes
+# to generated-members.
+zope=no
+
+# List of members which are set dynamically and missed by pylint inference
+# system, and so shouldn't trigger E0201 when accessed. Python regular
+# expressions are accepted.
+generated-members=REQUEST,acl_users,aq_parent
+
+
+[FORMAT]
+
+# Maximum number of characters on a single line.
+max-line-length=80
+
+# Maximum number of lines in a module
+max-module-lines=1000
+
+# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
+# tab).
+indent-string='    '
+
+
+[MISCELLANEOUS]
+
+# List of note tags to take in consideration, separated by a comma.
+notes=FIXME,XXX,TODO
+
+
+[VARIABLES]
+
+# Tells whether we should check for unused import in __init__ files.
+init-import=no
+
+# A regular expression matching the beginning of the name of dummy variables
+# (i.e. not used).
+dummy-variables-rgx=_|dummy
+
+# List of additional names supposed to be defined in builtins. Remember that
+# you should avoid to define new builtins when possible.
+additional-builtins=
+
+
+[BASIC]
+
+# Required attributes for module, separated by a comma
+required-attributes=
+
+# List of builtins function names that should not be used, separated by a comma
+bad-functions=apply,input
+
+# Regular expression which should only match correct module names
+module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
+
+# Regular expression which should only match correct module level names
+const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__)|version)$
+
+# Regular expression which should only match correct class names
+class-rgx=[A-Z_][a-zA-Z0-9]+$
+
+# Regular expression which should only match correct function names
+function-rgx=[a-z_][a-z0-9_]{0,30}$
+
+# Regular expression which should only match correct method names
+method-rgx=[a-z_][a-z0-9_]{0,30}$
+
+# Regular expression which should only match correct instance attribute names
+attr-rgx=[a-z_][a-z0-9_]{0,30}$
+
+# Regular expression which should only match correct argument names
+argument-rgx=[a-z_][a-z0-9_]{0,30}$
+
+# Regular expression which should only match correct variable names
+variable-rgx=[a-z_][a-z0-9_]{0,30}$
+
+# Regular expression which should only match correct list comprehension /
+# generator expression variable names
+inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$
+
+# Good variable names which should always be accepted, separated by a comma
+good-names=i,j,k,ex,Run,_
+
+# Bad variable names which should always be refused, separated by a comma
+bad-names=foo,bar,baz,toto,tutu,tata
+
+# Regular expression which should only match functions or classes name which do
+# not require a docstring
+no-docstring-rgx=__.*__
+
+
+[CLASSES]
+
+# List of interface methods to ignore, separated by a comma. This is used for
+# instance to not check methods defines in Zope's Interface base class.
+ignore-iface-methods=isImplementedBy,deferred,extends,names,namesAndDescriptions,queryDescriptionFor,getBases,getDescriptionFor,getDoc,getName,getTaggedValue,getTaggedValueTags,isEqualOrExtendedBy,setTaggedValue,isImplementedByInstancesOf,adaptWith,is_implemented_by
+
+# List of method names used to declare (i.e. assign) instance attributes.
+defining-attr-methods=__init__,__new__,setUp
+
+# List of valid names for the first argument in a class method.
+valid-classmethod-first-arg=cls
+
+
+[DESIGN]
+
+# Maximum number of arguments for function / method
+max-args=5
+
+# Argument names that match this expression will be ignored. Default to name
+# with leading underscore
+ignored-argument-names=_.*
+
+# Maximum number of locals for function / method body
+max-locals=15
+
+# Maximum number of return / yield for function / method body
+max-returns=6
+
+# Maximum number of branch for function / method body
+max-branchs=12
+
+# Maximum number of statements in function / method body
+max-statements=50
+
+# Maximum number of parents for a class (see R0901).
+max-parents=7
+
+# Maximum number of attributes for a class (see R0902).
+max-attributes=7
+
+# Minimum number of public methods for a class (see R0903).
+min-public-methods=2
+
+# Maximum number of public methods for a class (see R0904).
+max-public-methods=20
+
+
+[IMPORTS]
+
+# Deprecated modules which should not be used, separated by a comma
+deprecated-modules=regsub,string,TERMIOS,Bastion,rexec
+
+# Create a graph of every (i.e. internal and external) dependencies in the
+# given file (report RP0402 must not be disabled)
+import-graph=
+
+# Create a graph of external dependencies in the given file (report RP0402 must
+# not be disabled)
+ext-import-graph=
+
+# Create a graph of internal dependencies in the given file (report RP0402 must
+# not be disabled)
+int-import-graph=
+
+
+[EXCEPTIONS]
+
+# Exceptions that will emit a warning when being caught. Defaults to
+# "Exception"
+overgeneral-exceptions=Exception
Makefile (34 changes)
@@ -1,12 +1,42 @@
+# By default, run the tests.
 all: test
 
+version:
+	python setup.py version
+
+build:
+	python setup.py build_ext --inplace
+
+dist: sdist
+sdist:
+	python setup.py sdist
+
+install:
+	python setup.py install
+
+docs:
+	make -C docs
+
 lint:
-	pylint -f parseable nilmdb
+	pylint --rcfile=.pylintrc nilmdb
 
 test:
-	python runtests.py
+ifeq ($(INSIDE_EMACS), t)
+	# Use the slightly more flexible script
+	python tests/runtests.py
+else
+	# Let setup.py check dependencies, build stuff, and run the test
+	python setup.py nosetests
+endif
 
 clean::
 	find . -name '*pyc' | xargs rm -f
 	rm -f .coverage
 	rm -rf tests/*testdb*
+	rm -rf nilmdb.egg-info/ build/ nilmdb/server/*.so MANIFEST.in
+	make -C docs clean
+
+gitclean::
+	git clean -dXf
+
+.PHONY: all version build dist sdist install docs lint test clean
README.txt (18 changes)
@@ -3,8 +3,24 @@ by Jim Paris <jim@jtan.com>
 
 Prerequisites:
 
-  sudo apt-get install python2.7 python-cherrypy3 python-decorator python-nose python-coverage python-setuptools
+  # Runtime and build environments
+  sudo apt-get install python2.7 python2.7-dev python-setuptools cython
+
+  # Base NilmDB dependencies
+  sudo apt-get install python-cherrypy3 python-decorator python-simplejson
+  sudo apt-get install python-requests python-dateutil python-tz python-psutil
+
+  # Tools for running tests
+  sudo apt-get install python-nose python-coverage
+
+Test:
+  python setup.py nosetests
 
 Install:
 
   python setup.py install
+
+Usage:
+
+  nilmdb-server --help
+  nilmtool --help
@@ -1,4 +1,8 @@
 """Main NilmDB import"""
 
-from server import NilmDB, Server
-from client import Client
+from nilmdb.server import NilmDB, Server
+from nilmdb.client import Client
+
+from nilmdb._version import get_versions
+__version__ = get_versions()['version']
+del get_versions
nilmdb/_version.py (new file, 197 lines)
@@ -0,0 +1,197 @@
+IN_LONG_VERSION_PY = True
+# This file helps to compute a version number in source trees obtained from
+# git-archive tarball (such as those provided by githubs download-from-tag
+# feature). Distribution tarballs (build by setup.py sdist) and build
+# directories (produced by setup.py build) will contain a much shorter file
+# that just contains the computed version number.
+
+# This file is released into the public domain. Generated by
+# versioneer-0.7+ (https://github.com/warner/python-versioneer)
+
+# these strings will be replaced by git during git-archive
+git_refnames = "$Format:%d$"
+git_full = "$Format:%H$"
+
+
+import subprocess
+import sys
+
+def run_command(args, cwd=None, verbose=False):
+    try:
+        # remember shell=False, so use git.cmd on windows, not just git
+        p = subprocess.Popen(args, stdout=subprocess.PIPE, cwd=cwd)
+    except EnvironmentError:
+        e = sys.exc_info()[1]
+        if verbose:
+            print("unable to run %s" % args[0])
+            print(e)
+        return None
+    stdout = p.communicate()[0].strip()
+    if sys.version >= '3':
+        stdout = stdout.decode()
+    if p.returncode != 0:
+        if verbose:
+            print("unable to run %s (error)" % args[0])
+        return None
+    return stdout
+
+
+import sys
+import re
+import os.path
+
+def get_expanded_variables(versionfile_source):
+    # the code embedded in _version.py can just fetch the value of these
+    # variables. When used from setup.py, we don't want to import
+    # _version.py, so we do it with a regexp instead. This function is not
+    # used from _version.py.
+    variables = {}
+    try:
+        for line in open(versionfile_source,"r").readlines():
+            if line.strip().startswith("git_refnames ="):
+                mo = re.search(r'=\s*"(.*)"', line)
+                if mo:
+                    variables["refnames"] = mo.group(1)
+            if line.strip().startswith("git_full ="):
+                mo = re.search(r'=\s*"(.*)"', line)
+                if mo:
+                    variables["full"] = mo.group(1)
+    except EnvironmentError:
+        pass
+    return variables
+
+def versions_from_expanded_variables(variables, tag_prefix, verbose=False):
+    refnames = variables["refnames"].strip()
+    if refnames.startswith("$Format"):
+        if verbose:
+            print("variables are unexpanded, not using")
+        return {} # unexpanded, so not in an unpacked git-archive tarball
+    refs = set([r.strip() for r in refnames.strip("()").split(",")])
+    for ref in list(refs):
+        if not re.search(r'\d', ref):
+            if verbose:
+                print("discarding '%s', no digits" % ref)
+            refs.discard(ref)
+            # Assume all version tags have a digit. git's %d expansion
+            # behaves like git log --decorate=short and strips out the
+            # refs/heads/ and refs/tags/ prefixes that would let us
+            # distinguish between branches and tags. By ignoring refnames
+            # without digits, we filter out many common branch names like
+            # "release" and "stabilization", as well as "HEAD" and "master".
+    if verbose:
+        print("remaining refs: %s" % ",".join(sorted(refs)))
+    for ref in sorted(refs):
+        # sorting will prefer e.g. "2.0" over "2.0rc1"
+        if ref.startswith(tag_prefix):
+            r = ref[len(tag_prefix):]
+            if verbose:
+                print("picking %s" % r)
+            return { "version": r,
+                     "full": variables["full"].strip() }
+    # no suitable tags, so we use the full revision id
+    if verbose:
+        print("no suitable tags, using full revision id")
+    return { "version": variables["full"].strip(),
+             "full": variables["full"].strip() }
+
+def versions_from_vcs(tag_prefix, versionfile_source, verbose=False):
+    # this runs 'git' from the root of the source tree. That either means
+    # someone ran a setup.py command (and this code is in versioneer.py, so
+    # IN_LONG_VERSION_PY=False, thus the containing directory is the root of
+    # the source tree), or someone ran a project-specific entry point (and
+    # this code is in _version.py, so IN_LONG_VERSION_PY=True, thus the
+    # containing directory is somewhere deeper in the source tree). This only
+    # gets called if the git-archive 'subst' variables were *not* expanded,
+    # and _version.py hasn't already been rewritten with a short version
+    # string, meaning we're inside a checked out source tree.
+
+    try:
+        here = os.path.abspath(__file__)
+    except NameError:
+        # some py2exe/bbfreeze/non-CPython implementations don't do __file__
+        return {} # not always correct
+
+    # versionfile_source is the relative path from the top of the source tree
+    # (where the .git directory might live) to this file. Invert this to find
+    # the root from __file__.
+    root = here
+    if IN_LONG_VERSION_PY:
+        for i in range(len(versionfile_source.split("/"))):
+            root = os.path.dirname(root)
+    else:
+        root = os.path.dirname(here)
+    if not os.path.exists(os.path.join(root, ".git")):
+        if verbose:
+            print("no .git in %s" % root)
+        return {}
+
+    GIT = "git"
+    if sys.platform == "win32":
+        GIT = "git.cmd"
+    stdout = run_command([GIT, "describe", "--tags", "--dirty", "--always"],
+                         cwd=root)
+    if stdout is None:
+        return {}
+    if not stdout.startswith(tag_prefix):
+        if verbose:
+            print("tag '%s' doesn't start with prefix '%s'" % (stdout, tag_prefix))
+        return {}
+    tag = stdout[len(tag_prefix):]
+    stdout = run_command([GIT, "rev-parse", "HEAD"], cwd=root)
+    if stdout is None:
+        return {}
+    full = stdout.strip()
+    if tag.endswith("-dirty"):
+        full += "-dirty"
+    return {"version": tag, "full": full}
+
+
+def versions_from_parentdir(parentdir_prefix, versionfile_source, verbose=False):
+    if IN_LONG_VERSION_PY:
+        # We're running from _version.py. If it's from a source tree
+        # (execute-in-place), we can work upwards to find the root of the
+        # tree, and then check the parent directory for a version string. If
+        # it's in an installed application, there's no hope.
+        try:
+            here = os.path.abspath(__file__)
+        except NameError:
+            # py2exe/bbfreeze/non-CPython don't have __file__
+            return {} # without __file__, we have no hope
+        # versionfile_source is the relative path from the top of the source
+        # tree to _version.py. Invert this to find the root from __file__.
+        root = here
+        for i in range(len(versionfile_source.split("/"))):
+            root = os.path.dirname(root)
+    else:
+        # we're running from versioneer.py, which means we're running from
+        # the setup.py in a source tree. sys.argv[0] is setup.py in the root.
+        here = os.path.abspath(sys.argv[0])
+        root = os.path.dirname(here)
+
+    # Source tarballs conventionally unpack into a directory that includes
+    # both the project name and a version string.
+    dirname = os.path.basename(root)
+    if not dirname.startswith(parentdir_prefix):
+        if verbose:
+            print("guessing rootdir is '%s', but '%s' doesn't start with prefix '%s'" %
+                  (root, dirname, parentdir_prefix))
+        return None
+    return {"version": dirname[len(parentdir_prefix):], "full": ""}
+
+tag_prefix = "nilmdb-"
+parentdir_prefix = "nilmdb-"
+versionfile_source = "nilmdb/_version.py"
+
+def get_versions(default={"version": "unknown", "full": ""}, verbose=False):
+    variables = { "refnames": git_refnames, "full": git_full }
+    ver = versions_from_expanded_variables(variables, tag_prefix, verbose)
+    if not ver:
+        ver = versions_from_vcs(tag_prefix, versionfile_source, verbose)
+    if not ver:
+        ver = versions_from_parentdir(parentdir_prefix, versionfile_source,
+                                      verbose)
+    if not ver:
+        ver = default
+    return ver
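
Note: get_versions() above tries its three sources in order (expanded git-archive
variables, then 'git describe' in a checkout, then the parent directory name).
A sketch of what a caller sees; the tag and output shown are illustrative, not
real output:

    from nilmdb._version import get_versions

    info = get_versions(verbose=True)
    # In a checkout at tag "nilmdb-1.2" (tag_prefix "nilmdb-" is stripped):
    #   {'version': '1.2', 'full': '<40-character commit id>'}
    print(info["version"])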
@@ -1,4 +1,4 @@
 """nilmdb.client"""
 
-from .client import Client
-from .errors import *
+from nilmdb.client.client import Client
+from nilmdb.client.errors import ClientError, ServerError, Error
@@ -5,34 +5,38 @@
 import nilmdb
 import nilmdb.utils
 import nilmdb.client.httpclient
-from nilmdb.utils.printf import *
 
 import time
-import sys
-import re
-import os
 import simplejson as json
-import itertools
+import contextlib
 
-version = "1.0"
-
 def float_to_string(f):
-    # Use repr to maintain full precision in the string output.
+    """Use repr to maintain full precision in the string output."""
     return repr(float(f))
 
+def extract_timestamp(line):
+    """Extract just the timestamp from a line of data text"""
+    return float(line.split()[0])
+
 class Client(object):
     """Main client interface to the Nilm database."""
 
-    client_version = version
-
     def __init__(self, url):
         self.http = nilmdb.client.httpclient.HTTPClient(url)
 
+    # __enter__/__exit__ allow this class to be a context manager
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        self.close()
+
     def _json_param(self, data):
         """Return compact json-encoded version of parameter"""
         return json.dumps(data, separators=(',',':'))
 
     def close(self):
+        """Close the connection; safe to call multiple times"""
         self.http.close()
 
     def geturl(self):
@@ -43,20 +47,19 @@ class Client(object):
         """Return server version"""
         return self.http.get("version")
 
-    def dbpath(self):
-        """Return server database path"""
-        return self.http.get("dbpath")
-
-    def dbsize(self):
-        """Return server database size as human readable string"""
-        return self.http.get("dbsize")
-
-    def stream_list(self, path = None, layout = None):
+    def dbinfo(self):
+        """Return server database info (path, size, free space)
+        as a dictionary."""
+        return self.http.get("dbinfo")
+
+    def stream_list(self, path = None, layout = None, extent = False):
         params = {}
         if path is not None:
             params["path"] = path
         if layout is not None:
             params["layout"] = layout
+        if extent:
+            params["extent"] = 1
         return self.http.get("stream/list", params)
 
     def stream_get_metadata(self, path, keys = None):
@@ -72,7 +75,7 @@ class Client(object):
             "path": path,
             "data": self._json_param(data)
         }
-        return self.http.get("stream/set_metadata", params)
+        return self.http.post("stream/set_metadata", params)
 
     def stream_update_metadata(self, path, data):
         """Update stream metadata from a dictionary"""
@@ -80,18 +83,18 @@ class Client(object):
             "path": path,
             "data": self._json_param(data)
         }
-        return self.http.get("stream/update_metadata", params)
+        return self.http.post("stream/update_metadata", params)
 
     def stream_create(self, path, layout):
         """Create a new stream"""
         params = { "path": path,
                    "layout" : layout }
-        return self.http.get("stream/create", params)
+        return self.http.post("stream/create", params)
 
     def stream_destroy(self, path):
         """Delete stream and its contents"""
         params = { "path": path }
-        return self.http.get("stream/destroy", params)
+        return self.http.post("stream/destroy", params)
 
     def stream_remove(self, path, start = None, end = None):
         """Remove data from the specified time range"""
@@ -102,79 +105,47 @@ class Client(object):
             params["start"] = float_to_string(start)
         if end is not None:
             params["end"] = float_to_string(end)
-        return self.http.get("stream/remove", params)
+        return self.http.post("stream/remove", params)
+
+    @contextlib.contextmanager
+    def stream_insert_context(self, path, start = None, end = None):
+        """Return a context manager that allows data to be efficiently
+        inserted into a stream in a piecewise manner.  Data is be provided
+        as single lines, and is aggregated and sent to the server in larger
+        chunks as necessary.  Data lines must match the database layout for
+        the given path, and end with a newline.
+
+        Example:
+            with client.stream_insert_context('/path', start, end) as ctx:
+                ctx.insert_line('1234567890.0 1 2 3 4\\n')
+                ctx.insert_line('1234567891.0 1 2 3 4\\n')
+
+        For more details, see help for nilmdb.client.client.StreamInserter
+
+        This may make multiple requests to the server, if the data is
+        large enough or enough time has passed between insertions.
+        """
+        ctx = StreamInserter(self, path, start, end)
+        yield ctx
+        ctx.finalize()
 
     def stream_insert(self, path, data, start = None, end = None):
-        """Insert data into a stream.  data should be a file-like object
-        that provides ASCII data that matches the database layout for path.
-
-        start and end are the starting and ending timestamp of this
-        stream; all timestamps t in the data must satisfy 'start <= t
-        < end'.  If left unspecified, 'start' is the timestamp of the
-        first line of data, and 'end' is the timestamp on the last line
-        of data, plus a small delta of 1μs.
-        """
-        params = { "path": path }
-
-        # See design.md for a discussion of how much data to send.
-        # These are soft limits -- actual data might be rounded up.
-        max_data = 1048576
-        max_time = 30
-        end_epsilon = 1e-6
-
-        def extract_timestamp(line):
-            return float(line.split()[0])
-
-        def sendit():
-            # If we have more data after this, use the timestamp of
-            # the next line as the end.  Otherwise, use the given
-            # overall end time, or add end_epsilon to the last data
-            # point.
-            if nextline:
-                block_end = extract_timestamp(nextline)
-                if end and block_end > end:
-                    # This is unexpected, but we'll defer to the server
-                    # to return an error in this case.
-                    block_end = end
-            elif end:
-                block_end = end
-            else:
-                block_end = extract_timestamp(line) + end_epsilon
-
-            # Send it
-            params["start"] = float_to_string(block_start)
-            params["end"] = float_to_string(block_end)
-            return self.http.put("stream/insert", block_data, params)
-
-        clock_start = time.time()
-        block_data = ""
-        block_start = start
-        result = None
-        for (line, nextline) in nilmdb.utils.misc.pairwise(data):
-            # If we don't have a starting time, extract it from the first line
-            if block_start is None:
-                block_start = extract_timestamp(line)
-
-            clock_elapsed = time.time() - clock_start
-            block_data += line
-
-            # If we have enough data, or enough time has elapsed,
-            # send this block to the server, and empty things out
-            # for the next block.
-            if (len(block_data) > max_data) or (clock_elapsed > max_time):
-                result = sendit()
-                block_start = None
-                block_data = ""
-                clock_start = time.time()
-
-        # One last block?
-        if len(block_data):
-            result = sendit()
-
-        # Return the most recent JSON result we got back, or None if
-        # we didn't make any requests.
-        return result
+        """Insert rows of data into a stream.  data should be an
+        iterable object that provides ASCII data that matches the
+        database layout for path.  See stream_insert_context for
+        details on the 'start' and 'end' parameters."""
+        with self.stream_insert_context(path, start, end) as ctx:
+            ctx.insert_iter(data)
+        return ctx.last_response
+
+    def stream_insert_block(self, path, block, start, end):
+        """Insert an entire block of data into a stream.  Like
+        stream_insert, except 'block' contains multiple lines of ASCII
+        text and is sent in one single chunk."""
+        params = { "path": path,
+                   "start": float_to_string(start),
+                   "end": float_to_string(end) }
+        return self.http.put("stream/insert", block, params)
 
     def stream_intervals(self, path, start = None, end = None):
         """
@@ -187,7 +158,7 @@ class Client(object):
             params["start"] = float_to_string(start)
         if end is not None:
             params["end"] = float_to_string(end)
-        return self.http.get_gen("stream/intervals", params, retjson = True)
+        return self.http.get_gen("stream/intervals", params)
 
     def stream_extract(self, path, start = None, end = None, count = False):
         """
@@ -195,8 +166,8 @@ class Client(object):
         lines of ASCII-formatted data that matches the database
         layout for the given path.
 
-        Specify count=True to just get a count of values rather than
-        the actual data.
+        Specify count = True to return a count of matching data points
+        rather than the actual data.  The output format is unchanged.
         """
         params = {
             "path": path,
@@ -207,5 +178,203 @@ class Client(object):
             params["end"] = float_to_string(end)
         if count:
             params["count"] = 1
-        return self.http.get_gen("stream/extract", params, retjson = False)
+        return self.http.get_gen("stream/extract", params)
+
+    def stream_count(self, path, start = None, end = None):
+        """
+        Return the number of rows of data in the stream that satisfy
+        the given timestamps.
+        """
+        counts = list(self.stream_extract(path, start, end, count = True))
+        return int(counts[0])
+
+class StreamInserter(object):
+    """Object returned by stream_insert_context() that manages
+    the insertion of rows of data into a particular path.
+
+    The basic data flow is that we are filling a contiguous interval
+    on the server, with no gaps, that extends from timestamp 'start'
+    to timestamp 'end'.  Data timestamps satisfy 'start <= t < end'.
+    Data is provided by the user one line at a time with
+    .insert_line() or .insert_iter().
+
+    1. The first inserted line begins a new interval that starts at
+    'start'.  If 'start' is not given, it is deduced from the first
+    line's timestamp.
+
+    2. Subsequent lines go into the same contiguous interval.  As lines
+    are inserted, this routine may make multiple insertion requests to
+    the server, but will structure the timestamps to leave no gaps.
+
+    3. The current contiguous interval can be completed by manually
+    calling .finalize(), which the context manager will also do
+    automatically.  This will send any remaining data to the server,
+    using the 'end' timestamp to end the interval.
+
+    After a .finalize(), inserting new data goes back to step 1.
+
+    .update_start() can be called before step 1 to change the start
+    time for the interval.  .update_end() can be called before step 3
+    to change the end time for the interval.
+    """
+
+    # See design.md for a discussion of how much data to send.
+    # These are soft limits -- actual data might be rounded up.
+    # We send when we have a certain amount of data queued, or
+    # when a certain amount of time has passed since the last send.
+    _max_data = 2 * 1024 * 1024
+    _max_time = 30
+
+    # Delta to add to the final timestamp, if "end" wasn't given
+    _end_epsilon = 1e-6
+
+    def __init__(self, client, path, start = None, end = None):
+        """'http' is the httpclient object.  'path' is the database
+        path to insert to.  'start' and 'end' are used for the first
+        contiguous interval."""
+        self.last_response = None
+
+        self._client = client
+        self._path = path
+
+        # Start and end for the overall contiguous interval we're
+        # filling
+        self._interval_start = start
+        self._interval_end = end
+
+        # Data for the specific block we're building up to send
+        self._block_data = []
+        self._block_len = 0
+        self._block_start = None
+
+        # Time of last request
+        self._last_time = time.time()
+
+        # We keep a buffer of the two most recently inserted lines.
+        # Only the older one actually gets processed; the newer one
+        # is used to "look-ahead" to the next timestamp if we need
+        # to internally split an insertion into two requests.
+        self._line_old = None
+        self._line_new = None
+
+    def insert_iter(self, iter):
+        """Insert all lines of ASCII formatted data from the given
+        iterable.  Lines must be terminated with '\\n'."""
+        for line in iter:
+            self.insert_line(line)
+
+    def insert_line(self, line, allow_intermediate = True):
+        """Insert a single line of ASCII formatted data.  Line
+        must be terminated with '\\n'."""
+        if line and (len(line) < 1 or line[-1] != '\n'):
+            raise ValueError("lines must end in with a newline character")
+
+        # Store this new line, but process the previous (old) one.
+        # This lets us "look ahead" to the next line.
+        self._line_old = self._line_new
+        self._line_new = line
+        if self._line_old is None:
+            return
+
+        # If starting a new block, pull out the timestamp if needed.
+        if self._block_start is None:
+            if self._interval_start is not None:
+                # User provided a start timestamp.  Use it once, then
+                # clear it for the next block.
+                self._block_start = self._interval_start
+                self._interval_start = None
+            else:
+                # Extract timestamp from the first row
+                self._block_start = extract_timestamp(self._line_old)
+
+        # Save the line
+        self._block_data.append(self._line_old)
+        self._block_len += len(self._line_old)
+
+        if allow_intermediate:
+            # Send an intermediate block to the server if needed.
+            elapsed = time.time() - self._last_time
+            if (self._block_len > self._max_data) or (elapsed > self._max_time):
+                self._send_block_intermediate()
+
+    def update_start(self, start):
+        """Update the start time for the next contiguous interval.
+        Call this before starting to insert data for a new interval,
+        for example, after .finalize()"""
+        self._interval_start = start
+
+    def update_end(self, end):
+        """Update the end time for the current contiguous interval.
+        Call this before .finalize()"""
+        self._interval_end = end
+
+    def finalize(self):
+        """Stop filling the current contiguous interval.
+        All outstanding data will be sent, and the interval end
+        time of the interval will be taken from the 'end' argument
+        used when initializing this class, or the most recent
+        value passed to update_end(), or the last timestamp plus
+        a small epsilon value if no other endpoint was provided.
+
+        If more data is inserted after a finalize(), it will become
+        part of a new interval and there may be a gap left in-between."""
+        # Special marker tells insert_line that this is the end
+        self.insert_line(None, allow_intermediate = False)
+
+        if self._block_len > 0:
+            # We have data pending, so send the final block
+            self._send_block_final()
+        elif None not in (self._interval_start, self._interval_end):
+            # We have no data, but enough information to create an
+            # empty interval.
+            self._block_start = self._interval_start
+            self._interval_start = None
+            self._send_block_final()
+        else:
+            # No data, and no timestamps to use to create an empty
+            # interval.
+            pass
+
+        # Make sure both timestamps are emptied for future intervals.
+        self._interval_start = None
+        self._interval_end = None
+
+    def _send_block_intermediate(self):
+        """Send data, when we still have more data to send.
+        Use the timestamp from the next line, so that the blocks
+        are contiguous."""
+        block_end = extract_timestamp(self._line_new)
+        if self._interval_end is not None and block_end > self._interval_end:
+            # Something's fishy -- the timestamp we found is after
+            # the user's specified end.  Limit it here, and the
+            # server will return an error.
+            block_end = self._interval_end
+        self._send_block(block_end)
+
+    def _send_block_final(self):
+        """Send data, when this is the last block for the interval.
+        There is no next line, so figure out the actual interval end
+        using interval_end or end_epsilon."""
+        if self._interval_end is not None:
+            # Use the user's specified end timestamp
+            block_end = self._interval_end
+            # Clear it in case we send more intervals in the future.
+            self._interval_end = None
+        else:
+            # Add an epsilon to the last timestamp we saw
+            block_end = extract_timestamp(self._line_old) + self._end_epsilon
+        self._send_block(block_end)
+
+    def _send_block(self, block_end):
+        """Send current block to the server"""
+        self.last_response = self._client.stream_insert_block(
+            self._path, "".join(self._block_data),
+            self._block_start, block_end)
+
+        # Clear out the block
+        self._block_data = []
+        self._block_len = 0
+        self._block_start = None
+
+        # Note when we sent it
+        self._last_time = time.time()
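
Note: the new stream_insert_context() / StreamInserter pair replaces the old
batching loop that lived inside stream_insert().  A minimal usage sketch; the
server URL and stream path are hypothetical, and the stream is assumed to
already exist with a matching four-value layout:

    import nilmdb.client

    client = nilmdb.client.Client("http://localhost:12380/")

    # Fill one contiguous interval; lines are buffered client-side and
    # flushed in blocks of up to _max_data bytes, or every _max_time seconds.
    with client.stream_insert_context("/test/data") as ctx:
        for i in range(10):
            # Timestamp comes first on each newline-terminated line.
            ctx.insert_line("%f 1.0 2.0 3.0 4.0\n" % (1234567890.0 + i))
    # finalize() runs automatically at context exit; with no explicit 'end',
    # the interval ends at the last timestamp plus _end_epsilon (1e-6).

    client.close()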
@@ -2,17 +2,11 @@
 
 import nilmdb
 import nilmdb.utils
-from nilmdb.utils.printf import *
-from nilmdb.client.errors import *
-
-import time
-import sys
-import re
-import os
+from nilmdb.client.errors import ClientError, ServerError, Error
+
 import simplejson as json
 import urlparse
-import pycurl
-import cStringIO
+import requests
 
 class HTTPClient(object):
     """Class to manage and perform HTTP requests from the client"""
@@ -24,28 +18,21 @@ class HTTPClient(object):
         if '://' not in reparsed:
             reparsed = urlparse.urlparse("http://" + baseurl).geturl()
         self.baseurl = reparsed
-        self.curl = pycurl.Curl()
-        self.curl.setopt(pycurl.SSL_VERIFYHOST, 2)
-        self.curl.setopt(pycurl.FOLLOWLOCATION, 1)
-        self.curl.setopt(pycurl.MAXREDIRS, 5)
-        self._setup_url()
 
-    def _setup_url(self, url = "", params = ""):
-        url = urlparse.urljoin(self.baseurl, url)
-        if params:
-            url = urlparse.urljoin(
-                url, "?" + nilmdb.utils.urllib.urlencode(params))
-        self.curl.setopt(pycurl.URL, url)
-        self.url = url
+        # Build Requests session object, enable SSL verification
+        self.session = requests.Session()
+        self.session.verify = True
 
-    def _check_error(self, body = None):
-        code = self.curl.getinfo(pycurl.RESPONSE_CODE)
-        if code == 200:
-            return
-        # Default variables for exception
-        args = { "url" : self.url,
+        # Saved response, so that tests can verify a few things.
+        self._last_response = {}
+
+    def _handle_error(self, url, code, body):
+        # Default variables for exception.  We use the entire body as
+        # the default message, in case we can't extract it from a JSON
+        # response.
+        args = { "url" : url,
                  "status" : str(code),
-                 "message" : None,
+                 "message" : body,
                  "traceback" : None }
         try:
             # Fill with server-provided data if we can
@@ -67,133 +54,68 @@ class HTTPClient(object):
         else:
             raise Error(**args)
 
-    def _req_generator(self, url, params):
-        """
-        Like self._req(), but runs the perform in a separate thread.
-        It returns a generator that spits out arbitrary-sized chunks
-        of the resulting data, instead of using the WRITEFUNCTION
-        callback.
-        """
-        self._setup_url(url, params)
-        self._status = None
-        error_body = ""
-        self._headers = ""
-        def header_callback(data):
-            if self._status is None:
-                self._status = int(data.split(" ")[1])
-            self._headers += data
-        self.curl.setopt(pycurl.HEADERFUNCTION, header_callback)
-        def func(callback):
-            self.curl.setopt(pycurl.WRITEFUNCTION, callback)
-            self.curl.perform()
-        try:
-            with nilmdb.utils.Iteratorizer(func, curl_hack = True) as it:
-                for i in it:
-                    if self._status == 200:
-                        # If we had a 200 response, yield the data to caller.
-                        yield i
-                    else:
-                        # Otherwise, collect it into an error string.
-                        error_body += i
-        except pycurl.error as e:
-            raise ServerError(status = "502 Error",
-                              url = self.url,
-                              message = e[1])
-        # Raise an exception if there was an error
-        self._check_error(error_body)
-
-    def _req(self, url, params):
-        """
-        GET or POST that returns raw data.  Returns the body
-        data as a string, or raises an error if it contained an error.
-        """
-        self._setup_url(url, params)
-        body = cStringIO.StringIO()
-        self.curl.setopt(pycurl.WRITEFUNCTION, body.write)
-        self._headers = ""
-        def header_callback(data):
-            self._headers += data
-        self.curl.setopt(pycurl.HEADERFUNCTION, header_callback)
-        try:
-            self.curl.perform()
-        except pycurl.error as e:
-            raise ServerError(status = "502 Error",
-                              url = self.url,
-                              message = e[1])
-        body_str = body.getvalue()
-        # Raise an exception if there was an error
-        self._check_error(body_str)
-        return body_str
-
     def close(self):
-        self.curl.close()
+        self.session.close()
 
-    def _iterate_lines(self, it):
+    def _do_req(self, method, url, query_data, body_data, stream):
+        url = urlparse.urljoin(self.baseurl, url)
+        try:
+            response = self.session.request(method, url,
+                                            params = query_data,
+                                            data = body_data,
+                                            stream = stream)
+        except requests.RequestException as e:
+            raise ServerError(status = "502 Error", url = url,
+                              message = str(e.message))
+        if response.status_code != 200:
+            self._handle_error(url, response.status_code, response.content)
+        self._last_response = response
+        if response.headers["content-type"] in ("application/json",
+                                                "application/x-json-stream"):
+            return (response, True)
+        else:
+            return (response, False)
+
+    # Normal versions that return data directly
+    def _req(self, method, url, query = None, body = None):
         """
-        Given an iterator that returns arbitrarily-sized chunks
-        of data, return '\n'-delimited lines of text
+        Make a request and return the body data as a string or parsed
+        JSON object, or raise an error if it contained an error.
         """
-        partial = ""
-        for chunk in it:
-            partial += chunk
-            lines = partial.split("\n")
-            for line in lines[0:-1]:
-                yield line
-            partial = lines[-1]
-        if partial != "":
-            yield partial
+        (response, isjson) = self._do_req(method, url, query, body, False)
+        if isjson:
+            return json.loads(response.content)
+        return response.content
 
-    # Non-generator versions
-    def _doreq(self, url, params, retjson):
-        """
-        Perform a request, and return the body.
-
-        url: URL to request (relative to baseurl)
-        params: dictionary of query parameters
-        retjson: expect JSON and return python objects instead of string
-        """
-        out = self._req(url, params)
-        if retjson:
-            return json.loads(out)
-        return out
-
-    def get(self, url, params = None, retjson = True):
-        """Simple GET"""
-        self.curl.setopt(pycurl.UPLOAD, 0)
-        return self._doreq(url, params, retjson)
-
-    def put(self, url, postdata, params = None, retjson = True):
-        """Simple PUT"""
-        self.curl.setopt(pycurl.UPLOAD, 1)
-        self._setup_url(url, params)
-        data = cStringIO.StringIO(postdata)
-        self.curl.setopt(pycurl.READFUNCTION, data.read)
-        return self._doreq(url, params, retjson)
-
-    # Generator versions
-    def _doreq_gen(self, url, params, retjson):
+    def get(self, url, params = None):
+        """Simple GET (parameters in URL)"""
+        return self._req("GET", url, params, None)
+
+    def post(self, url, params = None):
+        """Simple POST (parameters in body)"""
+        return self._req("POST", url, None, params)
+
+    def put(self, url, data, params = None):
+        """Simple PUT (parameters in URL, data in body)"""
+        return self._req("PUT", url, params, data)
+
+    # Generator versions that return data one line at a time.
+    def _req_gen(self, method, url, query = None, body = None):
         """
-        Perform a request, and return lines of the body in a generator.
-
-        url: URL to request (relative to baseurl)
-        params: dictionary of query parameters
-        retjson: expect JSON and yield python objects instead of strings
+        Make a request and return a generator that gives back strings
+        or JSON decoded lines of the body data, or raise an error if
+        it contained an error.
         """
-        for line in self._iterate_lines(self._req_generator(url, params)):
-            if retjson:
+        (response, isjson) = self._do_req(method, url, query, body, True)
+        for line in response.iter_lines():
+            if isjson:
                 yield json.loads(line)
             else:
                 yield line
 
-    def get_gen(self, url, params = None, retjson = True):
-        """Simple GET, returning a generator"""
-        self.curl.setopt(pycurl.UPLOAD, 0)
-        return self._doreq_gen(url, params, retjson)
-
-    def put_gen(self, url, postdata, params = None, retjson = True):
-        """Simple PUT, returning a generator"""
-        self.curl.setopt(pycurl.UPLOAD, 1)
-        self._setup_url(url, params)
-        data = cStringIO.StringIO(postdata)
-        self.curl.setopt(pycurl.READFUNCTION, data.read)
-        return self._doreq_gen(url, params, retjson)
+    def get_gen(self, url, params = None):
+        """Simple GET (parameters in URL) returning a generator"""
+        return self._req_gen("GET", url, params)
+
+    # Not much use for a POST or PUT generator, since they don't
+    # return much data.
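
Note: the pycurl WRITEFUNCTION/HEADERFUNCTION callbacks and the Iteratorizer
thread workaround disappear because requests exposes streaming responses
directly.  A standalone sketch of the pattern _do_req()/_req_gen() rely on;
the URL and endpoint are only illustrative:

    import requests

    session = requests.Session()
    session.verify = True  # verify SSL certificates, as in HTTPClient

    # stream=True defers the body transfer; iter_lines() then yields
    # '\n'-delimited lines as they arrive, which is what _req_gen() uses
    # in place of the old _iterate_lines() chunk reassembly.
    response = session.request("GET", "http://localhost:12380/stream/list",
                               params={"extent": 1}, stream=True)
    for line in response.iter_lines():
        print(line)
    session.close()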
@@ -1,3 +1,3 @@
 """nilmdb.cmdline"""
 
-from .cmdline import Cmdline
+from nilmdb.cmdline.cmdline import Cmdline
@@ -3,22 +3,19 @@
 import nilmdb
 from nilmdb.utils.printf import *
 from nilmdb.utils import datetime_tz
+import nilmdb.utils.time

-import dateutil.parser
 import sys
-import re
+import os
 import argparse
 from argparse import ArgumentDefaultsHelpFormatter as def_form

-version = "1.0"

 # Valid subcommands.  Defined in separate files just to break
 # things up -- they're still called with Cmdline as self.
-subcommands = [ "info", "create", "list", "metadata", "insert", "extract",
-                "remove", "destroy" ]
+subcommands = [ "help", "info", "create", "list", "metadata",
+                "insert", "extract", "remove", "destroy" ]

-# Import the subcommand modules.  Equivalent way of doing this would be
-# from . import info as cmd_info
+# Import the subcommand modules
 subcmd_mods = {}
 for cmd in subcommands:
     subcmd_mods[cmd] = __import__("nilmdb.cmdline." + cmd, fromlist = [ cmd ])
@@ -30,74 +27,21 @@ class JimArgumentParser(argparse.ArgumentParser):

 class Cmdline(object):

-    def __init__(self, argv):
-        self.argv = argv
+    def __init__(self, argv = None):
+        self.argv = argv or sys.argv[1:]
         self.client = None
+        self.def_url = os.environ.get("NILMDB_URL", "http://localhost:12380")
+        self.subcmd = {}

     def arg_time(self, toparse):
         """Parse a time string argument"""
         try:
-            return self.parse_time(toparse).totimestamp()
+            return nilmdb.utils.time.parse_time(toparse).totimestamp()
         except ValueError as e:
             raise argparse.ArgumentTypeError(sprintf("%s \"%s\"",
                                                      str(e), toparse))

-    def parse_time(self, toparse):
-        """
-        Parse a free-form time string and return a datetime_tz object.
-        If the string doesn't contain a timestamp, the current local
-        timezone is assumed (e.g. from the TZ env var).
-        """
-        # If string isn't "now" and doesn't contain at least 4 digits,
-        # consider it invalid.  smartparse might otherwise accept
-        # empty strings and strings with just separators.
-        if toparse != "now" and len(re.findall(r"\d", toparse)) < 4:
-            raise ValueError("not enough digits for a timestamp")
-
-        # Try to just parse the time as given
-        try:
-            return datetime_tz.datetime_tz.smartparse(toparse)
-        except ValueError:
-            pass
-
-        # Try to extract a substring in a condensed format that we expect
-        # to see in a filename or header comment
-        res = re.search(r"(^|[^\d])("       # non-numeric or SOL
-                        r"(199\d|2\d\d\d)"  # year
-                        r"[-/]?"            # separator
-                        r"(0[1-9]|1[012])"  # month
-                        r"[-/]?"            # separator
-                        r"([012]\d|3[01])"  # day
-                        r"[-T ]?"           # separator
-                        r"([01]\d|2[0-3])"  # hour
-                        r"[:]?"             # separator
-                        r"([0-5]\d)"        # minute
-                        r"[:]?"             # separator
-                        r"([0-5]\d)?"       # second
-                        r"([-+]\d\d\d\d)?"  # timezone
-                        r")", toparse)
-        if res is not None:
-            try:
-                return datetime_tz.datetime_tz.smartparse(res.group(2))
-            except ValueError:
-                pass
-
-        # Could also try to successively parse substrings, but let's
-        # just give up for now.
-        raise ValueError("unable to parse timestamp")
-
-    def time_string(self, timestamp):
-        """
-        Convert a Unix timestamp to a string for printing, using the
-        local timezone for display (e.g. from the TZ env var).
-        """
-        dt = datetime_tz.datetime_tz.fromtimestamp(timestamp)
-        return dt.strftime("%a, %d %b %Y %H:%M:%S.%f %z")

     def parser_setup(self):
-        version_string = sprintf("nilmtool %s, client library %s",
-                                 version, nilmdb.Client.client_version)

         self.parser = JimArgumentParser(add_help = False,
                                         formatter_class = def_form)
@@ -105,22 +49,21 @@ class Cmdline(object):
         group.add_argument("-h", "--help", action='help',
                            help='show this help message and exit')
         group.add_argument("-V", "--version", action="version",
-                           version=version_string)
+                           version = nilmdb.__version__)

         group = self.parser.add_argument_group("Server")
         group.add_argument("-u", "--url", action="store",
-                           default="http://localhost:12380/",
+                           default=self.def_url,
                            help="NilmDB server URL (default: %(default)s)")

-        sub = self.parser.add_subparsers(title="Commands",
-                                         dest="command",
-                                         description="Specify --help after "
-                                         "the command for command-specific "
-                                         "options.")
+        sub = self.parser.add_subparsers(
+            title="Commands", dest="command",
+            description="Use 'help command' or 'command --help' for more "
+            "details on a particular command.")

         # Set up subcommands (defined in separate files)
         for cmd in subcommands:
-            subcmd_mods[cmd].setup(self, sub)
+            self.subcmd[cmd] = subcmd_mods[cmd].setup(self, sub)

     def die(self, formatstr, *args):
         fprintf(sys.stderr, formatstr + "\n", *args)
@@ -143,11 +86,13 @@ class Cmdline(object):

         self.client = nilmdb.Client(self.args.url)

-        # Make a test connection to make sure things work
-        try:
-            server_version = self.client.version()
-        except nilmdb.client.Error as e:
-            self.die("error connecting to server: %s", str(e))
+        # Make a test connection to make sure things work,
+        # unless the particular command requests that we don't.
+        if "no_test_connect" not in self.args:
+            try:
+                server_version = self.client.version()
+            except nilmdb.client.Error as e:
+                self.die("error connecting to server: %s", str(e))

         # Now dispatch client request to appropriate function.  Parser
         # should have ensured that we don't have any unknown commands
@@ -1,9 +1,7 @@
 from nilmdb.utils.printf import *
 import nilmdb
 import nilmdb.client
-import textwrap

-from argparse import ArgumentDefaultsHelpFormatter as def_form
 from argparse import RawDescriptionHelpFormatter as raw_form

 def setup(self, sub):
@@ -28,6 +26,7 @@ Layout types are of the format: type_count
                        help="Path (in database) of new stream, e.g. /foo/bar")
     group.add_argument("layout",
                        help="Layout type for new stream, e.g. float32_8")
+    return cmd

 def cmd_create(self):
     """Create new stream"""
@@ -16,6 +16,7 @@ def setup(self, sub):
     group = cmd.add_argument_group("Required arguments")
     group.add_argument("path",
                        help="Path of the stream to delete, e.g. /foo/bar")
+    return cmd

 def cmd_destroy(self):
     """Destroy stream"""
@@ -1,8 +1,6 @@
 from __future__ import print_function
 from nilmdb.utils.printf import *
-import nilmdb
 import nilmdb.client
-import sys

 def setup(self, sub):
     cmd = sub.add_parser("extract", help="Extract data",
@@ -32,6 +30,7 @@ def setup(self, sub):
                        help="Show raw timestamps in annotated information")
     group.add_argument("-c", "--count", action="store_true",
                        help="Just output a count of matched data points")
+    return cmd

 def cmd_extract_verify(self):
     if self.args.start is not None and self.args.end is not None:
@@ -47,7 +46,7 @@ def cmd_extract(self):
     if self.args.timestamp_raw:
         time_string = repr
     else:
-        time_string = self.time_string
+        time_string = nilmdb.utils.time.format_time

     if self.args.annotate:
         printf("# path: %s\n", self.args.path)
26
nilmdb/cmdline/help.py
Normal file
@@ -0,0 +1,26 @@
+from nilmdb.utils.printf import *
+
+import argparse
+import sys
+
+def setup(self, sub):
+    cmd = sub.add_parser("help", help="Show detailed help for a command",
+                         description="""
+                         Show help for a command.  'help command' is
+                         the same as 'command --help'.
+                         """)
+    cmd.set_defaults(handler = cmd_help)
+    cmd.set_defaults(no_test_connect = True)
+    cmd.add_argument("command", nargs="?",
+                     help="Command to get help about")
+    cmd.add_argument("rest", nargs=argparse.REMAINDER,
+                     help=argparse.SUPPRESS)
+    return cmd
+
+def cmd_help(self):
+    if self.args.command in self.subcmd:
+        self.subcmd[self.args.command].print_help()
+    else:
+        self.parser.print_help()
+
+    return
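The new `help` subcommand works because each `setup()` now returns its subparser, which `Cmdline` stores in `self.subcmd`; `help foo` then just calls that parser's `print_help()`, and the `no_test_connect` default skips the usual server ping so help still works offline. A minimal argparse sketch of the same pattern (the command names here are hypothetical):

    import argparse

    parser = argparse.ArgumentParser(prog="tool")
    sub = parser.add_subparsers(title="Commands", dest="command")
    subcmd = {}
    subcmd["create"] = sub.add_parser("create", help="Create a stream")
    helpcmd = sub.add_parser("help", help="Show help for a command")
    helpcmd.add_argument("command", nargs="?")

    args = parser.parse_args(["help", "create"])
    if args.command in subcmd:
        subcmd[args.command].print_help()   # same as "tool create --help"
    else:
        parser.print_help()

Note that the top-level `dest="command"` and the help subcommand's own positional share a name on purpose: parsing "help create" leaves `args.command` set to "create".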
@@ -1,4 +1,6 @@
+import nilmdb
 from nilmdb.utils.printf import *
+from nilmdb.utils import human_size

 from argparse import ArgumentDefaultsHelpFormatter as def_form

@@ -10,11 +12,14 @@ def setup(self, sub):
                        version.
                        """)
     cmd.set_defaults(handler = cmd_info)
+    return cmd

 def cmd_info(self):
     """Print info about the server"""
-    printf("Client library version: %s\n", self.client.client_version)
+    printf("Client version: %s\n", nilmdb.__version__)
     printf("Server version: %s\n", self.client.version())
     printf("Server URL: %s\n", self.client.geturl())
-    printf("Server database path: %s\n", self.client.dbpath())
-    printf("Server database size: %s\n", self.client.dbsize())
+    dbinfo = self.client.dbinfo()
+    printf("Server database path: %s\n", dbinfo["path"])
+    printf("Server database size: %s\n", human_size(dbinfo["size"]))
+    printf("Server database free space: %s\n", human_size(dbinfo["free"]))
@@ -2,6 +2,7 @@ from nilmdb.utils.printf import *
 import nilmdb
 import nilmdb.client
 import nilmdb.utils.timestamper as timestamper
+import nilmdb.utils.time

 import sys

@@ -46,6 +47,7 @@ def setup(self, sub):
                        help="Path of stream, e.g. /foo/bar")
     group.add_argument("file", nargs="*", default=['-'],
                        help="File(s) to insert (default: - (stdin))")
+    return cmd

 def cmd_insert(self):
     # Find requested stream
@@ -53,8 +55,6 @@ def cmd_insert(self):
     if len(streams) != 1:
         self.die("error getting stream info for path %s", self.args.path)

-    layout = streams[0][1]

     if self.args.start and len(self.args.file) != 1:
         self.die("error: --start can only be used with one input file")

@@ -75,7 +75,7 @@ def cmd_insert(self):
             start = self.args.start
         else:
             try:
-                start = self.parse_time(filename)
+                start = nilmdb.utils.time.parse_time(filename)
             except ValueError:
                 self.die("error extracting time from filename '%s'",
                          filename)
@@ -93,7 +93,7 @@ def cmd_insert(self):

         # Insert the data
         try:
-            result = self.client.stream_insert(self.args.path, ts)
+            self.client.stream_insert(self.args.path, ts)
         except nilmdb.client.Error as e:
             # TODO: It would be nice to be able to offer better errors
             # here, particularly in the case of overlap, which just shows
@@ -1,6 +1,5 @@
 from nilmdb.utils.printf import *
-import nilmdb
-import nilmdb.client
+import nilmdb.utils.time

 import fnmatch
 import argparse
@@ -25,11 +24,13 @@ def setup(self, sub):
     group.add_argument("-l", "--layout", default="*",
                        help="Match only this stream layout")

+    group = cmd.add_argument_group("Interval extent")
+    group.add_argument("-E", "--extent", action="store_true",
+                       help="Show min/max timestamps in this stream")
+
     group = cmd.add_argument_group("Interval details")
     group.add_argument("-d", "--detail", action="store_true",
                        help="Show available data time intervals")
-    group.add_argument("-T", "--timestamp-raw", action="store_true",
-                       help="Show raw timestamps in time intervals")
     group.add_argument("-s", "--start",
                        metavar="TIME", type=self.arg_time,
                        help="Starting timestamp (free-form, inclusive)")
@@ -37,6 +38,12 @@ def setup(self, sub):
                        metavar="TIME", type=self.arg_time,
                        help="Ending timestamp (free-form, noninclusive)")

+    group = cmd.add_argument_group("Misc options")
+    group.add_argument("-T", "--timestamp-raw", action="store_true",
+                       help="Show raw timestamps in time intervals or extents")
+
+    return cmd

 def cmd_list_verify(self):
     # A hidden "path_positional" argument lets the user leave off the
     # "-p" when specifying the path.  Handle it here.
@@ -49,31 +56,41 @@ def cmd_list_verify(self):
         self.args.path = self.args.path_positional

     if self.args.start is not None and self.args.end is not None:
-        if self.args.start > self.args.end:
-            self.parser.error("start is after end")
+        if self.args.start >= self.args.end:
+            self.parser.error("start must precede end")

+    if self.args.start is not None or self.args.end is not None:
+        if not self.args.detail:
+            self.parser.error("--start and --end only make sense with --detail")

 def cmd_list(self):
     """List available streams"""
-    streams = self.client.stream_list()
+    streams = self.client.stream_list(extent = True)

     if self.args.timestamp_raw:
         time_string = repr
     else:
-        time_string = self.time_string
+        time_string = nilmdb.utils.time.format_time

-    for (path, layout) in streams:
+    for (path, layout, extent_min, extent_max) in streams:
         if not (fnmatch.fnmatch(path, self.args.path) and
                 fnmatch.fnmatch(layout, self.args.layout)):
             continue

         printf("%s %s\n", path, layout)
-        if not self.args.detail:
-            continue
-
-        printed = False
-        for (start, end) in self.client.stream_intervals(path, self.args.start,
-                                                         self.args.end):
-            printf("  [ %s -> %s ]\n", time_string(start), time_string(end))
-            printed = True
-        if not printed:
-            printf("  (no intervals)\n")
+        if self.args.extent:
+            if extent_min is None or extent_max is None:
+                printf("  extent: (no data)\n")
+            else:
+                printf("  extent: %s -> %s\n",
+                       time_string(extent_min), time_string(extent_max))
+
+        if self.args.detail:
+            printed = False
+            for (start, end) in self.client.stream_intervals(
+                    path, self.args.start, self.args.end):
+                printf("  [ %s -> %s ]\n", time_string(start), time_string(end))
+                printed = True
+            if not printed:
+                printf("  (no intervals)\n")
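With `-E`/`--extent`, `cmd_list` no longer walks every interval just to show a stream's span; the min/max timestamps come back from `stream_list(extent = True)` itself. A sketch of consuming that return value, with made-up rows standing in for a real client call:

    # Each row is [ path, layout, extent_min, extent_max ]; extents are
    # None for streams that contain no data (values below are hypothetical).
    streams = [
        ["/newton/prep", "float32_8", 1332496800.0, 1332583200.0],
        ["/newton/empty", "uint16_6", None, None],
    ]
    for (path, layout, extent_min, extent_max) in streams:
        if extent_min is None or extent_max is None:
            print("%s %s: (no data)" % (path, layout))
        else:
            print("%s %s: %s -> %s" % (path, layout, extent_min, extent_max))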
@@ -26,6 +26,7 @@ def setup(self, sub):
     exc.add_argument("-u", "--update", nargs="+", metavar="key=value",
                      help="Update metadata using provided "
                      "key=value pairs")
+    return cmd

 def cmd_metadata(self):
     """Manipulate metadata"""
@@ -1,7 +1,6 @@
 from nilmdb.utils.printf import *
 import nilmdb
 import nilmdb.client
-import sys

 def setup(self, sub):
     cmd = sub.add_parser("remove", help="Remove data",
@@ -9,8 +8,7 @@ def setup(self, sub):
                          Remove all data from a specified time range within a
                          stream.
                          """)
-    cmd.set_defaults(verify = cmd_remove_verify,
-                     handler = cmd_remove)
+    cmd.set_defaults(handler = cmd_remove)

     group = cmd.add_argument_group("Data selection")
     group.add_argument("path",
@@ -25,11 +23,7 @@ def setup(self, sub):
     group = cmd.add_argument_group("Output format")
     group.add_argument("-c", "--count", action="store_true",
                        help="Output number of data points removed")
+    return cmd

-def cmd_remove_verify(self):
-    if self.args.start is not None and self.args.end is not None:
-        if self.args.start > self.args.end:
-            self.parser.error("start is after end")

 def cmd_remove(self):
     try:
1
nilmdb/scripts/__init__.py
Normal file
@@ -0,0 +1 @@
+# Command line scripts
86
nilmdb/scripts/nilmdb_server.py
Executable file
@@ -0,0 +1,86 @@
+#!/usr/bin/python
+
+import nilmdb.server
+import argparse
+import os
+import socket
+
+def main():
+    """Main entry point for the 'nilmdb-server' command line script"""
+
+    parser = argparse.ArgumentParser(
+        description = 'Run the NilmDB server',
+        formatter_class = argparse.ArgumentDefaultsHelpFormatter)
+
+    parser.add_argument("-V", "--version", action="version",
+                        version = nilmdb.__version__)
+
+    group = parser.add_argument_group("Standard options")
+    group.add_argument('-a', '--address',
+                       help = 'Only listen on the given address',
+                       default = '0.0.0.0')
+    group.add_argument('-p', '--port', help = 'Listen on the given port',
+                       type = int, default = 12380)
+    group.add_argument('-d', '--database', help = 'Database directory',
+                       default = os.path.join(os.getcwd(), "db"))
+    group.add_argument('-q', '--quiet', help = 'Silence output',
+                       action = 'store_true')
+    group.add_argument('-n', '--nosync', help = 'Use asynchronous '
+                       'commits for sqlite transactions',
+                       action = 'store_true', default = False)
+
+    group = parser.add_argument_group("Debug options")
+    group.add_argument('-y', '--yappi', help = 'Run under yappi profiler and '
+                       'invoke interactive shell afterwards',
+                       action = 'store_true')
+
+    args = parser.parse_args()
+
+    # Create database object.  Needs to be serialized before passing
+    # to the Server.
+    db = nilmdb.utils.serializer_proxy(nilmdb.NilmDB)(args.database,
+                                                      sync = not args.nosync)
+
+    # Configure the server
+    if args.quiet:
+        embedded = True
+    else:
+        embedded = False
+    server = nilmdb.server.Server(db,
+                                  host = args.address,
+                                  port = args.port,
+                                  embedded = embedded)
+
+    # Print info
+    if not args.quiet:
+        print "Database: %s" % (os.path.realpath(args.database))
+        if args.address == '0.0.0.0' or args.address == '::':
+            host = socket.getfqdn()
+        else:
+            host = args.address
+        print "Server URL: http://%s:%d/" % ( host, args.port)
+        print "----"
+
+    # Run it
+    if args.yappi:
+        print "Running in yappi"
+        try:
+            import yappi
+            yappi.start()
+            server.start(blocking = True)
+        finally:
+            yappi.stop()
+            yappi.print_stats(sort_type = yappi.SORTTYPE_TTOT, limit = 50)
+            from IPython import embed
+            embed(header = "Use the yappi object to explore further, "
+                  "quit to exit")
+    else:
+        server.start(blocking = True)
+
+    # Clean up
+    if not args.quiet:
+        print "Closing database"
+    db.close()
+
+if __name__ == "__main__":
+    main()
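The `serializer_proxy` wrapper matters because the NilmDB change elsewhere in this diff opens sqlite with `check_same_thread = True`: every database call must now come from a single thread, so the object handed to the server marshals calls onto one worker. nilmdb's actual implementation lives in `nilmdb.utils`; the following is only a minimal sketch of the idea, not that code:

    import threading
    import Queue  # "queue" on Python 3

    class SerializedProxy(object):
        """Funnel all method calls on obj through one worker thread.
        (Error propagation and shutdown are omitted for brevity.)"""
        def __init__(self, obj):
            self._obj = obj
            self._calls = Queue.Queue()
            t = threading.Thread(target=self._worker)
            t.daemon = True
            t.start()

        def _worker(self):
            while True:
                (name, args, kwargs, result) = self._calls.get()
                result.put(getattr(self._obj, name)(*args, **kwargs))

        def __getattr__(self, name):
            def call(*args, **kwargs):
                result = Queue.Queue()
                self._calls.put((name, args, kwargs, result))
                return result.get()   # block until the worker finishes
            return call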
10
nilmdb/scripts/nilmtool.py
Executable file
@@ -0,0 +1,10 @@
+#!/usr/bin/python
+
+import nilmdb.cmdline
+
+def main():
+    """Main entry point for the 'nilmtool' command line script"""
+    nilmdb.cmdline.Cmdline().run()
+
+if __name__ == "__main__":
+    main()
@@ -1,15 +1,22 @@
 """nilmdb.server"""

+from __future__ import absolute_import
+
 # Try to set up pyximport to automatically rebuild Cython modules.  If
 # this doesn't work, it's OK, as long as the modules were built externally.
 # (e.g. python setup.py build_ext --inplace)
-try:
+try: # pragma: no cover
+    import Cython
+    import distutils.version
+    if (distutils.version.LooseVersion(Cython.__version__) <
+        distutils.version.LooseVersion("0.17")): # pragma: no cover
+        raise ImportError("Cython version too old")
     import pyximport
-    pyximport.install()
-    import layout
-except: # pragma: no cover
+    pyximport.install(inplace = True, build_in_temp = False)
+except (ImportError, TypeError): # pragma: no cover
     pass

-from .nilmdb import NilmDB
-from .server import Server
-from .errors import *
+import nilmdb.server.layout
+from nilmdb.server.nilmdb import NilmDB
+from nilmdb.server.server import Server
+from nilmdb.server.errors import NilmDBError, StreamError, OverlapError
@@ -8,19 +8,27 @@ import nilmdb
 from nilmdb.utils.printf import *

 import os
-import sys
 import cPickle as pickle
 import struct
-import fnmatch
 import mmap
 import re

+# If we have the faulthandler module, use it.  All of the mmap stuff
+# might trigger a SIGSEGV or SIGBUS if we're not careful, and
+# faulthandler will give a traceback in that case.  (the Python
+# interpreter will still die either way).
+try: # pragma: no cover
+    import faulthandler
+    faulthandler.enable()
+except: # pragma: no cover
+    pass

 # Up to 256 open file descriptors at any given time.
 # These variables are global so they can be used in the decorator arguments.
 table_cache_size = 16
 fd_cache_size = 16

-@nilmdb.utils.must_close(wrap_verify = True)
+@nilmdb.utils.must_close(wrap_verify = False)
 class BulkData(object):
     def __init__(self, basepath, **kwargs):
         self.basepath = basepath
@@ -91,8 +99,7 @@ class BulkData(object):
                 "float32": 'f',
                 "float64": 'd',
                 }
-            for n in range(layout.count):
-                struct_fmt += struct_mapping[layout.datatype]
+            struct_fmt += struct_mapping[layout.datatype] * layout.count
         except KeyError:
             raise ValueError("no such layout, or bad data types")
@@ -164,7 +171,65 @@ class BulkData(object):
         ospath = os.path.join(self.root, *elements)
         return Table(ospath)

-@nilmdb.utils.must_close(wrap_verify = True)
+@nilmdb.utils.must_close(wrap_verify = False)
+class File(object):
+    """Object representing a single file on disk.  Data can be appended,
+    or the self.mmap handle can be used for random reads."""
+
+    def __init__(self, root, subdir, filename):
+        # Create path if it doesn't exist
+        try:
+            os.mkdir(os.path.join(root, subdir))
+        except OSError:
+            pass
+
+        # Open/create file
+        self._f = open(os.path.join(root, subdir, filename), "a+b", 0)
+
+        # Seek to end, and get size
+        self._f.seek(0, 2)
+        self.size = self._f.tell()
+
+        # Open mmap object
+        self.mmap = None
+        self._mmap_reopen()
+
+    def _mmap_reopen(self):
+        if self.size == 0:
+            # Don't mmap if the file is empty; it would fail
+            pass
+        elif self.mmap is None:
+            # Not opened yet, so open it
+            self.mmap = mmap.mmap(self._f.fileno(), 0)
+        else:
+            # Already opened, so just resize it
+            self.mmap.resize(self.size)
+
+    def close(self):
+        if self.mmap is not None:
+            self.mmap.close()
+        self._f.close()
+
+    def append(self, data): # pragma: no cover (below version used instead)
+        # Write data, flush it, and resize our mmap accordingly
+        self._f.write(data)
+        self._f.flush()
+        self.size += len(data)
+        self._mmap_reopen()
+
+    def append_pack_iter(self, count, packer, dataiter):
+        # An optimized verison of append, to avoid flushing the file
+        # and resizing the mmap after each data point.
+        try:
+            for i in xrange(count):
+                row = dataiter.next()
+                self._f.write(packer(*row))
+        finally:
+            self._f.flush()
+            self.size = self._f.tell()
+            self._mmap_reopen()
+
+@nilmdb.utils.must_close(wrap_verify = False)
 class Table(object):
     """Tools to help access a single table (data at a specific OS path)."""
     # See design.md for design details
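The new `File` object holds one descriptor plus one mmap per data file, growing the map as appends land; an empty file is deliberately left unmapped, since mmap of a zero-length file fails. A standalone sketch of that grow-then-remap pattern (simplified, with nilmdb's caching and error handling omitted):

    import mmap, os, tempfile

    path = os.path.join(tempfile.mkdtemp(), "data")
    f = open(path, "a+b", 0)   # unbuffered append, as File does
    m = None

    def append(data):
        global m
        f.write(data)
        f.flush()
        size = os.fstat(f.fileno()).st_size
        if m is None:
            m = mmap.mmap(f.fileno(), 0)   # map the whole file once nonempty
        else:
            m.resize(size)                 # grow the existing map in place

    append(b"0123456789abcdef")
    append(b"0123456789abcdef")
    print(len(m))   # 32: the map tracks the file as it grows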
@@ -185,12 +250,12 @@ class Table(object):
         packer = struct.Struct(struct_fmt)
         rows_per_file = max(file_size // packer.size, 1)

-        format = { "rows_per_file": rows_per_file,
+        fmt = { "rows_per_file": rows_per_file,
                 "files_per_dir": files_per_dir,
                 "struct_fmt": struct_fmt,
                 "version": 1 }
         with open(os.path.join(root, "_format"), "wb") as f:
-            pickle.dump(format, f, 2)
+            pickle.dump(fmt, f, 2)

     # Normal methods
     def __init__(self, root):
@@ -199,22 +264,22 @@ class Table(object):

         # Load the format and build packer
         with open(os.path.join(self.root, "_format"), "rb") as f:
-            format = pickle.load(f)
+            fmt = pickle.load(f)

-        if format["version"] != 1: # pragma: no cover (just future proofing)
-            raise NotImplementedError("version " + format["version"] +
+        if fmt["version"] != 1: # pragma: no cover (just future proofing)
+            raise NotImplementedError("version " + fmt["version"] +
                                       " bulk data store not supported")

-        self.rows_per_file = format["rows_per_file"]
-        self.files_per_dir = format["files_per_dir"]
-        self.packer = struct.Struct(format["struct_fmt"])
+        self.rows_per_file = fmt["rows_per_file"]
+        self.files_per_dir = fmt["files_per_dir"]
+        self.packer = struct.Struct(fmt["struct_fmt"])
         self.file_size = self.packer.size * self.rows_per_file

         # Find nrows
         self.nrows = self._get_nrows()

     def close(self):
-        self.mmap_open.cache_remove_all()
+        self.file_open.cache_remove_all()

     # Internal helpers
     def _get_nrows(self):
@@ -278,37 +343,11 @@ class Table(object):

     # Cache open files
     @nilmdb.utils.lru_cache(size = fd_cache_size,
-                            keys = slice(0,3), # exclude newsize
-                            onremove = lambda x: x.close())
-    def mmap_open(self, subdir, filename, newsize = None):
+                            onremove = lambda f: f.close())
+    def file_open(self, subdir, filename):
         """Open and map a given 'subdir/filename' (relative to self.root).
-        Will be automatically closed when evicted from the cache.
-
-        If 'newsize' is provided, the file is truncated to the given
-        size before the mapping is returned.  (Note that the LRU cache
-        on this function means the truncate will only happen if the
-        object isn't already cached; mmap.resize should be used too.)"""
-        try:
-            os.mkdir(os.path.join(self.root, subdir))
-        except OSError:
-            pass
-        f = open(os.path.join(self.root, subdir, filename), "a+", 0)
-        if newsize is not None:
-            # mmap can't map a zero-length file, so this allows the
-            # caller to set the filesize between file creation and
-            # mmap.
-            f.truncate(newsize)
-        mm = mmap.mmap(f.fileno(), 0)
-        return mm
-
-    def mmap_open_resize(self, subdir, filename, newsize):
-        """Open and map a given 'subdir/filename' (relative to self.root).
-        The file is resized to the given size."""
-        # Pass new size to mmap_open
-        mm = self.mmap_open(subdir, filename, newsize)
-        # In case we got a cached copy, need to call mm.resize too.
-        mm.resize(newsize)
-        return mm
+        Will be automatically closed when evicted from the cache."""
+        return File(self.root, subdir, filename)

     def append(self, data):
         """Append the data and flush it to disk.
@@ -320,14 +359,11 @@ class Table(object):
             (subdir, fname, offset, count) = self._offset_from_row(self.nrows)
             if count > remaining:
                 count = remaining
-            newsize = offset + count * self.packer.size
-            mm = self.mmap_open_resize(subdir, fname, newsize)
+            f = self.file_open(subdir, fname)
-            mm.seek(offset)

             # Write the data
-            for i in xrange(count):
-                row = dataiter.next()
-                mm.write(self.packer.pack(*row))
+            f.append_pack_iter(count, self.packer.pack, dataiter)
             remaining -= count
             self.nrows += count
@@ -354,7 +390,7 @@ class Table(object):
             (subdir, filename, offset, count) = self._offset_from_row(row)
             if count > remaining:
                 count = remaining
-            mm = self.mmap_open(subdir, filename)
+            mm = self.file_open(subdir, filename).mmap
             for i in xrange(count):
                 ret.append(list(self.packer.unpack_from(mm, offset)))
                 offset += self.packer.size
@@ -366,7 +402,7 @@ class Table(object):
         if key < 0 or key >= self.nrows:
             raise IndexError("Index out of range")
         (subdir, filename, offset, count) = self._offset_from_row(key)
-        mm = self.mmap_open(subdir, filename)
+        mm = self.file_open(subdir, filename).mmap
         # unpack_from ignores the mmap object's current seek position
         return list(self.packer.unpack_from(mm, offset))
@@ -413,8 +449,8 @@ class Table(object):
         # are generally easier if we don't have to special-case that.
         if (len(merged) == 1 and
             merged[0][0] == 0 and merged[0][1] == self.rows_per_file):
-            # Close potentially open file in mmap_open LRU cache
-            self.mmap_open.cache_remove(self, subdir, filename)
+            # Close potentially open file in file_open LRU cache
+            self.file_open.cache_remove(self, subdir, filename)

             # Delete files
             os.remove(datafile)
@@ -36,7 +36,7 @@ cdef class Interval:
         """
         'start' and 'end' are arbitrary floats that represent time
         """
-        if start > end:
+        if start >= end:
             # Explicitly disallow zero-width intervals (since they're half-open)
             raise IntervalError("start %s must precede end %s" % (start, end))
         self.start = float(start)
@@ -4,7 +4,6 @@ import time
 import sys
 import inspect
 import cStringIO
-import numpy as np

 cdef enum:
     max_value_count = 64
@@ -42,10 +41,12 @@ class Layout:

         if datatype == 'uint16':
             self.parse = self.parse_uint16
-            self.format = self.format_uint16
+            self.format_str = "%.6f" + " %d" * self.count
+            self.format = self.format_generic
         elif datatype == 'float32' or datatype == 'float64':
             self.parse = self.parse_float64
-            self.format = self.format_float64
+            self.format_str = "%.6f" + " %f" * self.count
+            self.format = self.format_generic
         else:
             raise KeyError("invalid type")

@@ -57,15 +58,15 @@ class Layout:
         cdef double ts
         # Return doubles even in float32 case, since they're going into
         # a Python array which would upconvert to double anyway.
-        result = []
+        result = [0] * (self.count + 1)
         cdef char *end
         ts = libc.stdlib.strtod(text, &end)
         if end == text:
             raise ValueError("bad timestamp")
-        result.append(ts)
+        result[0] = ts
         for n in range(self.count):
             text = end
-            result.append(libc.stdlib.strtod(text, &end))
+            result[n+1] = libc.stdlib.strtod(text, &end)
             if end == text:
                 raise ValueError("wrong number of values")
         n = 0
@@ -79,18 +80,18 @@ class Layout:
         cdef int n
         cdef double ts
         cdef int v
-        result = []
         cdef char *end
+        result = [0] * (self.count + 1)
         ts = libc.stdlib.strtod(text, &end)
         if end == text:
             raise ValueError("bad timestamp")
-        result.append(ts)
+        result[0] = ts
         for n in range(self.count):
             text = end
             v = libc.stdlib.strtol(text, &end, 10)
             if v < 0 or v > 65535:
                 raise ValueError("value out of range")
-            result.append(v)
+            result[n+1] = v
             if end == text:
                 raise ValueError("wrong number of values")
         n = 0
@@ -101,25 +102,12 @@ class Layout:
         return (ts, result)

     # Formatters
-    def format_float64(self, d):
+    def format_generic(self, d):
         n = len(d) - 1
         if n != self.count:
             raise ValueError("wrong number of values for layout type: "
                              "got %d, wanted %d" % (n, self.count))
-        s = "%.6f" % d[0]
-        for i in range(n):
-            s += " %f" % d[i+1]
-        return s + "\n"
-
-    def format_uint16(self, d):
-        n = len(d) - 1
-        if n != self.count:
-            raise ValueError("wrong number of values for layout type: "
-                             "got %d, wanted %d" % (n, self.count))
-        s = "%.6f" % d[0]
-        for i in range(n):
-            s += " %d" % d[i+1]
-        return s + "\n"
+        return (self.format_str % tuple(d)) + "\n"

 # Get a layout by name
 def get_named(typestring):
@@ -154,7 +142,7 @@ class Parser(object):
        layout, into an internal data structure suitable for a
        pytables 'table.append(parser.data)'.
        """
-        cdef double last_ts = 0, ts
+        cdef double last_ts = -1e12, ts
         cdef int n = 0, i
         cdef char *line
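Collapsing `format_uint16` and `format_float64` into `format_generic` works because the `%`-format string can be precomputed once per layout (in `__init__`, above) and then applied to a whole row in a single operation instead of a per-value loop. The idea in isolation:

    # Precompute the row format once per layout...
    count = 3
    format_str = "%.6f" + " %d" * count

    # ...then render each row with one % operation.
    row = (1332496919.9, 1, 2, 3)
    print(format_str % row)   # -> 1332496919.900000 1 2 3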
@@ -15,11 +15,9 @@ from nilmdb.utils.printf import *
 from nilmdb.server.interval import (Interval, DBInterval,
                                     IntervalSet, IntervalError)
 from nilmdb.server import bulkdata
-from nilmdb.server.errors import *
+from nilmdb.server.errors import NilmDBError, StreamError, OverlapError

 import sqlite3
-import time
-import sys
 import os
 import errno
 import bisect
@@ -80,7 +78,10 @@ class NilmDB(object):
     verbose = 0

     def __init__(self, basepath, sync=True, max_results=None,
-                 bulkdata_args={}):
+                 bulkdata_args=None):
+        if bulkdata_args is None:
+            bulkdata_args = {}

         # set up path
         self.basepath = os.path.abspath(basepath)

@@ -96,12 +97,7 @@ class NilmDB(object):

         # SQLite database too
         sqlfilename = os.path.join(self.basepath, "data.sql")
-        # We use check_same_thread = False, assuming that the rest
-        # of the code (e.g. Server) will be smart and not access this
-        # database from multiple threads simultaneously.  Otherwise
-        # false positives will occur when the database is only opened
-        # in one thread, and only accessed in another.
-        self.con = sqlite3.connect(sqlfilename, check_same_thread = False)
+        self.con = sqlite3.connect(sqlfilename, check_same_thread = True)
         self._sql_schema_update()

         # See big comment at top about the performance implications of this
@@ -141,6 +137,15 @@ class NilmDB(object):
         with self.con:
             cur.execute("PRAGMA user_version = {v:d}".format(v=version))

+    def _check_user_times(self, start, end):
+        if start is None:
+            start = -1e12
+        if end is None:
+            end = 1e12
+        if start >= end:
+            raise NilmDBError("start must precede end")
+        return (start, end)
+
     @nilmdb.utils.lru_cache(size = 16)
     def _get_intervals(self, stream_id):
         """
@@ -156,7 +161,7 @@ class NilmDB(object):
                 iset += DBInterval(start_time, end_time,
                                    start_time, end_time,
                                    start_pos, end_pos)
-        except IntervalError as e: # pragma: no cover
+        except IntervalError: # pragma: no cover
             raise NilmDBError("unexpected overlap in ranges table!")

         return iset
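The new `_check_user_times` helper replaces the scattered `start or 0` / `end or 1e12` defaults used below, and also rejects empty or inverted ranges up front. Its behavior, restated as a standalone sketch:

    def check_user_times(start, end):
        # Same defaults as the nilmdb method: +/-1e12 as "unbounded".
        if start is None:
            start = -1e12
        if end is None:
            end = 1e12
        if start >= end:
            raise ValueError("start must precede end")
        return (start, end)

    print(check_user_times(None, None))   # (-1e12, 1e12)
    print(check_user_times(100, None))    # (100, 1e12)
    # check_user_times(5, 5) raises; the old "end or 1e12" idiom would
    # also have treated a legitimate end of 0 as "unset".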
@@ -264,28 +269,39 @@ class NilmDB(object):

         return

-    def stream_list(self, path = None, layout = None):
-        """Return list of [path, layout] lists of all streams
-        in the database.
+    def stream_list(self, path = None, layout = None, extent = False):
+        """Return list of lists of all streams in the database.

         If path is specified, include only streams with a path that
         matches the given string.

         If layout is specified, include only streams with a layout
         that matches the given string.
-        """
-        where = "WHERE 1=1"
-        params = ()
-        if layout:
-            where += " AND layout=?"
-            params += (layout,)
-        if path:
-            where += " AND path=?"
-            params += (path,)
-        result = self.con.execute("SELECT path, layout "
-                                  "FROM streams " + where, params).fetchall()
-
-        return sorted(list(x) for x in result)
+
+        If extent = False, returns a list of lists containing
+        the path and layout: [ path, layout ]
+
+        If extent = True, returns a list of lists containing the
+        path, layout, and min/max extent of the data:
+        [ path, layout, extent_min, extent_max ]
+        """
+        params = ()
+        query = "SELECT streams.path, streams.layout"
+        if extent:
+            query += ", min(ranges.start_time), max(ranges.end_time)"
+        query += " FROM streams"
+        if extent:
+            query += " LEFT JOIN ranges ON streams.id = ranges.stream_id"
+        query += " WHERE 1=1"
+        if layout is not None:
+            query += " AND streams.layout=?"
+            params += (layout,)
+        if path is not None:
+            query += " AND streams.path=?"
+            params += (path,)
+        query += " GROUP BY streams.id ORDER BY streams.path"
+        result = self.con.execute(query, params).fetchall()
+        return [ list(x) for x in result ]

     def stream_intervals(self, path, start = None, end = None):
         """
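For `extent = True`, the rewritten `stream_list` pushes the min/max computation into SQLite. With no path or layout filter, the string concatenation above assembles (reconstructed here for readability):

    query = ("SELECT streams.path, streams.layout"
             ", min(ranges.start_time), max(ranges.end_time)"
             " FROM streams"
             " LEFT JOIN ranges ON streams.id = ranges.stream_id"
             " WHERE 1=1"
             " GROUP BY streams.id ORDER BY streams.path")

The LEFT JOIN keeps streams that have no rows in `ranges`, so their extents come back as NULL (None on the Python side), which is what lets `cmd_list` print "(no data)".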
@@ -302,7 +318,8 @@ class NilmDB(object):
         """
         stream_id = self._stream_id(path)
         intervals = self._get_intervals(stream_id)
-        requested = Interval(start or 0, end or 1e12)
+        (start, end) = self._check_user_times(start, end)
+        requested = Interval(start, end)
         result = []
         for n, i in enumerate(intervals.intersection(requested)):
             if n >= self.max_results:
@@ -395,7 +412,7 @@ class NilmDB(object):
         path: Path at which to add the data
         start: Starting timestamp
         end: Ending timestamp
-        data: Rows of data, to be passed to PyTable's table.append
+        data: Rows of data, to be passed to bulkdata table.append
               method.  E.g. nilmdb.layout.Parser.data
         """
         # First check for basic overlap using timestamp info given.
@@ -416,7 +433,7 @@ class NilmDB(object):
             self._add_interval(stream_id, interval, row_start, row_end)

         # And that's all
-        return "ok"
+        return

     def _find_start(self, table, dbinterval):
         """
@@ -474,7 +491,8 @@ class NilmDB(object):
         stream_id = self._stream_id(path)
         table = self.data.getnode(path)
         intervals = self._get_intervals(stream_id)
-        requested = Interval(start or 0, end or 1e12)
+        (start, end) = self._check_user_times(start, end)
+        requested = Interval(start, end)
         result = []
         matched = 0
         remaining = self.max_results
@@ -520,12 +538,10 @@ class NilmDB(object):
         stream_id = self._stream_id(path)
         table = self.data.getnode(path)
         intervals = self._get_intervals(stream_id)
-        to_remove = Interval(start or 0, end or 1e12)
+        (start, end) = self._check_user_times(start, end)
+        to_remove = Interval(start, end)
         removed = 0

-        if start == end:
-            return 0

         # Can't remove intervals from within the iterator, so we need to
         # remember what's currently in the intersection now.
         all_candidates = list(intervals.intersection(to_remove, orig = True))
@@ -5,29 +5,20 @@
 from __future__ import absolute_import
 import nilmdb
 from nilmdb.utils.printf import *
-from nilmdb.server.errors import *
+from nilmdb.server.errors import NilmDBError

 import cherrypy
 import sys
-import time
 import os
 import simplejson as json
 import decorator
 import traceback
+import psutil

-try:
-    import cherrypy
-    cherrypy.tools.json_out
-except: # pragma: no cover
-    sys.stderr.write("Cherrypy 3.2+ required\n")
-    sys.exit(1)

 class NilmApp(object):
     def __init__(self, db):
         self.db = db

-version = "1.2"

 # Decorators
 def chunked_response(func):
     """Decorator to enable chunked responses."""
@@ -57,7 +48,7 @@ def workaround_cp_bug_1200(func, *args, **kwargs): # pragma: no cover
     try:
         for val in func(*args, **kwargs):
             yield val
-    except (LookupError, UnicodeError) as e:
+    except (LookupError, UnicodeError):
         raise Exception("bug workaround; real exception is:\n" +
                         traceback.format_exc())
@@ -80,13 +71,23 @@ def exception_to_httperror(*expected):
|
|||||||
# care of that.
|
# care of that.
|
||||||
return decorator.decorator(wrapper)
|
return decorator.decorator(wrapper)
|
||||||
|
|
||||||
|
# Custom Cherrypy tools
|
||||||
|
def allow_methods(methods):
|
||||||
|
method = cherrypy.request.method.upper()
|
||||||
|
if method not in methods:
|
||||||
|
if method in cherrypy.request.methods_with_bodies:
|
||||||
|
cherrypy.request.body.read()
|
||||||
|
allowed = ', '.join(methods)
|
||||||
|
cherrypy.response.headers['Allow'] = allowed
|
||||||
|
raise cherrypy.HTTPError(405, method + " not allowed; use " + allowed)
|
||||||
|
cherrypy.tools.allow_methods = cherrypy.Tool('before_handler', allow_methods)
|
||||||
|
|
||||||
# CherryPy apps
|
# CherryPy apps
|
||||||
class Root(NilmApp):
|
class Root(NilmApp):
|
||||||
"""Root application for NILM database"""
|
"""Root application for NILM database"""
|
||||||
|
|
||||||
def __init__(self, db, version):
|
def __init__(self, db):
|
||||||
super(Root, self).__init__(db)
|
super(Root, self).__init__(db)
|
||||||
self.server_version = version
|
|
||||||
|
|
||||||
# /
|
# /
|
||||||
@cherrypy.expose
|
@cherrypy.expose
|
||||||
@@ -102,38 +103,46 @@ class Root(NilmApp):
     @cherrypy.expose
     @cherrypy.tools.json_out()
     def version(self):
-        return self.server_version
+        return nilmdb.__version__
 
-    # /dbpath
+    # /dbinfo
     @cherrypy.expose
     @cherrypy.tools.json_out()
-    def dbpath(self):
-        return self.db.get_basepath()
-
-    # /dbsize
-    @cherrypy.expose
-    @cherrypy.tools.json_out()
-    def dbsize(self):
-        return nilmdb.utils.du(self.db.get_basepath())
+    def dbinfo(self):
+        """Return a dictionary with the database path,
        size of the database in bytes, and free disk space in bytes"""
+        path = self.db.get_basepath()
+        return { "path": path,
+                 "size": nilmdb.utils.du(path),
+                 "free": psutil.disk_usage(path).free }
 
 class Stream(NilmApp):
     """Stream-specific operations"""
 
     # /stream/list
     # /stream/list?layout=PrepData
-    # /stream/list?path=/newton/prep
+    # /stream/list?path=/newton/prep&extent=1
     @cherrypy.expose
     @cherrypy.tools.json_out()
-    def list(self, path = None, layout = None):
+    def list(self, path = None, layout = None, extent = None):
         """List all streams in the database. With optional path or
        layout parameter, just list streams that match the given path
-        or layout"""
-        return self.db.stream_list(path, layout)
+        or layout.
+
+        If extent is not given, returns a list of lists containing
+        the path and layout: [ path, layout ]
+
+        If extent is provided, returns a list of lists containing the
+        path, layout, and min/max extent of the data:
+        [ path, layout, extent_min, extent_max ]
+        """
+        return self.db.stream_list(path, layout, bool(extent))
 
     # /stream/create?path=/newton/prep&layout=PrepData
     @cherrypy.expose
     @cherrypy.tools.json_out()
     @exception_to_httperror(NilmDBError, ValueError)
+    @cherrypy.tools.allow_methods(methods = ["POST"])
     def create(self, path, layout):
         """Create a new stream in the database. Provide path
        and one of the nilmdb.layout.layouts keys.
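
For reference, a sketch of what the reworked endpoints return, assuming a server on localhost:12380 (all values illustrative):

    # GET /dbinfo
    #   -> {"path": "/home/nilm/db", "size": 143360, "free": 92016214016}
    # GET /stream/list?path=/newton/prep&extent=1
    #   -> [["/newton/prep", "PrepData", 1234567890.0, 1234567899.0]]
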
@@ -144,6 +153,7 @@ class Stream(NilmApp):
     @cherrypy.expose
     @cherrypy.tools.json_out()
     @exception_to_httperror(NilmDBError)
+    @cherrypy.tools.allow_methods(methods = ["POST"])
     def destroy(self, path):
         """Delete a stream and its associated data."""
         return self.db.stream_destroy(path)
@@ -176,29 +186,29 @@ class Stream(NilmApp):
     @cherrypy.expose
     @cherrypy.tools.json_out()
     @exception_to_httperror(NilmDBError, LookupError, TypeError)
+    @cherrypy.tools.allow_methods(methods = ["POST"])
     def set_metadata(self, path, data):
         """Set metadata for the named stream, replacing any
        existing metadata. Data should be a json-encoded
        dictionary"""
         data_dict = json.loads(data)
         self.db.stream_set_metadata(path, data_dict)
-        return "ok"
 
     # /stream/update_metadata?path=/newton/prep&data=<json>
     @cherrypy.expose
     @cherrypy.tools.json_out()
     @exception_to_httperror(NilmDBError, LookupError, TypeError)
+    @cherrypy.tools.allow_methods(methods = ["POST"])
     def update_metadata(self, path, data):
         """Update metadata for the named stream. Data
        should be a json-encoded dictionary"""
         data_dict = json.loads(data)
         self.db.stream_update_metadata(path, data_dict)
-        return "ok"
 
     # /stream/insert?path=/newton/prep
     @cherrypy.expose
     @cherrypy.tools.json_out()
-    #@cherrypy.tools.disable_prb()
+    @cherrypy.tools.allow_methods(methods = ["PUT"])
     def insert(self, path, start, end):
         """
         Insert new data into the database. Provide textual data
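
Since these handlers now require POST, a hedged client-side sketch using the requests library (added as a dependency in this release; URL illustrative):

    import requests, json
    requests.post("http://localhost:12380/stream/set_metadata",
                  data = { "path": "/newton/prep",
                           "data": json.dumps({ "description": "test" }) })
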
@@ -206,12 +216,9 @@ class Stream(NilmApp):
         """
         # Important that we always read the input before throwing any
         # errors, to keep lengths happy for persistent connections.
-        # However, CherryPy 3.2.2 has a bug where this fails for GET
-        # requests, so catch that. (issue #1134)
-        try:
-            body = cherrypy.request.body.read()
-        except TypeError:
-            raise cherrypy.HTTPError("400 Bad Request", "No request body")
+        # Note that CherryPy 3.2.2 has a bug where this fails for GET
+        # requests, if we ever want to handle those (issue #1134)
+        body = cherrypy.request.body.read()
 
         # Check path and get layout
         streams = self.db.stream_list(path = path)
@@ -228,37 +235,36 @@ class Stream(NilmApp):
                                      "error parsing input data: " +
                                      e.message)
 
-        if (not parser.min_timestamp or not parser.max_timestamp or
-            not len(parser.data)):
-            raise cherrypy.HTTPError("400 Bad Request",
-                                     "no data provided")
-
         # Check limits
         start = float(start)
         end = float(end)
-        if parser.min_timestamp < start:
+        if start >= end:
+            raise cherrypy.HTTPError("400 Bad Request",
+                                     "start must precede end")
+        if parser.min_timestamp is not None and parser.min_timestamp < start:
             raise cherrypy.HTTPError("400 Bad Request", "Data timestamp " +
                                      repr(parser.min_timestamp) +
                                      " < start time " + repr(start))
-        if parser.max_timestamp >= end:
+        if parser.max_timestamp is not None and parser.max_timestamp >= end:
             raise cherrypy.HTTPError("400 Bad Request", "Data timestamp " +
                                      repr(parser.max_timestamp) +
                                      " >= end time " + repr(end))
 
         # Now do the nilmdb insert, passing it the parser full of data.
         try:
-            result = self.db.stream_insert(path, start, end, parser.data)
+            self.db.stream_insert(path, start, end, parser.data)
         except NilmDBError as e:
             raise cherrypy.HTTPError("400 Bad Request", e.message)
 
         # Done
-        return "ok"
+        return
 
     # /stream/remove?path=/newton/prep
     # /stream/remove?path=/newton/prep&start=1234567890.0&end=1234567899.0
     @cherrypy.expose
     @cherrypy.tools.json_out()
     @exception_to_httperror(NilmDBError)
+    @cherrypy.tools.allow_methods(methods = ["POST"])
     def remove(self, path, start = None, end = None):
         """
         Remove data from the backend database. Removes all data in
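
In short, the server now checks start < end before inspecting the parsed data, and the per-timestamp checks tolerate empty input. The accepted invariant, as a sketch:

    # For any insert: start < end, and for every parsed timestamp t:
    #     start <= t < end
    # An insert with no rows at all is legal as long as start < end.
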
@@ -270,26 +276,25 @@ class Stream(NilmApp):
         if end is not None:
             end = float(end)
         if start is not None and end is not None:
-            if end < start:
+            if start >= end:
                 raise cherrypy.HTTPError("400 Bad Request",
-                                         "end before start")
+                                         "start must precede end")
         return self.db.stream_remove(path, start, end)
 
     # /stream/intervals?path=/newton/prep
     # /stream/intervals?path=/newton/prep&start=1234567890.0&end=1234567899.0
     @cherrypy.expose
     @chunked_response
-    @response_type("text/plain")
+    @response_type("application/x-json-stream")
     def intervals(self, path, start = None, end = None):
         """
         Get intervals from backend database. Streams the resulting
-        intervals as JSON strings separated by newlines. This may
+        intervals as JSON strings separated by CR LF pairs. This may
        make multiple requests to the nilmdb backend to avoid causing
        it to block for too long.
 
-        Note that the response type is set to 'text/plain' even
-        though we're sending back JSON; this is because we're not
-        really returning a single JSON object.
+        Note that the response type is the non-standard
+        'application/x-json-stream' for lack of a better option.
         """
         if start is not None:
             start = float(start)
@@ -297,9 +302,9 @@ class Stream(NilmApp):
             end = float(end)
 
         if start is not None and end is not None:
-            if end < start:
+            if start >= end:
                 raise cherrypy.HTTPError("400 Bad Request",
-                                         "end before start")
+                                         "start must precede end")
 
         streams = self.db.stream_list(path = path)
         if len(streams) != 1:
@@ -309,8 +314,8 @@ class Stream(NilmApp):
         def content(start, end):
             # Note: disable chunked responses to see tracebacks from here.
             while True:
-                (intervals, restart) = self.db.stream_intervals(path,start,end)
-                response = ''.join([ json.dumps(i) + "\n" for i in intervals ])
+                (ints, restart) = self.db.stream_intervals(path, start, end)
+                response = ''.join([ json.dumps(i) + "\r\n" for i in ints ])
                 yield response
                 if restart == 0:
                     break
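
A minimal consumer sketch for the CRLF-delimited interval stream (client code and URL are assumptions, not part of this diff):

    import requests, json
    r = requests.get("http://localhost:12380/stream/intervals",
                     params = { "path": "/newton/prep" }, stream = True)
    for line in r.iter_lines():   # splits on the CR LF pairs
        if line:
            print json.loads(line)   # e.g. [1234567890.0, 1234567899.0]
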
@@ -337,9 +342,9 @@ class Stream(NilmApp):
 
         # Check parameters
         if start is not None and end is not None:
-            if end < start:
+            if start >= end:
                 raise cherrypy.HTTPError("400 Bad Request",
-                                         "end before start")
+                                         "start must precede end")
 
         # Check path and get layout
         streams = self.db.stream_list(path = path)
@@ -387,31 +392,47 @@ class Server(object):
                  fast_shutdown = False,   # don't wait for clients to disconn.
                  force_traceback = False  # include traceback in all errors
                  ):
-        self.version = version
+        # Save server version, just for verification during tests
+        self.version = nilmdb.__version__
 
-        # Need to wrap DB object in a serializer because we'll call
-        # into it from separate threads.
         self.embedded = embedded
-        self.db = nilmdb.utils.Serializer(db)
+        self.db = db
+        if not getattr(db, "_thread_safe", None):
+            raise KeyError("Database object " + str(db) + " doesn't claim "
+                           "to be thread safe. You should pass "
+                           "nilmdb.utils.serializer_proxy(NilmDB)(args) "
+                           "rather than NilmDB(args).")
 
+        # Build up global server configuration
         cherrypy.config.update({
             'server.socket_host': host,
             'server.socket_port': port,
             'engine.autoreload_on': False,
-            'server.max_request_body_size': 4*1024*1024,
-            'error_page.default': self.json_error_page,
+            'server.max_request_body_size': 8*1024*1024,
             })
         if self.embedded:
             cherrypy.config.update({ 'environment': 'embedded' })
 
+        # Build up application specific configuration
+        app_config = {}
+        app_config.update({
+            'error_page.default': self.json_error_page,
+            })
+
         # Send a permissive Access-Control-Allow-Origin (CORS) header
         # with all responses so that browsers can send cross-domain
         # requests to this server.
-        cherrypy.config.update({ 'response.headers.Access-Control-Allow-Origin':
-                                 '*' })
+        app_config.update({ 'response.headers.Access-Control-Allow-Origin':
+                            '*' })
 
+        # Only allow GET and HEAD by default. Individual handlers
+        # can override.
+        app_config.update({ 'tools.allow_methods.on': True,
+                            'tools.allow_methods.methods': ['GET', 'HEAD'] })
+
         # Send tracebacks in error responses. They're hidden by the
         # error_page function for client errors (code 400-499).
-        cherrypy.config.update({ 'request.show_tracebacks' : True })
+        app_config.update({ 'request.show_tracebacks' : True })
         self.force_traceback = force_traceback
 
         # Patch CherryPy error handler to never pad out error messages.
@@ -419,11 +440,13 @@ class Server(object):
         # error messages.
         cherrypy._cperror._ie_friendly_error_sizes = {}
 
-        cherrypy.tree.apps = {}
-        cherrypy.tree.mount(Root(self.db, self.version), "/")
-        cherrypy.tree.mount(Stream(self.db), "/stream")
+        # Build up the application and mount it
+        root = Root(self.db)
+        root.stream = Stream(self.db)
         if stoppable:
-            cherrypy.tree.mount(Exiter(), "/exit")
+            root.exit = Exiter()
+        cherrypy.tree.apps = {}
+        cherrypy.tree.mount(root, "/", config = { "/" : app_config })
 
         # Shutdowns normally wait for clients to disconnect. To speed
         # up tests, set fast_shutdown = True
@@ -444,7 +467,7 @@ class Server(object):
             if not self.force_traceback:
                 if code >= 400 and code <= 499:
                     errordata["traceback"] = ""
-        except Exception as e: # pragma: no cover
+        except Exception: # pragma: no cover
             pass
         # Override the response type, which was previously set to text/html
         cherrypy.serving.response.headers['Content-Type'] = (
@@ -1,11 +1,10 @@
 """NilmDB utilities"""
 
-from .timer import Timer
-from .iteratorizer import Iteratorizer
-from .serializer import Serializer
-from .lrucache import lru_cache
-from .diskusage import du
-from .mustclose import must_close
-from .urllib import urlencode
-from . import misc
-from . import atomic
+from nilmdb.utils.timer import Timer
+from nilmdb.utils.iteratorizer import Iteratorizer
+from nilmdb.utils.serializer import serializer_proxy
+from nilmdb.utils.lrucache import lru_cache
+from nilmdb.utils.diskusage import du, human_size
+from nilmdb.utils.mustclose import must_close
+from nilmdb.utils import atomic
+import nilmdb.utils.threadsafety
@@ -1,7 +1,7 @@
 import os
 from math import log
 
-def sizeof_fmt(num):
+def human_size(num):
     """Human friendly file size"""
     unit_list = zip(['bytes', 'kiB', 'MiB', 'GiB', 'TiB'], [0, 0, 1, 2, 2])
     if num > 1:
@@ -15,15 +15,11 @@ def sizeof_fmt(num):
     if num == 1: # pragma: no cover
         return '1 byte'
 
-def du_bytes(path):
+def du(path):
     """Like du -sb, returns total size of path in bytes."""
     size = os.path.getsize(path)
     if os.path.isdir(path):
-        for file in os.listdir(path):
-            filepath = os.path.join(path, file)
-            size += du_bytes(filepath)
+        for thisfile in os.listdir(path):
+            filepath = os.path.join(path, thisfile)
+            size += du(filepath)
     return size
-
-def du(path):
-    """Like du -sh, returns total size of path as a human-readable string."""
-    return sizeof_fmt(du_bytes(path))
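
With this split, du() returns a raw byte count and the old formatting behavior lives in human_size(). A quick sketch:

    from nilmdb.utils import du, human_size
    nbytes = du("/tmp/db")      # like `du -sb`, returns an int
    print human_size(nbytes)    # e.g. "1.2 MiB"
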
@@ -16,6 +16,7 @@ class IteratorizerThread(threading.Thread):
           callback (provided by this class) as an argument
         """
         threading.Thread.__init__(self)
+        self.name = "Iteratorizer-" + function.__name__ + "-" + self.name
         self.function = function
         self.queue = queue
         self.die = False
@@ -95,5 +96,5 @@ def Iteratorizer(function, curl_hack = False):
     while thread.isAlive():
         try:
             queue.get(True, 0.01)
-        except:
+        except: # pragma: no cover
             pass
@@ -5,7 +5,6 @@
 
 import collections
 import decorator
-import warnings
 
 def lru_cache(size = 10, onremove = None, keys = slice(None)):
     """Least-recently-used cache decorator.
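
A hedged sketch of the decorator shown above in use (the cached function is illustrative):

    from nilmdb.utils import lru_cache

    @lru_cache(size = 4)
    def load_table(path):
        # expensive work; results for the 4 most recently used
        # paths are returned from the cache instead
        return open(path).read()
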
@@ -1,8 +0,0 @@
-import itertools
-
-def pairwise(iterable):
-    "s -> (s0,s1), (s1,s2), ..., (sn,None)"
-    a, b = itertools.tee(iterable)
-    next(b, None)
-    return itertools.izip_longest(a, b)
-
@@ -12,15 +12,12 @@ def must_close(errorfile = sys.stderr, wrap_verify = False):
        already been called."""
     def class_decorator(cls):
 
-        # Helper to replace a class method with a wrapper function,
-        # while maintaining argument specs etc.
-        def wrap_class_method(wrapper_func):
-            method = wrapper_func.__name__
-            if method in cls.__dict__:
-                orig = getattr(cls, method).im_func
-            else:
-                orig = lambda self: None
-            setattr(cls, method, decorator.decorator(wrapper_func, orig))
+        def wrap_class_method(wrapper):
+            try:
+                orig = getattr(cls, wrapper.__name__).im_func
+            except:
+                orig = lambda x: None
+            setattr(cls, wrapper.__name__, decorator.decorator(wrapper, orig))
 
         @wrap_class_method
         def __init__(orig, self, *args, **kwargs):
@@ -38,7 +35,8 @@ def must_close(errorfile = sys.stderr, wrap_verify = False):
 
         @wrap_class_method
         def close(orig, self, *args, **kwargs):
-            del self._must_close
+            if "_must_close" in self.__dict__:
+                del self._must_close
             return orig(self, *args, **kwargs)
 
         # Optionally wrap all other functions
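
A small usage sketch; with the new guard, calling close() twice no longer raises AttributeError:

    import sys
    from nilmdb.utils import must_close

    @must_close(errorfile = sys.stderr)
    class Resource(object):
        def close(self):
            pass

    r = Resource()
    r.close()
    r.close()   # second close is now harmless
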
@@ -1,6 +1,10 @@
 import Queue
 import threading
 import sys
+import decorator
+import inspect
+import types
+import functools
 
 # This file provides a class that will wrap an object and serialize
 # all calls to its methods. All calls to that object will be queued
@@ -12,8 +16,9 @@ import sys
 class SerializerThread(threading.Thread):
     """Thread that retrieves call information from the queue, makes the
    call, and returns the results."""
-    def __init__(self, call_queue):
+    def __init__(self, classname, call_queue):
         threading.Thread.__init__(self)
+        self.name = "Serializer-" + classname + "-" + self.name
         self.call_queue = call_queue
 
     def run(self):
@@ -22,51 +27,83 @@ class SerializerThread(threading.Thread):
             # Terminate if result_queue is None
             if result_queue is None:
                 return
+            exception = None
+            result = None
             try:
                 result = func(*args, **kwargs) # wrapped
             except:
-                result_queue.put((sys.exc_info(), None))
-            else:
-                result_queue.put((None, result))
+                exception = sys.exc_info()
+            # Ensure we delete these before returning a result, so
+            # we don't unncessarily hold onto a reference while
+            # we're waiting for the next call.
+            del func, args, kwargs
+            result_queue.put((exception, result))
+            del exception, result
 
-class WrapCall(object):
-    """Wrap a callable using the given queues"""
+def serializer_proxy(obj_or_type):
+    """Wrap the given object or type in a SerializerObjectProxy.
 
-    def __init__(self, call_queue, result_queue, func):
-        self.call_queue = call_queue
-        self.result_queue = result_queue
-        self.func = func
+    Returns a SerializerObjectProxy object that proxies all method
+    calls to the object, as well as attribute retrievals.
 
-    def __call__(self, *args, **kwargs):
-        self.call_queue.put((self.result_queue, self.func, args, kwargs))
-        ( exc_info, result ) = self.result_queue.get()
-        if exc_info is None:
-            return result
-        else:
-            raise exc_info[0], exc_info[1], exc_info[2]
+    The proxied requests, including instantiation, are performed in a
+    single thread and serialized between caller threads.
+    """
+    class SerializerCallProxy(object):
+        def __init__(self, call_queue, func, objectproxy):
+            self.call_queue = call_queue
+            self.func = func
+            # Need to hold a reference to object proxy so it doesn't
+            # go away (and kill the thread) until after get called.
+            self.objectproxy = objectproxy
+        def __call__(self, *args, **kwargs):
+            result_queue = Queue.Queue()
+            self.call_queue.put((result_queue, self.func, args, kwargs))
+            ( exc_info, result ) = result_queue.get()
+            if exc_info is None:
+                return result
+            else:
+                raise exc_info[0], exc_info[1], exc_info[2]
 
-class WrapObject(object):
-    """Wrap all calls to methods in a target object with WrapCall"""
+    class SerializerObjectProxy(object):
+        def __init__(self, obj_or_type, *args, **kwargs):
+            self.__object = obj_or_type
+            try:
+                if type(obj_or_type) in (types.TypeType, types.ClassType):
+                    classname = obj_or_type.__name__
+                else:
+                    classname = obj_or_type.__class__.__name__
+            except AttributeError: # pragma: no cover
+                classname = "???"
+            self.__call_queue = Queue.Queue()
+            self.__thread = SerializerThread(classname, self.__call_queue)
+            self.__thread.daemon = True
+            self.__thread.start()
+            self._thread_safe = True
 
-    def __init__(self, target):
-        self.__wrap_target = target
-        self.__wrap_call_queue = Queue.Queue()
-        self.__wrap_serializer = SerializerThread(self.__wrap_call_queue)
-        self.__wrap_serializer.daemon = True
-        self.__wrap_serializer.start()
+        def __getattr__(self, key):
+            if key.startswith("_SerializerObjectProxy__"): # pragma: no cover
+                raise AttributeError
+            attr = getattr(self.__object, key)
+            if not callable(attr):
+                getter = SerializerCallProxy(self.__call_queue, getattr, self)
+                return getter(self.__object, key)
+            r = SerializerCallProxy(self.__call_queue, attr, self)
+            return r
 
-    def __getattr__(self, key):
-        """Wrap methods of self.__wrap_target in a WrapCall instance"""
-        func = getattr(self.__wrap_target, key)
-        if not callable(func):
-            raise TypeError("Can't serialize attribute %r (type: %s)"
-                            % (key, type(func)))
-        result_queue = Queue.Queue()
-        return WrapCall(self.__wrap_call_queue, result_queue, func)
+        def __call__(self, *args, **kwargs):
+            """Call this to instantiate the type, if a type was passed
+            to serializer_proxy. Otherwise, pass the call through."""
+            ret = SerializerCallProxy(self.__call_queue,
+                                      self.__object, self)(*args, **kwargs)
+            if type(self.__object) in (types.TypeType, types.ClassType):
+                # Instantiation
+                self.__object = ret
+                return self
+            return ret
 
-    def __del__(self):
-        self.__wrap_call_queue.put((None, None, None, None))
-        self.__wrap_serializer.join()
-
-# Just an alias
-Serializer = WrapObject
+        def __del__(self):
+            self.__call_queue.put((None, None, None, None))
+            self.__thread.join()
+
+    return SerializerObjectProxy(obj_or_type)
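
Usage changes accordingly: instead of wrapping an instance in Serializer, callers proxy the class and instantiate through it, so __init__ also runs in the serializing thread. A sketch (arguments illustrative; this matches the test suite changes below):

    import nilmdb
    db = nilmdb.utils.serializer_proxy(nilmdb.NilmDB)("db", sync = False)
    # db._thread_safe is True, so nilmdb.Server(db, ...) accepts it.
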
nilmdb/utils/threadsafety.py (new file, 109 lines)
@@ -0,0 +1,109 @@
+from nilmdb.utils.printf import *
+import threading
+import warnings
+import types
+
+def verify_proxy(obj_or_type, exception = False, check_thread = True,
+                 check_concurrent = True):
+    """Wrap the given object or type in a VerifyObjectProxy.
+
+    Returns a VerifyObjectProxy that proxies all method calls to the
+    given object, as well as attribute retrievals.
+
+    When calling methods, the following checks are performed. If
+    exception is True, an exception is raised. Otherwise, a warning
+    is printed.
+
+    check_thread = True     # Warn/fail if two different threads call methods.
+    check_concurrent = True # Warn/fail if two functions are concurrently
+                            # run through this proxy
+    """
+    class Namespace(object):
+        pass
+    class VerifyCallProxy(object):
+        def __init__(self, func, parent_namespace):
+            self.func = func
+            self.parent_namespace = parent_namespace
+
+        def __call__(self, *args, **kwargs):
+            p = self.parent_namespace
+            this = threading.current_thread()
+            try:
+                callee = self.func.__name__
+            except AttributeError:
+                callee = "???"
+
+            if p.thread is None:
+                p.thread = this
+                p.thread_callee = callee
+
+            if check_thread and p.thread != this:
+                err = sprintf("unsafe threading: %s called %s.%s,"
+                              " but %s called %s.%s",
+                              p.thread.name, p.classname, p.thread_callee,
+                              this.name, p.classname, callee)
+                if exception:
+                    raise AssertionError(err)
+                else: # pragma: no cover
+                    warnings.warn(err)
+
+            need_concur_unlock = False
+            if check_concurrent:
+                if p.concur_lock.acquire(False) == False:
+                    err = sprintf("unsafe concurrency: %s called %s.%s "
+                                  "while %s is still in %s.%s",
+                                  this.name, p.classname, callee,
+                                  p.concur_tname, p.classname, p.concur_callee)
+                    if exception:
+                        raise AssertionError(err)
+                    else: # pragma: no cover
+                        warnings.warn(err)
+                else:
+                    p.concur_tname = this.name
+                    p.concur_callee = callee
+                    need_concur_unlock = True
+
+            try:
+                ret = self.func(*args, **kwargs)
+            finally:
+                if need_concur_unlock:
+                    p.concur_lock.release()
+            return ret
+
+    class VerifyObjectProxy(object):
+        def __init__(self, obj_or_type, *args, **kwargs):
+            p = Namespace()
+            self.__ns = p
+            p.thread = None
+            p.thread_callee = None
+            p.concur_lock = threading.Lock()
+            p.concur_tname = None
+            p.concur_callee = None
+            self.__obj = obj_or_type
+            try:
+                if type(obj_or_type) in (types.TypeType, types.ClassType):
+                    p.classname = self.__obj.__name__
+                else:
+                    p.classname = self.__obj.__class__.__name__
+            except AttributeError: # pragma: no cover
+                p.classname = "???"
+
+        def __getattr__(self, key):
+            if key.startswith("_VerifyObjectProxy__"): # pragma: no cover
+                raise AttributeError
+            attr = getattr(self.__obj, key)
+            if not callable(attr):
+                return VerifyCallProxy(getattr, self.__ns)(self.__obj, key)
+            return VerifyCallProxy(attr, self.__ns)
+
+        def __call__(self, *args, **kwargs):
+            """Call this to instantiate the type, if a type was passed
+            to verify_proxy. Otherwise, pass the call through."""
+            ret = VerifyCallProxy(self.__obj, self.__ns)(*args, **kwargs)
+            if type(self.__obj) in (types.TypeType, types.ClassType):
+                # Instantiation
+                self.__obj = ret
+                return self
+            return ret
+
+    return VerifyObjectProxy(obj_or_type)
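
A sketch of exercising verify_proxy with a throwaway class (the class itself is illustrative):

    from nilmdb.utils.threadsafety import verify_proxy

    class Counter(object):
        def __init__(self):
            self.n = 0
        def bump(self):
            self.n += 1

    c = verify_proxy(Counter, exception = True)()
    c.bump()   # a later call from a different thread raises AssertionError
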
nilmdb/utils/time.py (new file, 54 lines)
@@ -0,0 +1,54 @@
+from nilmdb.utils import datetime_tz
+import re
+
+def parse_time(toparse):
+    """
+    Parse a free-form time string and return a datetime_tz object.
+    If the string doesn't contain a timestamp, the current local
+    timezone is assumed (e.g. from the TZ env var).
+    """
+    # If string isn't "now" and doesn't contain at least 4 digits,
+    # consider it invalid. smartparse might otherwise accept
+    # empty strings and strings with just separators.
+    if toparse != "now" and len(re.findall(r"\d", toparse)) < 4:
+        raise ValueError("not enough digits for a timestamp")
+
+    # Try to just parse the time as given
+    try:
+        return datetime_tz.datetime_tz.smartparse(toparse)
+    except ValueError:
+        pass
+
+    # Try to extract a substring in a condensed format that we expect
+    # to see in a filename or header comment
+    res = re.search(r"(^|[^\d])("         # non-numeric or SOL
+                    r"(199\d|2\d\d\d)"    # year
+                    r"[-/]?"              # separator
+                    r"(0[1-9]|1[012])"    # month
+                    r"[-/]?"              # separator
+                    r"([012]\d|3[01])"    # day
+                    r"[-T ]?"             # separator
+                    r"([01]\d|2[0-3])"    # hour
+                    r"[:]?"               # separator
+                    r"([0-5]\d)"          # minute
+                    r"[:]?"               # separator
+                    r"([0-5]\d)?"         # second
+                    r"([-+]\d\d\d\d)?"    # timezone
+                    r")", toparse)
+    if res is not None:
+        try:
+            return datetime_tz.datetime_tz.smartparse(res.group(2))
+        except ValueError:
+            pass
+
+    # Could also try to successively parse substrings, but let's
+    # just give up for now.
+    raise ValueError("unable to parse timestamp")
+
+def format_time(timestamp):
+    """
+    Convert a Unix timestamp to a string for printing, using the
+    local timezone for display (e.g. from the TZ env var).
+    """
+    dt = datetime_tz.datetime_tz.fromtimestamp(timestamp)
+    return dt.strftime("%a, %d %b %Y %H:%M:%S.%f %z")
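
A usage sketch; the condensed form below matches the fallback regex (filename illustrative, output depends on TZ):

    from nilmdb.utils.time import parse_time, format_time
    dt = parse_time("capture-20120323T1000.dat")
    print format_time(dt.totimestamp())
    # e.g. "Fri, 23 Mar 2012 10:00:00.000000 -0400"
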
@@ -2,10 +2,11 @@
 
 # Simple timer to time a block of code, for optimization debugging
 # use like:
-#   with nilmdb.Timer("flush"):
+#   with nilmdb.utils.Timer("flush"):
 #       foo.flush()
 
 from __future__ import print_function
+from __future__ import absolute_import
 import contextlib
 import time
@@ -3,19 +3,16 @@
 from nilmdb.utils.printf import *
 from nilmdb.utils import datetime_tz
 
-import time
-import os
-
 class Timestamper(object):
     """A file-like object that adds timestamps to lines of an input file."""
-    def __init__(self, file, ts_iter):
+    def __init__(self, infile, ts_iter):
         """file: filename, or another file-like object
           ts_iter: iterator that returns a timestamp string for
                    each line of the file"""
-        if isinstance(file, basestring):
-            self.file = open(file, "r")
+        if isinstance(infile, basestring):
+            self.file = open(infile, "r")
         else:
-            self.file = file
+            self.file = infile
         self.ts_iter = ts_iter
 
     def close(self):
@@ -54,7 +51,7 @@ class Timestamper(object):
 
 class TimestamperRate(Timestamper):
     """Timestamper that uses a start time and a fixed rate"""
-    def __init__(self, file, start, rate, end = None):
+    def __init__(self, infile, start, rate, end = None):
         """
        file: file name or object
 
@@ -76,7 +73,7 @@ class TimestamperRate(Timestamper):
         # Handle case where we're passed a datetime or datetime_tz object
         if "totimestamp" in dir(start):
             start = start.totimestamp()
-        Timestamper.__init__(self, file, iterator(start, rate, end))
+        Timestamper.__init__(self, infile, iterator(start, rate, end))
         self.start = start
         self.rate = rate
     def __str__(self):
@@ -87,21 +84,21 @@ class TimestamperRate(Timestamper):
 
 class TimestamperNow(Timestamper):
     """Timestamper that uses current time"""
-    def __init__(self, file):
+    def __init__(self, infile):
         def iterator():
             while True:
                 now = datetime_tz.datetime_tz.utcnow().totimestamp()
                 yield sprintf("%.6f ", now)
-        Timestamper.__init__(self, file, iterator())
+        Timestamper.__init__(self, infile, iterator())
     def __str__(self):
         return "TimestamperNow(...)"
 
 class TimestamperNull(Timestamper):
     """Timestamper that adds nothing to each line"""
-    def __init__(self, file):
+    def __init__(self, infile):
         def iterator():
             while True:
                 yield ""
-        Timestamper.__init__(self, file, iterator())
+        Timestamper.__init__(self, infile, iterator())
     def __str__(self):
         return "TimestamperNull(...)"
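
A sketch of the renamed parameter in use (file name and rate illustrative, and assuming the usual file-like readline() interface):

    from nilmdb.utils.timestamper import TimestamperRate
    ts = TimestamperRate("data.raw", start = 1234567890.0, rate = 8000)
    line = ts.readline()   # original line, prefixed with its timestamp
    ts.close()
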
@@ -1,37 +0,0 @@
-from __future__ import absolute_import
-from urllib import quote_plus, _is_unicode
-
-# urllib.urlencode insists on encoding Unicode as ASCII. This is based
-# on that function, except we always encode it as UTF-8 instead.
-
-def urlencode(query):
-    """Encode a dictionary into a URL query string.
-
-    If any values in the query arg are sequences, each sequence
-    element is converted to a separate parameter.
-    """
-
-    query = query.items()
-
-    l = []
-    for k, v in query:
-        k = quote_plus(str(k))
-        if isinstance(v, str):
-            v = quote_plus(v)
-            l.append(k + '=' + v)
-        elif _is_unicode(v):
-            v = quote_plus(v.encode("utf-8","strict"))
-            l.append(k + '=' + v)
-        else:
-            try:
-                # is this a sufficient test for sequence-ness?
-                len(v)
-            except TypeError:
-                # not a sequence
-                v = quote_plus(str(v))
-                l.append(k + '=' + v)
-            else:
-                # loop over the sequence
-                for elt in v:
-                    l.append(k + '=' + quote_plus(str(elt)))
-    return '&'.join(l)
@@ -1,6 +0,0 @@
-#!/usr/bin/python
-
-import nilmdb
-import sys
-
-nilmdb.cmdline.Cmdline(sys.argv[1:]).run()
runserver.py (deleted, 35 lines)
@@ -1,35 +0,0 @@
-#!/usr/bin/python
-
-import nilmdb
-import argparse
-
-formatter = argparse.ArgumentDefaultsHelpFormatter
-parser = argparse.ArgumentParser(description='Run the NILM server',
-                                 formatter_class = formatter)
-parser.add_argument('-p', '--port', help='Port number', type=int, default=12380)
-parser.add_argument('-d', '--database', help='Database directory', default="db")
-parser.add_argument('-y', '--yappi', help='Run with yappi profiler',
-                    action='store_true')
-args = parser.parse_args()
-
-# Start web app on a custom port
-db = nilmdb.NilmDB(args.database)
-server = nilmdb.Server(db, host = "127.0.0.1",
-                       port = args.port,
-                       embedded = False)
-
-if args.yappi:
-    print "Running in yappi"
-    try:
-        import yappi
-        yappi.start()
-        server.start(blocking = True)
-    finally:
-        yappi.stop()
-        print "Try: yappi.print_stats(sort_type=yappi.SORTTYPE_TTOT,limit=50)"
-        from IPython import embed
-        embed()
-else:
-    server.start(blocking = True)
-db.close()
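
The deleted script's role moves to the new nilmdb-server entry point (see setup.py below); a rough equivalent under the new thread-safety rules would be:

    import nilmdb
    db = nilmdb.utils.serializer_proxy(nilmdb.NilmDB)("db")
    server = nilmdb.Server(db, host = "127.0.0.1", port = 12380,
                           embedded = False)
    server.start(blocking = True)
    db.close()
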
@@ -20,6 +20,7 @@ cover-erase=1
 stop=1
 verbosity=2
 tests=tests
+#tests=tests/test_threadsafety.py
 #tests=tests/test_bulkdata.py
 #tests=tests/test_mustclose.py
 #tests=tests/test_lrucache.py
setup.py (110 lines)
@@ -1,5 +1,11 @@
 #!/usr/bin/python
 
+# To release a new version, tag it:
+#   git tag -a nilmdb-1.1 -m "Version 1.1"
+#   git push --tags
+# Then just package it up:
+#   python setup.py sdist
+
 # This is supposed to be using Distribute:
 #
 # distutils provides a "setup" method.
@@ -9,32 +15,107 @@
 # So we don't really know if this is using the old setuptools or the
 # Distribute-provided version of setuptools.
 
-from setuptools import setup, find_packages
-from distutils.extension import Extension
+import traceback
+import sys
+import os
 
-from Cython.Build import cythonize
+try:
+    from setuptools import setup, find_packages
+    from distutils.extension import Extension
+    import distutils.version
+except ImportError:
+    traceback.print_exc()
+    print "Please install the prerequisites listed in README.txt"
+    sys.exit(1)
+
+# Versioneer manages version numbers from git tags.
+# https://github.com/warner/python-versioneer
+import versioneer
+versioneer.versionfile_source = 'nilmdb/_version.py'
+versioneer.versionfile_build = 'nilmdb/_version.py'
+versioneer.tag_prefix = 'nilmdb-'
+versioneer.parentdir_prefix = 'nilmdb-'
 
 # Hack to workaround logging/multiprocessing issue:
 # https://groups.google.com/d/msg/nose-users/fnJ-kAUbYHQ/_UsLN786ygcJ
 try: import multiprocessing
 except: pass
 
-# Build cython modules.
-cython_modules = cythonize("**/*.pyx")
+# Use Cython if it's new enough, otherwise use preexisting C files.
+cython_modules = [ 'nilmdb.server.interval',
+                   'nilmdb.server.layout',
+                   'nilmdb.server.rbtree' ]
+try:
+    import Cython
+    from Cython.Build import cythonize
+    if (distutils.version.LooseVersion(Cython.__version__) <
+        distutils.version.LooseVersion("0.16")):
+        print "Cython version", Cython.__version__, "is too old; not using it."
+        raise ImportError()
+    use_cython = True
+except ImportError:
+    use_cython = False
+
+ext_modules = []
+for modulename in cython_modules:
+    filename = modulename.replace('.','/')
+    if use_cython:
+        ext_modules.extend(cythonize(filename + ".pyx"))
+    else:
+        cfile = filename + ".c"
+        if not os.path.exists(cfile):
+            raise Exception("Missing source file " + cfile + ". "
+                            "Try installing cython >= 0.16.")
+        ext_modules.append(Extension(modulename, [ cfile ]))
+
+# We need a MANIFEST.in. Generate it here rather than polluting the
+# repository with yet another setup-related file.
+with open("MANIFEST.in", "w") as m:
+    m.write("""
+# Root
+include README.txt
+include setup.cfg
+include setup.py
+include versioneer.py
+include Makefile
+include .coveragerc
+include .pylintrc
+
+# Cython files -- include source.
+recursive-include nilmdb/server *.pyx *.pyxdep *.pxd
+
+# Tests
+recursive-include tests *.py
+recursive-include tests/data *
+include tests/test.order
+
+# Docs
+recursive-include docs Makefile *.md
+""")
+
 # Run setup
 setup(name='nilmdb',
-      version = '1.0',
+      version = versioneer.get_version(),
+      cmdclass = versioneer.get_cmdclass(),
      url = 'https://git.jim.sh/jim/lees/nilmdb.git',
      author = 'Jim Paris',
+      description = "NILM Database",
+      long_description = "NILM Database",
+      license = "Proprietary",
      author_email = 'jim@jtan.com',
      tests_require = [ 'nose',
                        'coverage',
                      ],
-      setup_requires = [ 'cython',
+      setup_requires = [ 'distribute',
                       ],
-      install_requires = [ 'distribute',
-                           'decorator',
+      install_requires = [ 'decorator',
+                           'cherrypy >= 3.2',
+                           'simplejson',
+                           'pycurl',
+                           'python-dateutil',
+                           'pytz',
+                           'psutil >= 0.3.0',
+                           'requests >= 1.1.0, < 2.0.0',
                         ],
      packages = [ 'nilmdb',
                   'nilmdb.utils',
@@ -42,7 +123,14 @@ setup(name='nilmdb',
                   'nilmdb.server',
                   'nilmdb.client',
                   'nilmdb.cmdline',
+                  'nilmdb.scripts',
                 ],
-      ext_modules = cython_modules,
+      entry_points = {
+          'console_scripts': [
+              'nilmtool = nilmdb.scripts.nilmtool:main',
+              'nilmdb-server = nilmdb.scripts.nilmdb_server:main',
+              ],
+          },
+      ext_modules = ext_modules,
      zip_safe = False,
     )
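
With the entry_points above, installation generates command-line wrappers that import and call main() from the listed modules; conceptually (a sketch, not the literal generated code):

    #!/usr/bin/python
    # what the generated `nilmtool` wrapper boils down to:
    from nilmdb.scripts.nilmtool import main
    main()
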
@@ -6,6 +6,9 @@ import sys
 import glob
 from collections import OrderedDict
 
+# Change into parent dir
+os.chdir(os.path.dirname(os.path.realpath(__file__)) + "/..")
+
 class JimOrderPlugin(nose.plugins.Plugin):
     """When searching for tests and encountering a directory that
    contains a 'test.order' file, run tests listed in that file, in the
@@ -1,4 +1,5 @@
 test_printf.py
+test_threadsafety.py
 test_lrucache.py
 test_mustclose.py
 
@@ -6,6 +6,7 @@ from nilmdb.utils import timestamper
|
|||||||
from nilmdb.client import ClientError, ServerError
|
from nilmdb.client import ClientError, ServerError
|
||||||
from nilmdb.utils import datetime_tz
|
from nilmdb.utils import datetime_tz
|
||||||
|
|
||||||
|
from nose.plugins.skip import SkipTest
|
||||||
from nose.tools import *
|
from nose.tools import *
|
||||||
from nose.tools import assert_raises
|
from nose.tools import assert_raises
|
||||||
import itertools
|
import itertools
|
||||||
@@ -18,10 +19,13 @@ import simplejson as json
|
|||||||
import unittest
|
import unittest
|
||||||
import warnings
|
import warnings
|
||||||
import resource
|
import resource
|
||||||
|
import time
|
||||||
|
import re
|
||||||
|
|
||||||
from testutil.helpers import *
|
from testutil.helpers import *
|
||||||
|
|
||||||
testdb = "tests/client-testdb"
|
testdb = "tests/client-testdb"
|
||||||
|
testurl = "http://localhost:32180/"
|
||||||
|
|
||||||
def setup_module():
|
def setup_module():
|
||||||
global test_server, test_db
|
global test_server, test_db
|
||||||
@@ -29,9 +33,9 @@ def setup_module():
|
|||||||
recursive_unlink(testdb)
|
recursive_unlink(testdb)
|
||||||
|
|
||||||
# Start web app on a custom port
|
# Start web app on a custom port
|
||||||
test_db = nilmdb.NilmDB(testdb, sync = False)
|
test_db = nilmdb.utils.serializer_proxy(nilmdb.NilmDB)(testdb, sync = False)
|
||||||
test_server = nilmdb.Server(test_db, host = "127.0.0.1",
|
test_server = nilmdb.Server(test_db, host = "127.0.0.1",
|
||||||
port = 12380, stoppable = False,
|
port = 32180, stoppable = False,
|
||||||
fast_shutdown = True,
|
fast_shutdown = True,
|
||||||
force_traceback = False)
|
force_traceback = False)
|
||||||
test_server.start(blocking = False)
|
test_server.start(blocking = False)
|
||||||
@@ -44,39 +48,44 @@ def teardown_module():
|
|||||||
|
|
||||||
class TestClient(object):
|
class TestClient(object):
|
||||||
|
|
||||||
def test_client_1_basic(self):
|
def test_client_01_basic(self):
|
||||||
# Test a fake host
|
# Test a fake host
|
||||||
client = nilmdb.Client(url = "http://localhost:1/")
|
client = nilmdb.Client(url = "http://localhost:1/")
|
||||||
with assert_raises(nilmdb.client.ServerError):
|
with assert_raises(nilmdb.client.ServerError):
|
||||||
client.version()
|
client.version()
|
||||||
|
client.close()
|
||||||
|
|
||||||
# Trigger same error with a PUT request
|
# Trigger same error with a PUT request
|
||||||
client = nilmdb.Client(url = "http://localhost:1/")
|
client = nilmdb.Client(url = "http://localhost:1/")
|
||||||
with assert_raises(nilmdb.client.ServerError):
|
with assert_raises(nilmdb.client.ServerError):
|
||||||
client.version()
|
client.version()
|
||||||
|
client.close()
|
||||||
|
|
||||||
# Then a fake URL on a real host
|
# Then a fake URL on a real host
|
||||||
client = nilmdb.Client(url = "http://localhost:12380/fake/")
|
client = nilmdb.Client(url = "http://localhost:32180/fake/")
|
||||||
with assert_raises(nilmdb.client.ClientError):
|
with assert_raises(nilmdb.client.ClientError):
|
||||||
client.version()
|
client.version()
|
||||||
|
client.close()
|
||||||
|
|
||||||
# Now a real URL with no http:// prefix
|
# Now a real URL with no http:// prefix
|
||||||
client = nilmdb.Client(url = "localhost:12380")
|
client = nilmdb.Client(url = "localhost:32180")
|
||||||
version = client.version()
|
version = client.version()
|
||||||
|
client.close()
|
||||||
|
|
||||||
# Now use the real URL
|
# Now use the real URL
|
||||||
client = nilmdb.Client(url = "http://localhost:12380/")
|
client = nilmdb.Client(url = testurl)
|
||||||
version = client.version()
|
version = client.version()
|
||||||
eq_(distutils.version.StrictVersion(version),
|
eq_(distutils.version.LooseVersion(version),
|
||||||
distutils.version.StrictVersion(test_server.version))
|
distutils.version.LooseVersion(test_server.version))
|
||||||
|
|
||||||
# Bad URLs should give 404, not 500
|
# Bad URLs should give 404, not 500
|
||||||
with assert_raises(ClientError):
|
with assert_raises(ClientError):
|
||||||
client.http.get("/stream/create")
|
client.http.get("/stream/create")
|
||||||
|
client.close()
|
||||||
|
|
||||||
def test_client_2_createlist(self):
|
def test_client_02_createlist(self):
|
||||||
# Basic stream tests, like those in test_nilmdb:test_stream
|
# Basic stream tests, like those in test_nilmdb:test_stream
|
||||||
client = nilmdb.Client(url = "http://localhost:12380/")
|
client = nilmdb.Client(url = testurl)
|
||||||
|
|
||||||
# Database starts empty
|
# Database starts empty
|
||||||
eq_(client.stream_list(), [])
|
eq_(client.stream_list(), [])
|
||||||
@@ -90,6 +99,15 @@ class TestClient(object):
|
|||||||
with assert_raises(ClientError):
|
with assert_raises(ClientError):
|
||||||
client.stream_create("/newton/prep", "NoSuchLayout")
|
client.stream_create("/newton/prep", "NoSuchLayout")
|
||||||
|
|
||||||
|
# Bad method types
|
||||||
|
with assert_raises(ClientError):
|
||||||
|
client.http.put("/stream/list","")
|
||||||
|
# Try a bunch of times to make sure the request body is getting consumed
|
||||||
|
for x in range(10):
|
||||||
|
with assert_raises(ClientError):
|
||||||
|
client.http.post("/stream/list")
|
||||||
|
client = nilmdb.Client(url = testurl)
|
||||||
|
|
||||||
# Create three streams
|
# Create three streams
|
||||||
client.stream_create("/newton/prep", "PrepData")
|
client.stream_create("/newton/prep", "PrepData")
|
||||||
client.stream_create("/newton/raw", "RawData")
|
client.stream_create("/newton/raw", "RawData")
|
||||||
@@ -101,8 +119,10 @@ class TestClient(object):
|
|||||||
["/newton/zzz/rawnotch", "RawNotchedData"]
|
["/newton/zzz/rawnotch", "RawNotchedData"]
|
||||||
])
|
])
|
||||||
# Match just one type or one path
|
# Match just one type or one path
|
||||||
eq_(client.stream_list(layout="RawData"), [ ["/newton/raw", "RawData"] ])
|
eq_(client.stream_list(layout="RawData"),
|
||||||
eq_(client.stream_list(path="/newton/raw"), [ ["/newton/raw", "RawData"] ])
|
[ ["/newton/raw", "RawData"] ])
|
||||||
|
eq_(client.stream_list(path="/newton/raw"),
|
||||||
|
[ ["/newton/raw", "RawData"] ])
|
||||||
|
|
||||||
# Try messing with resource limits to trigger errors and get
|
# Try messing with resource limits to trigger errors and get
|
||||||
# more coverage. Here, make it so we can only create files 1
|
# more coverage. Here, make it so we can only create files 1
|
||||||
@@ -114,9 +134,10 @@ class TestClient(object):
         client.stream_create("/newton/hello", "RawData")
         resource.setrlimit(resource.RLIMIT_FSIZE, limit)

+        client.close()

-    def test_client_3_metadata(self):
+    def test_client_03_metadata(self):
-        client = nilmdb.Client(url = "http://localhost:12380/")
+        client = nilmdb.Client(url = testurl)

         # Set / get metadata
         eq_(client.stream_get_metadata("/newton/prep"), {})
@@ -131,9 +152,10 @@ class TestClient(object):
         client.stream_update_metadata("/newton/raw", meta3)
         eq_(client.stream_get_metadata("/newton/prep"), meta1)
         eq_(client.stream_get_metadata("/newton/raw"), meta1)
-        eq_(client.stream_get_metadata("/newton/raw", [ "description" ] ), meta2)
-        eq_(client.stream_get_metadata("/newton/raw", [ "description",
-                                                        "v_scale" ] ), meta1)
+        eq_(client.stream_get_metadata("/newton/raw",
+                                       [ "description" ] ), meta2)
+        eq_(client.stream_get_metadata("/newton/raw",
+                                       [ "description", "v_scale" ] ), meta1)

         # missing key
         eq_(client.stream_get_metadata("/newton/raw", "descr"),
@@ -146,9 +168,14 @@ class TestClient(object):
             client.stream_set_metadata("/newton/prep", [1,2,3])
         with assert_raises(ClientError):
             client.stream_update_metadata("/newton/prep", [1,2,3])
+        client.close()

-    def test_client_4_insert(self):
+    def test_client_04_insert(self):
-        client = nilmdb.Client(url = "http://localhost:12380/")
+        client = nilmdb.Client(url = testurl)

+        # Limit _max_data to 1 MB, since our test file is 1.5 MB
+        old_max_data = nilmdb.client.client.StreamInserter._max_data
+        nilmdb.client.client.StreamInserter._max_data = 1 * 1024 * 1024
+
         datetime_tz.localtz_set("America/New_York")

@@ -177,12 +204,33 @@ class TestClient(object):
         result = client.stream_insert("/newton/prep", data)
         eq_(result, None)

-        # Try forcing a server request with empty data
+        # It's OK to insert an empty interval
+        client.http.put("stream/insert", "", { "path": "/newton/prep",
+                                               "start": 1, "end": 2 })
+        eq_(list(client.stream_intervals("/newton/prep")), [[1, 2]])
+        client.stream_remove("/newton/prep")
+        eq_(list(client.stream_intervals("/newton/prep")), [])
+
+        # Timestamps can be negative too
+        client.http.put("stream/insert", "", { "path": "/newton/prep",
+                                               "start": -2, "end": -1 })
+        eq_(list(client.stream_intervals("/newton/prep")), [[-2, -1]])
+        client.stream_remove("/newton/prep")
+        eq_(list(client.stream_intervals("/newton/prep")), [])
+
+        # Intervals that end at zero shouldn't be any different
+        client.http.put("stream/insert", "", { "path": "/newton/prep",
+                                               "start": -1, "end": 0 })
+        eq_(list(client.stream_intervals("/newton/prep")), [[-1, 0]])
+        client.stream_remove("/newton/prep")
+        eq_(list(client.stream_intervals("/newton/prep")), [])
+
+        # Try forcing a server request with equal start and end
         with assert_raises(ClientError) as e:
             client.http.put("stream/insert", "", { "path": "/newton/prep",
                                                    "start": 0, "end": 0 })
         in_("400 Bad Request", str(e.exception))
-        in_("no data provided", str(e.exception))
+        in_("start must precede end", str(e.exception))

         # Specify start/end (starts too late)
         data = timestamper.TimestamperRate(testfile, start, 120)
@@ -201,14 +249,14 @@ class TestClient(object):
         in_("400 Bad Request", str(e.exception))
         # Client chunks the input, so the exact timestamp here might change
         # if the chunk positions change.
-        in_("Data timestamp 1332511271.016667 >= end time 1332511201.0",
-            str(e.exception))
+        assert(re.search("Data timestamp 13325[0-9]+\.[0-9]+ "
+                         ">= end time 1332511201.0", str(e.exception))
+               is not None)

         # Now do the real load
         data = timestamper.TimestamperRate(testfile, start, 120)
         result = client.stream_insert("/newton/prep", data,
                                       start, start + 119.999777)
-        eq_(result, "ok")

         # Verify the intervals. Should be just one, even if the data
         # was inserted in chunks, due to nilmdb interval concatenation.
@@ -222,20 +270,28 @@ class TestClient(object):
         in_("400 Bad Request", str(e.exception))
         in_("verlap", str(e.exception))

-    def test_client_5_extractremove(self):
-        # Misc tests for extract and remove. Most of them are in test_cmdline.
-        client = nilmdb.Client(url = "http://localhost:12380/")
-
-        for x in client.stream_extract("/newton/prep", 123, 123):
+        nilmdb.client.client.StreamInserter._max_data = old_max_data
+        client.close()
+
+    def test_client_05_extractremove(self):
+        # Misc tests for extract and remove. Most of them are in test_cmdline.
+        client = nilmdb.Client(url = testurl)
+
+        for x in client.stream_extract("/newton/prep", 999123, 999124):
             raise AssertionError("shouldn't be any data for this request")

         with assert_raises(ClientError) as e:
             client.stream_remove("/newton/prep", 123, 120)

-    def test_client_6_generators(self):
+        # Test count
+        eq_(client.stream_count("/newton/prep"), 14400)
+
+        client.close()
+
+    def test_client_06_generators(self):
         # A lot of the client functionality is already tested by test_cmdline,
         # but this gets a bit more coverage that cmdline misses.
-        client = nilmdb.Client(url = "http://localhost:12380/")
+        client = nilmdb.Client(url = testurl)

         # Trigger a client error in generator
         start = datetime_tz.datetime_tz.smartparse("20120323T2000")
@@ -246,7 +302,7 @@ class TestClient(object):
                 start.totimestamp(),
                 end.totimestamp()).next()
         in_("400 Bad Request", str(e.exception))
-        in_("end before start", str(e.exception))
+        in_("start must precede end", str(e.exception))

         # Trigger a curl error in generator
         with assert_raises(ServerError) as e:
@@ -256,24 +312,6 @@ class TestClient(object):
         with assert_raises(ServerError) as e:
             client.http.get_gen("http://nosuchurl/").next()

-        # Check non-json version of string output
-        eq_(json.loads(client.http.get("/stream/list",retjson=False)),
-            client.http.get("/stream/list",retjson=True))
-
-        # Check non-json version of generator output
-        for (a, b) in itertools.izip(
-            client.http.get_gen("/stream/list",retjson=False),
-            client.http.get_gen("/stream/list",retjson=True)):
-            eq_(json.loads(a), b)
-
-        # Check PUT with generator out
-        with assert_raises(ClientError) as e:
-            client.http.put_gen("stream/insert", "",
-                                { "path": "/newton/prep",
-                                  "start": 0, "end": 0 }).next()
-        in_("400 Bad Request", str(e.exception))
-        in_("no data provided", str(e.exception))
-
         # Check 404 for missing streams
         for function in [ client.stream_intervals, client.stream_extract ]:
             with assert_raises(ClientError) as e:
@@ -281,48 +319,58 @@ class TestClient(object):
             in_("404 Not Found", str(e.exception))
             in_("No such stream", str(e.exception))

-    def test_client_7_headers(self):
+        client.close()
+
+    def test_client_07_headers(self):
         # Make sure that /stream/intervals and /stream/extract
         # properly return streaming, chunked, text/plain response.
         # Pokes around in client.http internals a bit to look at the
         # response headers.

-        client = nilmdb.Client(url = "http://localhost:12380/")
+        client = nilmdb.Client(url = testurl)
         http = client.http

-        # Use a warning rather than returning a test failure, so that we can
-        # still disable chunked responses for debugging.
+        # Use a warning rather than returning a test failure for the
+        # transfer-encoding, so that we can still disable chunked
+        # responses for debugging.
+
+        def headers():
+            h = ""
+            for (k, v) in http._last_response.headers.items():
+                h += k + ": " + v + "\n"
+            return h.lower()

         # Intervals
-        x = http.get("stream/intervals", { "path": "/newton/prep" },
-                     retjson=False)
-        lines_(x, 1)
-        if "Transfer-Encoding: chunked" not in http._headers:
+        x = http.get("stream/intervals", { "path": "/newton/prep" })
+        if "transfer-encoding: chunked" not in headers():
             warnings.warn("Non-chunked HTTP response for /stream/intervals")
-        if "Content-Type: text/plain;charset=utf-8" not in http._headers:
-            raise AssertionError("/stream/intervals is not text/plain:\n" +
-                                 http._headers)
+        if "content-type: application/x-json-stream" not in headers():
+            raise AssertionError("/stream/intervals content type "
+                                 "is not application/x-json-stream:\n" +
+                                 headers())

         # Extract
         x = http.get("stream/extract",
                      { "path": "/newton/prep",
                        "start": "123",
-                       "end": "123" }, retjson=False)
-        if "Transfer-Encoding: chunked" not in http._headers:
+                       "end": "124" })
+        if "transfer-encoding: chunked" not in headers():
             warnings.warn("Non-chunked HTTP response for /stream/extract")
-        if "Content-Type: text/plain;charset=utf-8" not in http._headers:
+        if "content-type: text/plain;charset=utf-8" not in headers():
             raise AssertionError("/stream/extract is not text/plain:\n" +
-                                 http._headers)
+                                 headers())

         # Make sure Access-Control-Allow-Origin gets set
-        if "Access-Control-Allow-Origin: " not in http._headers:
+        if "access-control-allow-origin: " not in headers():
             raise AssertionError("No Access-Control-Allow-Origin (CORS) "
                                  "header in /stream/extract response:\n" +
-                                 http._headers)
+                                 headers())

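Note: the rewritten checks lowercase the headers before matching because HTTP header field names are case-insensitive. As a side observation (a minimal sketch using the requests library directly, assuming a nilmdb server on the port these tests use; nothing here is nilmdb API), requests performs the same normalization itself:

    import requests

    # requests exposes response.headers as a case-insensitive mapping,
    # so both spellings find the same value.
    resp = requests.get("http://localhost:32180/stream/list")
    assert resp.headers.get("Content-Type") == resp.headers.get("content-type")
    if resp.headers.get("Transfer-Encoding", "").lower() != "chunked":
        print("response was not chunked")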
-    def test_client_8_unicode(self):
+        client.close()
+
+    def test_client_08_unicode(self):
         # Basic Unicode tests
-        client = nilmdb.Client(url = "http://localhost:12380/")
+        client = nilmdb.Client(url = testurl)

         # Delete streams that exist
         for stream in client.stream_list():
@@ -356,3 +404,209 @@ class TestClient(object):
         eq_(client.stream_get_metadata(raw[0]), meta1)
         eq_(client.stream_get_metadata(raw[0], [ "alpha" ]), meta2)
         eq_(client.stream_get_metadata(raw[0], [ "alpha", "β" ]), meta1)
+
+        client.close()
+
+    def test_client_09_closing(self):
+        # Make sure we actually close sockets correctly. New
+        # connections will block for a while if they're not, since the
+        # server will stop accepting new connections.
+        for test in [1, 2]:
+            start = time.time()
+            for i in range(50):
+                if time.time() - start > 15:
+                    raise AssertionError("Connections seem to be blocking... "
+                                         "probably not closing properly.")
+                if test == 1:
+                    # explicit close
+                    client = nilmdb.Client(url = testurl)
+                    with assert_raises(ClientError) as e:
+                        client.stream_remove("/newton/prep", 123, 120)
+                    client.close() # remove this to see the failure
+                elif test == 2:
+                    # use the context manager
+                    with nilmdb.Client(url = testurl) as c:
+                        with assert_raises(ClientError) as e:
+                            c.stream_remove("/newton/prep", 123, 120)
+
+    def test_client_10_context(self):
+        # Test using the client's stream insertion context manager to
+        # insert data.
+        client = nilmdb.Client(testurl)
+
+        client.stream_create("/context/test", "uint16_1")
+        with client.stream_insert_context("/context/test") as ctx:
+            # override _max_data to trigger frequent server updates
+            ctx._max_data = 15
+
+            with assert_raises(ValueError):
+                ctx.insert_line("100 1")
+
+            ctx.insert_line("100 1\n")
+            ctx.insert_iter([ "101 1\n",
+                              "102 1\n",
+                              "103 1\n" ])
+            ctx.insert_line("104 1\n")
+            ctx.insert_line("105 1\n")
+            ctx.finalize()
+
+            ctx.insert_line("106 1\n")
+            ctx.update_end(106.5)
+            ctx.finalize()
+            ctx.update_start(106.8)
+            ctx.insert_line("107 1\n")
+            ctx.insert_line("108 1\n")
+            ctx.insert_line("109 1\n")
+            ctx.insert_line("110 1\n")
+            ctx.insert_line("111 1\n")
+            ctx.update_end(113)
+            ctx.insert_line("112 1\n")
+            ctx.update_end(114)
+            ctx.insert_line("113 1\n")
+            ctx.update_end(115)
+            ctx.insert_line("114 1\n")
+            ctx.finalize()
+
+        with assert_raises(ClientError):
+            with client.stream_insert_context("/context/test", 100, 200) as ctx:
+                ctx.insert_line("115 1\n")
+
+        with assert_raises(ClientError):
+            with client.stream_insert_context("/context/test", 200, 300) as ctx:
+                ctx.insert_line("115 1\n")
+
+        with client.stream_insert_context("/context/test", 200, 300) as ctx:
+            # make sure our override wasn't permanent
+            ne_(ctx._max_data, 15)
+            ctx.insert_line("225 1\n")
+            ctx.finalize()
+
+        eq_(list(client.stream_intervals("/context/test")),
+            [ [ 100, 105.000001 ],
+              [ 106, 106.5 ],
+              [ 106.8, 115 ],
+              [ 200, 300 ] ])
+
+        client.stream_destroy("/context/test")
+        client.close()
+
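The insertion context manager exercised above reduces to a small usage pattern. A hedged sketch follows (it assumes a running server at testurl and that the stream does not already exist; the method names mirror the test itself rather than independent documentation):

    import nilmdb

    testurl = "http://localhost:32180/"
    client = nilmdb.Client(url = testurl)
    client.stream_create("/sketch/example", "uint16_1")

    with client.stream_insert_context("/sketch/example") as ctx:
        ctx.update_start(100)        # explicit interval start
        ctx.insert_line("140 1\n")   # lines must be newline-terminated
        ctx.insert_line("150 1\n")
        ctx.update_end(200)          # explicit interval end
        ctx.finalize()               # pushes the interval [100, 200]

    client.stream_destroy("/sketch/example")
    client.close()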
+    def test_client_11_emptyintervals(self):
+        # Empty intervals are ok! If recording detection events
+        # by inserting rows into the database, we want to be able to
+        # have an interval where no events occurred. Test them here.
+        client = nilmdb.Client(testurl)
+        client.stream_create("/empty/test", "uint16_1")
+
+        def info():
+            result = []
+            for interval in list(client.stream_intervals("/empty/test")):
+                result.append((client.stream_count("/empty/test", *interval),
+                               interval))
+            return result
+
+        eq_(info(), [])
+
+        # Insert a region with just a few points
+        with client.stream_insert_context("/empty/test") as ctx:
+            ctx.update_start(100)
+            ctx.insert_line("140 1\n")
+            ctx.insert_line("150 1\n")
+            ctx.insert_line("160 1\n")
+            ctx.update_end(200)
+            ctx.finalize()
+
+        eq_(info(), [(3, [100, 200])])
+
+        # Delete chunk, which will leave one data point and two intervals
+        client.stream_remove("/empty/test", 145, 175)
+        eq_(info(), [(1, [100, 145]),
+                     (0, [175, 200])])
+
+        # Try also creating a completely empty interval from scratch,
+        # in a few different ways.
+        client.stream_insert_block("/empty/test", "", 300, 350)
+        client.stream_insert("/empty/test", [], 400, 450)
+        with client.stream_insert_context("/empty/test", 500, 550):
+            pass
+
+        # If enough timestamps aren't provided, empty streams won't be created.
+        client.stream_insert("/empty/test", [])
+        with client.stream_insert_context("/empty/test"):
+            pass
+        client.stream_insert("/empty/test", [], start = 600)
+        with client.stream_insert_context("/empty/test", start = 700):
+            pass
+        client.stream_insert("/empty/test", [], end = 850)
+        with client.stream_insert_context("/empty/test", end = 950):
+            pass
+
+        # Try various things that might cause problems
+        with client.stream_insert_context("/empty/test", 1000, 1050):
+            ctx.finalize() # inserts [1000, 1050]
+            ctx.finalize() # nothing
+            ctx.finalize() # nothing
+            ctx.insert_line("1100 1\n")
+            ctx.finalize() # inserts [1100, 1100.000001]
+            ctx.update_start(1199)
+            ctx.insert_line("1200 1\n")
+            ctx.update_end(1250)
+            ctx.finalize() # inserts [1199, 1250]
+            ctx.update_start(1299)
+            ctx.finalize() # nothing
+            ctx.update_end(1350)
+            ctx.finalize() # nothing
+            ctx.update_start(1400)
+            ctx.update_end(1450)
+            ctx.finalize()
+            # implicit last finalize inserts [1400, 1450]
+
+        # Check everything
+        eq_(info(), [(1, [100, 145]),
+                     (0, [175, 200]),
+                     (0, [300, 350]),
+                     (0, [400, 450]),
+                     (0, [500, 550]),
+                     (0, [1000, 1050]),
+                     (1, [1100, 1100.000001]),
+                     (1, [1199, 1250]),
+                     (0, [1400, 1450]),
+                     ])
+
+        # Clean up
+        client.stream_destroy("/empty/test")
+        client.close()
+
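As the comment at the top of that test notes, an interval with zero rows is a meaningful record ("we watched this range and saw nothing"). The three creation styles, pulled out of the test for reference (this restates lines from the test above rather than adding any new API):

    # Three equivalent ways to record an empty interval:
    client.stream_insert_block("/empty/test", "", 300, 350)
    client.stream_insert("/empty/test", [], 400, 450)
    with client.stream_insert_context("/empty/test", 500, 550):
        pass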
+    def test_client_12_persistent(self):
+        # Check that connections are persistent when they should be.
+        # This is pretty hard to test; we have to poke deep into
+        # the Requests library.
+        with nilmdb.Client(url = testurl) as c:
+            def connections():
+                try:
+                    poolmanager = c.http._last_response.connection.poolmanager
+                    pool = poolmanager.pools[('http','localhost',32180)]
+                    return (pool.num_connections, pool.num_requests)
+                except:
+                    raise SkipTest("can't get connection info")
+
+            # First request makes a connection
+            c.stream_create("/persist/test", "uint16_1")
+            eq_(connections(), (1, 1))
+
+            # Non-generator
+            c.stream_list("/persist/test")
+            eq_(connections(), (1, 2))
+            c.stream_list("/persist/test")
+            eq_(connections(), (1, 3))
+
+            # Generators
+            for x in c.stream_intervals("/persist/test"):
+                pass
+            eq_(connections(), (1, 4))
+            for x in c.stream_intervals("/persist/test"):
+                pass
+            eq_(connections(), (1, 5))
+
+            # Clean up
+            c.stream_destroy("/persist/test")
+            eq_(connections(), (1, 6))
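The persistence check above counts connections inside requests' connection pool; the underlying behavior is plain HTTP keep-alive. A minimal illustration with requests alone (assumes the test server is up; /version is the endpoint used elsewhere in these tests): one requests.Session reuses a single pooled TCP connection across sequential requests.

    import requests

    s = requests.Session()
    for i in range(3):
        # All three requests should travel over one pooled connection.
        s.get("http://localhost:32180/version")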
@@ -27,11 +27,12 @@ testdb = "tests/cmdline-testdb"

 def server_start(max_results = None, bulkdata_args = {}):
     global test_server, test_db
     # Start web app on a custom port
-    test_db = nilmdb.NilmDB(testdb, sync = False,
-                            max_results = max_results,
-                            bulkdata_args = bulkdata_args)
+    test_db = nilmdb.utils.serializer_proxy(nilmdb.NilmDB)(
+        testdb, sync = False,
+        max_results = max_results,
+        bulkdata_args = bulkdata_args)
     test_server = nilmdb.Server(test_db, host = "127.0.0.1",
-                                port = 12380, stoppable = False,
+                                port = 32180, stoppable = False,
                                 fast_shutdown = True,
                                 force_traceback = False)
     test_server.start(blocking = False)
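server_start() now wraps NilmDB in nilmdb.utils.serializer_proxy before handing it to the server, so calls arriving from the server's worker threads are funneled through one thread. A generic sketch of the serializer-proxy idea follows; this is not nilmdb's actual implementation, just a minimal version of the pattern in the Python 2 spelling this codebase uses (the class and names are hypothetical):

    import Queue
    import threading

    class SerializedProxy(object):
        # Route every method call through a queue serviced by a single
        # worker thread, so a non-thread-safe object only ever runs on
        # that one thread.  (Sketch: plain attributes aren't proxied.)
        def __init__(self, obj):
            self._obj = obj
            self._calls = Queue.Queue()
            t = threading.Thread(target = self._worker)
            t.daemon = True
            t.start()

        def _worker(self):
            while True:
                (func, args, kwargs, done, result) = self._calls.get()
                try:
                    result.append(func(*args, **kwargs))
                except Exception as e:
                    result.append(e)
                done.set()

        def __getattr__(self, name):
            target = getattr(self._obj, name)
            def call(*args, **kwargs):
                done = threading.Event()
                result = []
                self._calls.put((target, args, kwargs, done, result))
                done.wait()
                if isinstance(result[0], Exception):
                    raise result[0]
                return result[0]
            return call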
@@ -63,6 +64,7 @@ class TestCmdline(object):
         passing the given input. Returns a tuple with the output and
         exit code"""
         # printf("TZ=UTC ./nilmtool.py %s\n", arg_string)
+        os.environ['NILMDB_URL'] = "http://localhost:32180/"
         class stdio_wrapper:
             def __init__(self, stdin, stdout, stderr):
                 self.io = (stdin, stdout, stderr)
@@ -162,18 +164,18 @@ class TestCmdline(object):

         # try some URL constructions
         self.fail("--url http://nosuchurl/ info")
-        self.contain("Couldn't resolve host 'nosuchurl'")
+        self.contain("error connecting to server")

         self.fail("--url nosuchurl info")
-        self.contain("Couldn't resolve host 'nosuchurl'")
+        self.contain("error connecting to server")

         self.fail("-u nosuchurl/foo info")
-        self.contain("Couldn't resolve host 'nosuchurl'")
+        self.contain("error connecting to server")

-        self.fail("-u localhost:0 info")
-        self.contain("couldn't connect to host")
+        self.fail("-u localhost:1 info")
+        self.contain("error connecting to server")

-        self.ok("-u localhost:12380 info")
+        self.ok("-u localhost:32180 info")
         self.ok("info")

         # Duplicated arguments should fail, but this isn't implemented
|
|||||||
self.fail("extract --start 2000-01-01 --start 2001-01-02")
|
self.fail("extract --start 2000-01-01 --start 2001-01-02")
|
||||||
self.contain("duplicated argument")
|
self.contain("duplicated argument")
|
||||||
|
|
||||||
def test_02_info(self):
|
# Verify that "help command" and "command --help" are identical
|
||||||
|
# for all commands.
|
||||||
|
self.fail("")
|
||||||
|
m = re.search(r"{(.*)}", self.captured)
|
||||||
|
for command in [""] + m.group(1).split(','):
|
||||||
|
self.ok(command + " --help")
|
||||||
|
cap1 = self.captured
|
||||||
|
self.ok("help " + command)
|
||||||
|
cap2 = self.captured
|
||||||
|
self.ok("help " + command + " asdf --url --zxcv -")
|
||||||
|
cap3 = self.captured
|
||||||
|
eq_(cap1, cap2)
|
||||||
|
eq_(cap2, cap3)
|
||||||
|
|
||||||
|
def test_02_parsetime(self):
|
||||||
|
os.environ['TZ'] = "America/New_York"
|
||||||
|
test = datetime_tz.datetime_tz.now()
|
||||||
|
parse_time = nilmdb.utils.time.parse_time
|
||||||
|
eq_(parse_time(str(test)), test)
|
||||||
|
test = datetime_tz.datetime_tz.smartparse("20120405 1400-0400")
|
||||||
|
eq_(parse_time("hi there 20120405 1400-0400 testing! 123"), test)
|
||||||
|
eq_(parse_time("20120405 1800 UTC"), test)
|
||||||
|
eq_(parse_time("20120405 1400-0400 UTC"), test)
|
||||||
|
for badtime in [ "20120405 1400-9999", "hello", "-", "", "4:00" ]:
|
||||||
|
with assert_raises(ValueError):
|
||||||
|
x = parse_time(badtime)
|
||||||
|
x = parse_time("now")
|
||||||
|
eq_(parse_time("snapshot-20120405-140000.raw.gz"), test)
|
||||||
|
eq_(parse_time("prep-20120405T1400"), test)
|
||||||
|
|
||||||
|
def test_03_info(self):
|
||||||
self.ok("info")
|
self.ok("info")
|
||||||
self.contain("Server URL: http://localhost:12380/")
|
self.contain("Server URL: http://localhost:32180/")
|
||||||
|
self.contain("Client version: " + nilmdb.__version__)
|
||||||
self.contain("Server version: " + test_server.version)
|
self.contain("Server version: " + test_server.version)
|
||||||
self.contain("Server database path")
|
self.contain("Server database path")
|
||||||
self.contain("Server database size")
|
self.contain("Server database size")
|
||||||
|
self.contain("Server database free space")
|
||||||
|
|
||||||
def test_03_createlist(self):
|
def test_04_createlist(self):
|
||||||
# Basic stream tests, like those in test_client.
|
# Basic stream tests, like those in test_client.
|
||||||
|
|
||||||
# No streams
|
# No streams
|
||||||
@@ -272,9 +306,9 @@ class TestCmdline(object):

         # reversed range
         self.fail("list /newton/prep --start 2020-01-01 --end 2000-01-01")
-        self.contain("start is after end")
+        self.contain("start must precede end")

-    def test_04_metadata(self):
+    def test_05_metadata(self):
         # Set / get metadata
         self.fail("metadata")
         self.fail("metadata --get")
@@ -331,22 +365,6 @@ class TestCmdline(object):
         self.fail("metadata /newton/nosuchpath")
         self.contain("No stream at path /newton/nosuchpath")

-    def test_05_parsetime(self):
-        os.environ['TZ'] = "America/New_York"
-        cmd = nilmdb.cmdline.Cmdline(None)
-        test = datetime_tz.datetime_tz.now()
-        eq_(cmd.parse_time(str(test)), test)
-        test = datetime_tz.datetime_tz.smartparse("20120405 1400-0400")
-        eq_(cmd.parse_time("hi there 20120405 1400-0400 testing! 123"), test)
-        eq_(cmd.parse_time("20120405 1800 UTC"), test)
-        eq_(cmd.parse_time("20120405 1400-0400 UTC"), test)
-        for badtime in [ "20120405 1400-9999", "hello", "-", "", "4:00" ]:
-            with assert_raises(ValueError):
-                x = cmd.parse_time(badtime)
-        x = cmd.parse_time("now")
-        eq_(cmd.parse_time("snapshot-20120405-140000.raw.gz"), test)
-        eq_(cmd.parse_time("prep-20120405T1400"), test)
-
     def test_06_insert(self):
         self.ok("insert --help")

@@ -415,7 +433,7 @@ class TestCmdline(object):
         # bad start time
         self.fail("insert --rate 120 --start 'whatever' /newton/prep /dev/null")

-    def test_07_detail(self):
+    def test_07_detail_extent(self):
         # Just count the number of lines, it's probably fine
         self.ok("list --detail")
         lines_(self.captured, 8)
@@ -442,7 +460,7 @@ class TestCmdline(object):
         self.contain("no intervals")

         self.ok("list --detail --path *prep --start='23 Mar 2012 10:05:15.50'"
-                + " --end='23 Mar 2012 10:05:15.50'")
+                + " --end='23 Mar 2012 10:05:15.51'")
         lines_(self.captured, 2)
         self.contain("10:05:15.500")

@@ -460,6 +478,18 @@ class TestCmdline(object):
         lines_(self.captured, 2)
         self.contain("[ 1332497115.612 -> 1332497159.991668 ]")

+        # Check --extent output
+        self.ok("list --extent")
+        lines_(self.captured, 6)
+
+        self.ok("list -E -T")
+        self.contain(" extent: 1332496800 -> 1332497159.991668")
+        self.contain(" extent: (no data)")
+
+        # Misc
+        self.fail("list --extent --start='23 Mar 2012 10:05:15.50'")
+        self.contain("--start and --end only make sense with --detail")
+
     def test_08_extract(self):
         # nonexistent stream
         self.fail("extract /no/such/foo --start 2000-01-01 --end 2020-01-01")
@@ -471,29 +501,29 @@ class TestCmdline(object):

         # empty ranges return error 2
         self.fail("extract -a /newton/prep " +
-                  "--start '23 Mar 2012 10:00:30' " +
-                  "--end '23 Mar 2012 10:00:30'",
+                  "--start '23 Mar 2012 20:00:30' " +
+                  "--end '23 Mar 2012 20:00:31'",
                   exitcode = 2, require_error = False)
         self.contain("no data")
         self.fail("extract -a /newton/prep " +
-                  "--start '23 Mar 2012 10:00:30.000001' " +
-                  "--end '23 Mar 2012 10:00:30.000001'",
+                  "--start '23 Mar 2012 20:00:30.000001' " +
+                  "--end '23 Mar 2012 20:00:30.000002'",
                   exitcode = 2, require_error = False)
         self.contain("no data")
         self.fail("extract -a /newton/prep " +
                   "--start '23 Mar 2022 10:00:30' " +
-                  "--end '23 Mar 2022 10:00:30'",
+                  "--end '23 Mar 2022 10:00:31'",
                   exitcode = 2, require_error = False)
         self.contain("no data")

         # but are ok if we're just counting results
         self.ok("extract --count /newton/prep " +
-                "--start '23 Mar 2012 10:00:30' " +
-                "--end '23 Mar 2012 10:00:30'")
+                "--start '23 Mar 2012 20:00:30' " +
+                "--end '23 Mar 2012 20:00:31'")
         self.match("0\n")
         self.ok("extract -c /newton/prep " +
-                "--start '23 Mar 2012 10:00:30.000001' " +
-                "--end '23 Mar 2012 10:00:30.000001'")
+                "--start '23 Mar 2012 20:00:30.000001' " +
+                "--end '23 Mar 2012 20:00:30.000002'")
         self.match("0\n")

         # Check various dumps against stored copies of how they should appear
@@ -540,31 +570,31 @@ class TestCmdline(object):
         self.fail("remove /no/such/foo --start 2000-01-01 --end 2020-01-01")
         self.contain("No stream at path")

-        # empty ranges return success, backwards ranges return error
+        # empty or backward ranges return errors
         self.fail("remove /newton/prep --start 2020-01-01 --end 2000-01-01")
-        self.contain("start is after end")
+        self.contain("start must precede end")

-        self.ok("remove /newton/prep " +
-                "--start '23 Mar 2012 10:00:30' " +
-                "--end '23 Mar 2012 10:00:30'")
-        self.match("")
-        self.ok("remove /newton/prep " +
-                "--start '23 Mar 2012 10:00:30.000001' " +
-                "--end '23 Mar 2012 10:00:30.000001'")
-        self.match("")
-        self.ok("remove /newton/prep " +
-                "--start '23 Mar 2022 10:00:30' " +
-                "--end '23 Mar 2022 10:00:30'")
-        self.match("")
+        self.fail("remove /newton/prep " +
+                  "--start '23 Mar 2012 10:00:30' " +
+                  "--end '23 Mar 2012 10:00:30'")
+        self.contain("start must precede end")
+        self.fail("remove /newton/prep " +
+                  "--start '23 Mar 2012 10:00:30.000001' " +
+                  "--end '23 Mar 2012 10:00:30.000001'")
+        self.contain("start must precede end")
+        self.fail("remove /newton/prep " +
+                  "--start '23 Mar 2022 10:00:30' " +
+                  "--end '23 Mar 2022 10:00:30'")
+        self.contain("start must precede end")

         # Verbose
         self.ok("remove -c /newton/prep " +
-                "--start '23 Mar 2012 10:00:30' " +
-                "--end '23 Mar 2012 10:00:30'")
+                "--start '23 Mar 2022 20:00:30' " +
+                "--end '23 Mar 2022 20:00:31'")
         self.match("0\n")
         self.ok("remove --count /newton/prep " +
-                "--start '23 Mar 2012 10:00:30' " +
-                "--end '23 Mar 2012 10:00:30'")
+                "--start '23 Mar 2022 20:00:30' " +
+                "--end '23 Mar 2022 20:00:31'")
         self.match("0\n")

         # Make sure we have the data we expect
@@ -765,7 +795,7 @@ class TestCmdline(object):
                 "tests/data/prep-20120323T1000")

         # Should take up about 2.8 MB here (including directory entries)
-        du_before = nilmdb.utils.diskusage.du_bytes(testdb)
+        du_before = nilmdb.utils.diskusage.du(testdb)

         # Make sure we have the data we expect
         self.ok("list --detail")
@@ -815,7 +845,7 @@ class TestCmdline(object):

         # We have 1/8 of the data that we had before, so the file size
         # should have dropped below 1/4 of what it used to be
-        du_after = nilmdb.utils.diskusage.du_bytes(testdb)
+        du_after = nilmdb.utils.diskusage.du(testdb)
         lt_(du_after, (du_before / 4))

         # Remove anything that came from the 10:02 data file
@@ -55,7 +55,7 @@ class TestInterval:
               for x in [ "03/24/2012", "03/25/2012", "03/26/2012" ] ]

         # basic construction
-        i = Interval(d1, d1)
+        i = Interval(d1, d2)
         i = Interval(d1, d3)
         eq_(i.start, d1)
         eq_(i.end, d3)
@@ -77,8 +77,8 @@ class TestInterval:
         assert(Interval(d1, d3) > Interval(d1, d2))
         assert(Interval(d1, d2) < Interval(d2, d3))
         assert(Interval(d1, d3) < Interval(d2, d3))
-        assert(Interval(d2, d2) > Interval(d1, d3))
-        assert(Interval(d3, d3) == Interval(d3, d3))
+        assert(Interval(d2, d2+0.01) > Interval(d1, d3))
+        assert(Interval(d3, d3+0.01) == Interval(d3, d3+0.01))
         #with assert_raises(TypeError): # was AttributeError, that's wrong
         #    x = (i == 123)

@@ -293,7 +293,7 @@ class TestIntervalDB:
         # actual start, end can be a subset
         a = DBInterval(150, 200, 100, 200, 10000, 20000)
         b = DBInterval(100, 150, 100, 200, 10000, 20000)
-        c = DBInterval(150, 150, 100, 200, 10000, 20000)
+        c = DBInterval(150, 160, 100, 200, 10000, 20000)

         # Make a set of DBIntervals
         iseta = IntervalSet([a, b])
@@ -246,7 +246,7 @@ class TestLayoutSpeed:
             parser = Parser(layout)
             formatter = Formatter(layout)
             parser.parse(data)
-            data = formatter.format(parser.data)
+            formatter.format(parser.data)
             elapsed = time.time() - start
             printf("roundtrip %s: %d ms, %.1f μs/row, %d rows/sec\n",
                    layout,
@@ -264,3 +264,8 @@ class TestLayoutSpeed:
             return [ sprintf("%d", random.randint(0,65535))
                      for x in range(10) ]
         do_speedtest("uint16_10", datagen)
+
+        def datagen():
+            return [ sprintf("%d", random.randint(0,65535))
+                     for x in range(6) ]
+        do_speedtest("uint16_6", datagen)
@@ -34,6 +34,10 @@ class Bar:
     def __del__(self):
        fprintf(err, "Deleting\n")

+    @classmethod
+    def baz(self):
+        fprintf(err, "Baz\n")
+
     def close(self):
         fprintf(err, "Closing\n")

@@ -16,6 +16,8 @@ import Queue
 import cStringIO
 import time

+from nilmdb.utils import serializer_proxy
+
 testdb = "tests/testdb"

 #@atexit.register
@@ -93,19 +95,31 @@ class Test00Nilmdb(object):  # named 00 so it runs first
         eq_(db.stream_get_metadata("/newton/prep"), meta1)
         eq_(db.stream_get_metadata("/newton/raw"), meta1)

+        # fill in some test coverage for start >= end
+        with assert_raises(nilmdb.server.NilmDBError):
+            db.stream_remove("/newton/prep", 0, 0)
+        with assert_raises(nilmdb.server.NilmDBError):
+            db.stream_remove("/newton/prep", 1, 0)
+        db.stream_remove("/newton/prep", 0, 1)
+
         db.close()

 class TestBlockingServer(object):
     def setUp(self):
-        self.db = nilmdb.NilmDB(testdb, sync=False)
+        self.db = serializer_proxy(nilmdb.NilmDB)(testdb, sync=False)

     def tearDown(self):
         self.db.close()

     def test_blocking_server(self):
+        # Server should fail if the database doesn't have a "_thread_safe"
+        # property.
+        with assert_raises(KeyError):
+            nilmdb.Server(object())
+
         # Start web app on a custom port
         self.server = nilmdb.Server(self.db, host = "127.0.0.1",
-                                    port = 12380, stoppable = True)
+                                    port = 32180, stoppable = True)

         # Run it
         event = threading.Event()
@@ -117,13 +131,13 @@ class TestBlockingServer(object):
             raise AssertionError("server didn't start in 10 seconds")

         # Send request to exit.
-        req = urlopen("http://127.0.0.1:12380/exit/", timeout = 1)
+        req = urlopen("http://127.0.0.1:32180/exit/", timeout = 1)

         # Wait for it
         thread.join()

 def geturl(path):
-    req = urlopen("http://127.0.0.1:12380" + path, timeout = 10)
+    req = urlopen("http://127.0.0.1:32180" + path, timeout = 10)
     return req.read()

 def getjson(path):
@@ -133,9 +147,9 @@ class TestServer(object):

     def setUp(self):
         # Start web app on a custom port
-        self.db = nilmdb.NilmDB(testdb, sync=False)
+        self.db = serializer_proxy(nilmdb.NilmDB)(testdb, sync=False)
         self.server = nilmdb.Server(self.db, host = "127.0.0.1",
-                                    port = 12380, stoppable = False)
+                                    port = 32180, stoppable = False)
         self.server.start(blocking = False)

     def tearDown(self):
@@ -151,8 +165,8 @@ class TestServer(object):
         eq_(e.exception.code, 404)

         # Check version
-        eq_(distutils.version.StrictVersion(getjson("/version")),
-            distutils.version.StrictVersion(self.server.version))
+        eq_(distutils.version.LooseVersion(getjson("/version")),
+            distutils.version.LooseVersion(nilmdb.__version__))

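The version check switches from StrictVersion to LooseVersion because versioneer-derived version strings (introduced elsewhere in this changeset) can carry git metadata that StrictVersion's strict X.Y[.Z] grammar rejects. A quick illustration using only the distutils standard library (the version string is an invented example of the git-describe style):

    from distutils.version import LooseVersion, StrictVersion

    LooseVersion("1.2.0-80-g1234567")      # parses fine
    try:
        StrictVersion("1.2.0-80-g1234567") # raises ValueError
    except ValueError:
        pass

    # LooseVersion still compares numerically where it can:
    assert LooseVersion("1.2") < LooseVersion("1.10")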
     def test_stream_list(self):
         # Known streams that got populated by an earlier test (test_nilmdb)
@@ -194,12 +208,3 @@ class TestServer(object):
         data = getjson("/stream/get_metadata?path=/newton/prep"
                        "&key=foo")
         eq_(data, {'foo': None})
-
-    def test_insert(self):
-        # GET instead of POST (no body)
-        # (actual POST test is done by client code)
-        with assert_raises(HTTPError) as e:
-            getjson("/stream/insert?path=/newton/prep&start=0&end=0")
-        eq_(e.exception.code, 400)
-
@@ -9,16 +9,28 @@ import time

 from testutil.helpers import *

-#raise nose.exc.SkipTest("Skip these")

 class Foo(object):
     val = 0

+    def __init__(self, asdf = "asdf"):
+        self.init_thread = threading.current_thread().name
+
+    @classmethod
+    def foo(self):
+        pass
+
     def fail(self):
         raise Exception("you asked me to do this")

     def test(self, debug = False):
+        self.tester(debug)
+
+    def t(self):
+        pass
+
+    def tester(self, debug = False):
         # purposely not thread-safe
+        self.test_thread = threading.current_thread().name
         oldval = self.val
         newval = oldval + 1
         time.sleep(0.05)
@@ -46,27 +58,29 @@ class Base(object):
             t.join()
         self.verify_result()

+    def verify_result(self):
+        eq_(self.foo.val, 20)
+        eq_(self.foo.init_thread, self.foo.test_thread)
+
 class TestUnserialized(Base):
     def setUp(self):
         self.foo = Foo()

     def verify_result(self):
         # This should have failed to increment properly
-        assert(self.foo.val != 20)
+        ne_(self.foo.val, 20)
+        # Init and tests ran in different threads
+        ne_(self.foo.init_thread, self.foo.test_thread)

-class TestSerialized(Base):
+class TestSerializer(Base):
     def setUp(self):
-        self.realfoo = Foo()
-        self.foo = nilmdb.utils.Serializer(self.realfoo)
-
-    def tearDown(self):
-        del self.foo
-
-    def verify_result(self):
-        # This should have worked
-        eq_(self.realfoo.val, 20)
-
-    def test_attribute(self):
-        # Can't wrap attributes yet
-        with assert_raises(TypeError):
-            self.foo.val
+        self.foo = nilmdb.utils.serializer_proxy(Foo)("qwer")
+
+    def test_multi(self):
+        sp = nilmdb.utils.serializer_proxy
+        sp(Foo("x")).t()
+        sp(sp(Foo)("x")).t()
+        sp(sp(Foo))("x").t()
+        sp(sp(Foo("x"))).t()
+        sp(sp(Foo)("x")).t()
+        sp(sp(Foo))("x").t()
tests/test_threadsafety.py (new file, 96 lines)
@@ -0,0 +1,96 @@
+import nilmdb
+from nilmdb.utils.printf import *
+
+import nose
+from nose.tools import *
+from nose.tools import assert_raises
+
+from testutil.helpers import *
+import threading
+
+class Thread(threading.Thread):
+    def __init__(self, target):
+        self.target = target
+        threading.Thread.__init__(self)
+
+    def run(self):
+        try:
+            self.target()
+        except AssertionError as e:
+            self.error = e
+        else:
+            self.error = None
+
+class Test():
+    def __init__(self):
+        self.test = 1234
+
+    @classmethod
+    def asdf(cls):
+        pass
+
+    def foo(self, exception = False, reenter = False):
+        if exception:
+            raise Exception()
+        self.bar(reenter)
+
+    def bar(self, reenter):
+        if reenter:
+            self.foo()
+        return 123
+
+    def baz_threaded(self, target):
+        t = Thread(target)
+        t.start()
+        t.join()
+        return t
+
+    def baz(self, target):
+        target()
+
+class TestThreadSafety(object):
+    def tryit(self, c, threading_ok, concurrent_ok):
+        eq_(c.test, 1234)
+        c.foo()
+        t = Thread(c.foo)
+        t.start()
+        t.join()
+        if threading_ok and t.error:
+            raise Exception("got unexpected error: " + str(t.error))
+        if not threading_ok and not t.error:
+            raise Exception("failed to get expected error")
+        try:
+            c.baz(c.foo)
+        except AssertionError as e:
+            if concurrent_ok:
+                raise Exception("got unexpected error: " + str(e))
+        else:
+            if not concurrent_ok:
+                raise Exception("failed to get expected error")
+        t = c.baz_threaded(c.foo)
+        if (concurrent_ok and threading_ok) and t.error:
+            raise Exception("got unexpected error: " + str(t.error))
+        if not (concurrent_ok and threading_ok) and not t.error:
+            raise Exception("failed to get expected error")
+
+    def test(self):
+        proxy = nilmdb.utils.threadsafety.verify_proxy
+        self.tryit(Test(), True, True)
+        self.tryit(proxy(Test(), True, True, True), False, False)
+        self.tryit(proxy(Test(), True, True, False), False, True)
+        self.tryit(proxy(Test(), True, False, True), True, False)
+        self.tryit(proxy(Test(), True, False, False), True, True)
+        self.tryit(proxy(Test, True, True, True)(), False, False)
+        self.tryit(proxy(Test, True, True, False)(), False, True)
+        self.tryit(proxy(Test, True, False, True)(), True, False)
+        self.tryit(proxy(Test, True, False, False)(), True, True)
+
+        proxy(proxy(proxy(Test))()).foo()
+
+        c = proxy(Test())
+        c.foo()
+        try:
+            c.foo(exception = True)
+        except Exception:
+            pass
+        c.foo()
655
versioneer.py
Normal file
655
versioneer.py
Normal file
@@ -0,0 +1,655 @@
|
|||||||
|
#! /usr/bin/python
|
||||||
|
|
||||||
|
"""versioneer.py
|
||||||
|
|
||||||
|
(like a rocketeer, but for versions)
|
||||||
|
|
||||||
|
* https://github.com/warner/python-versioneer
|
||||||
|
* Brian Warner
|
||||||
|
* License: Public Domain
|
||||||
|
* Version: 0.7+
|
||||||
|
|
||||||
|
This file helps distutils-based projects manage their version number by just
|
||||||
|
creating version-control tags.
|
||||||
|
|
||||||
|
For developers who work from a VCS-generated tree (e.g. 'git clone' etc),
|
||||||
|
each 'setup.py version', 'setup.py build', 'setup.py sdist' will compute a
|
||||||
|
version number by asking your version-control tool about the current
|
||||||
|
checkout. The version number will be written into a generated _version.py
|
||||||
|
file of your choosing, where it can be included by your __init__.py
|
||||||
|
|
||||||
|
For users who work from a VCS-generated tarball (e.g. 'git archive'), it will
|
||||||
|
compute a version number by looking at the name of the directory created when
|
||||||
|
te tarball is unpacked. This conventionally includes both the name of the
|
||||||
|
project and a version number.
|
||||||
|
|
||||||
|
For users who work from a tarball built by 'setup.py sdist', it will get a
|
||||||
|
version number from a previously-generated _version.py file.
|
||||||
|
|
||||||
|
As a result, loading code directly from the source tree will not result in a
|
||||||
|
real version. If you want real versions from VCS trees (where you frequently
|
||||||
|
update from the upstream repository, or do new development), you will need to
|
||||||
|
do a 'setup.py version' after each update, and load code from the build/
|
||||||
|
directory.
|
||||||
|
|
||||||
|
You need to provide this code with a few configuration values:
|
||||||
|
|
||||||
|
versionfile_source:
|
||||||
|
A project-relative pathname into which the generated version strings
|
||||||
|
should be written. This is usually a _version.py next to your project's
|
||||||
|
main __init__.py file. If your project uses src/myproject/__init__.py,
|
||||||
|
this should be 'src/myproject/_version.py'. This file should be checked
|
||||||
|
in to your VCS as usual: the copy created below by 'setup.py
|
||||||
|
update_files' will include code that parses expanded VCS keywords in
|
||||||
|
generated tarballs. The 'build' and 'sdist' commands will replace it with
|
||||||
|
a copy that has just the calculated version string.
|
||||||
|
|
||||||
|
versionfile_build:
|
||||||
|
Like versionfile_source, but relative to the build directory instead of
|
||||||
|
the source directory. These will differ when your setup.py uses
|
||||||
|
'package_dir='. If you have package_dir={'myproject': 'src/myproject'},
|
||||||
|
then you will probably have versionfile_build='myproject/_version.py' and
|
||||||
|
versionfile_source='src/myproject/_version.py'.
|
||||||
|
|
||||||
|
tag_prefix: a string, like 'PROJECTNAME-', which appears at the start of all
|
||||||
|
VCS tags. If your tags look like 'myproject-1.2.0', then you
|
||||||
|
should use tag_prefix='myproject-'. If you use unprefixed tags
|
||||||
|
like '1.2.0', this should be an empty string.
|
||||||
|
|
||||||
|
parentdir_prefix: a string, frequently the same as tag_prefix, which
|
||||||
|
appears at the start of all unpacked tarball filenames. If
|
||||||
|
your tarball unpacks into 'myproject-1.2.0', this should
|
||||||
|
be 'myproject-'.
|
||||||
|
|
||||||
|
To use it:
|
||||||
|
|
||||||
|
1: include this file in the top level of your project
|
||||||
|
2: make the following changes to the top of your setup.py:
|
||||||
|
import versioneer
|
||||||
|
versioneer.versionfile_source = 'src/myproject/_version.py'
|
||||||
|
versioneer.versionfile_build = 'myproject/_version.py'
|
||||||
|
versioneer.tag_prefix = '' # tags are like 1.2.0
|
||||||
|
versioneer.parentdir_prefix = 'myproject-' # dirname like 'myproject-1.2.0'
|
||||||
|
3: add the following arguments to the setup() call in your setup.py:
|
||||||
|
version=versioneer.get_version(),
|
||||||
|
cmdclass=versioneer.get_cmdclass(),
|
||||||
|
4: run 'setup.py update_files', which will create _version.py, and will
|
||||||
|
append the following to your __init__.py:
|
||||||
|
from _version import __version__
|
||||||
|
5: modify your MANIFEST.in to include versioneer.py
|
||||||
|
6: add both versioneer.py and the generated _version.py to your VCS
|
||||||
|
"""
|
||||||
|
|
||||||
|
import os, sys, re
|
||||||
|
from distutils.core import Command
|
||||||
|
from distutils.command.sdist import sdist as _sdist
|
||||||
|
from distutils.command.build_py import build_py as _build_py
|
||||||
|
|
||||||
|
versionfile_source = None
|
||||||
|
versionfile_build = None
|
||||||
|
tag_prefix = None
|
||||||
|
parentdir_prefix = None
|
||||||
|
|
||||||
|
VCS = "git"
|
||||||
|
IN_LONG_VERSION_PY = False
|
||||||
|
|
||||||
|
|
||||||
|
LONG_VERSION_PY = '''
IN_LONG_VERSION_PY = True
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (build by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.

# This file is released into the public domain. Generated by
# versioneer-0.7+ (https://github.com/warner/python-versioneer)

# these strings will be replaced by git during git-archive
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"


import subprocess
import sys

def run_command(args, cwd=None, verbose=False):
    try:
        # remember shell=False, so use git.cmd on windows, not just git
        p = subprocess.Popen(args, stdout=subprocess.PIPE, cwd=cwd)
    except EnvironmentError:
        e = sys.exc_info()[1]
        if verbose:
            print("unable to run %%s" %% args[0])
            print(e)
        return None
    stdout = p.communicate()[0].strip()
    if sys.version >= '3':
        stdout = stdout.decode()
    if p.returncode != 0:
        if verbose:
            print("unable to run %%s (error)" %% args[0])
        return None
    return stdout


import sys
import re
import os.path

def get_expanded_variables(versionfile_source):
    # the code embedded in _version.py can just fetch the value of these
    # variables. When used from setup.py, we don't want to import
    # _version.py, so we do it with a regexp instead. This function is not
    # used from _version.py.
    variables = {}
    try:
        for line in open(versionfile_source,"r").readlines():
            if line.strip().startswith("git_refnames ="):
                mo = re.search(r'=\s*"(.*)"', line)
                if mo:
                    variables["refnames"] = mo.group(1)
            if line.strip().startswith("git_full ="):
                mo = re.search(r'=\s*"(.*)"', line)
                if mo:
                    variables["full"] = mo.group(1)
    except EnvironmentError:
        pass
    return variables

def versions_from_expanded_variables(variables, tag_prefix, verbose=False):
    refnames = variables["refnames"].strip()
    if refnames.startswith("$Format"):
        if verbose:
            print("variables are unexpanded, not using")
        return {} # unexpanded, so not in an unpacked git-archive tarball
    refs = set([r.strip() for r in refnames.strip("()").split(",")])
    for ref in list(refs):
        if not re.search(r'\d', ref):
            if verbose:
                print("discarding '%%s', no digits" %% ref)
            refs.discard(ref)
            # Assume all version tags have a digit. git's %%d expansion
            # behaves like git log --decorate=short and strips out the
            # refs/heads/ and refs/tags/ prefixes that would let us
            # distinguish between branches and tags. By ignoring refnames
            # without digits, we filter out many common branch names like
            # "release" and "stabilization", as well as "HEAD" and "master".
    if verbose:
        print("remaining refs: %%s" %% ",".join(sorted(refs)))
    for ref in sorted(refs):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if ref.startswith(tag_prefix):
            r = ref[len(tag_prefix):]
            if verbose:
                print("picking %%s" %% r)
            return { "version": r,
                     "full": variables["full"].strip() }
    # no suitable tags, so we use the full revision id
    if verbose:
        print("no suitable tags, using full revision id")
    return { "version": variables["full"].strip(),
             "full": variables["full"].strip() }

def versions_from_vcs(tag_prefix, versionfile_source, verbose=False):
    # this runs 'git' from the root of the source tree. That either means
    # someone ran a setup.py command (and this code is in versioneer.py, so
    # IN_LONG_VERSION_PY=False, thus the containing directory is the root of
    # the source tree), or someone ran a project-specific entry point (and
    # this code is in _version.py, so IN_LONG_VERSION_PY=True, thus the
    # containing directory is somewhere deeper in the source tree). This only
    # gets called if the git-archive 'subst' variables were *not* expanded,
    # and _version.py hasn't already been rewritten with a short version
    # string, meaning we're inside a checked out source tree.

    try:
        here = os.path.abspath(__file__)
    except NameError:
        # some py2exe/bbfreeze/non-CPython implementations don't do __file__
        return {} # not always correct

    # versionfile_source is the relative path from the top of the source tree
    # (where the .git directory might live) to this file. Invert this to find
    # the root from __file__.
    root = here
    if IN_LONG_VERSION_PY:
        for i in range(len(versionfile_source.split("/"))):
            root = os.path.dirname(root)
    else:
        root = os.path.dirname(here)
    if not os.path.exists(os.path.join(root, ".git")):
        if verbose:
            print("no .git in %%s" %% root)
        return {}

    GIT = "git"
    if sys.platform == "win32":
        GIT = "git.cmd"
    stdout = run_command([GIT, "describe", "--tags", "--dirty", "--always"],
                         cwd=root)
    if stdout is None:
        return {}
    if not stdout.startswith(tag_prefix):
        if verbose:
            print("tag '%%s' doesn't start with prefix '%%s'" %% (stdout, tag_prefix))
        return {}
    tag = stdout[len(tag_prefix):]
    stdout = run_command([GIT, "rev-parse", "HEAD"], cwd=root)
    if stdout is None:
        return {}
    full = stdout.strip()
    if tag.endswith("-dirty"):
        full += "-dirty"
    return {"version": tag, "full": full}


def versions_from_parentdir(parentdir_prefix, versionfile_source, verbose=False):
    if IN_LONG_VERSION_PY:
        # We're running from _version.py. If it's from a source tree
        # (execute-in-place), we can work upwards to find the root of the
        # tree, and then check the parent directory for a version string. If
        # it's in an installed application, there's no hope.
        try:
            here = os.path.abspath(__file__)
        except NameError:
            # py2exe/bbfreeze/non-CPython don't have __file__
            return {} # without __file__, we have no hope
        # versionfile_source is the relative path from the top of the source
        # tree to _version.py. Invert this to find the root from __file__.
        root = here
        for i in range(len(versionfile_source.split("/"))):
            root = os.path.dirname(root)
    else:
        # we're running from versioneer.py, which means we're running from
        # the setup.py in a source tree. sys.argv[0] is setup.py in the root.
        here = os.path.abspath(sys.argv[0])
        root = os.path.dirname(here)

    # Source tarballs conventionally unpack into a directory that includes
    # both the project name and a version string.
    dirname = os.path.basename(root)
    if not dirname.startswith(parentdir_prefix):
        if verbose:
            print("guessing rootdir is '%%s', but '%%s' doesn't start with prefix '%%s'" %%
                  (root, dirname, parentdir_prefix))
        return None
    return {"version": dirname[len(parentdir_prefix):], "full": ""}

tag_prefix = "%(TAG_PREFIX)s"
parentdir_prefix = "%(PARENTDIR_PREFIX)s"
versionfile_source = "%(VERSIONFILE_SOURCE)s"

def get_versions(default={"version": "unknown", "full": ""}, verbose=False):
    variables = { "refnames": git_refnames, "full": git_full }
    ver = versions_from_expanded_variables(variables, tag_prefix, verbose)
    if not ver:
        ver = versions_from_vcs(tag_prefix, versionfile_source, verbose)
    if not ver:
        ver = versions_from_parentdir(parentdir_prefix, versionfile_source,
                                      verbose)
    if not ver:
        ver = default
    return ver

'''

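# Illustrative note: cmd_update_files (below) renders the template above with
# ordinary %-substitution, so the %(DOLLAR)s placeholders become literal '$'
# characters and %% collapses to '%'. For example, with the substitutions
# performed in cmd_update_files, the generated _version.py begins with lines
# like:
#
#   git_refnames = "$Format:%d$"
#   git_full = "$Format:%H$"
#
# which 'git archive' then expands via the export-subst attribute.
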
import subprocess
import sys

def run_command(args, cwd=None, verbose=False):
    try:
        # remember shell=False, so use git.cmd on windows, not just git
        p = subprocess.Popen(args, stdout=subprocess.PIPE, cwd=cwd)
    except EnvironmentError:
        e = sys.exc_info()[1]
        if verbose:
            print("unable to run %s" % args[0])
            print(e)
        return None
    stdout = p.communicate()[0].strip()
    if sys.version >= '3':
        stdout = stdout.decode()
    if p.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % args[0])
        return None
    return stdout

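# Illustrative sketch (repository path assumed): run_command returns the
# command's stripped stdout as a string, or None if the program could not be
# started or exited nonzero. For example:
#
#   out = run_command(["git", "rev-parse", "HEAD"], cwd="/path/to/repo")
#   if out is not None:
#       print("current revision:", out)
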
import sys
import re
import os.path

def get_expanded_variables(versionfile_source):
    # the code embedded in _version.py can just fetch the value of these
    # variables. When used from setup.py, we don't want to import
    # _version.py, so we do it with a regexp instead. This function is not
    # used from _version.py.
    variables = {}
    try:
        for line in open(versionfile_source,"r").readlines():
            if line.strip().startswith("git_refnames ="):
                mo = re.search(r'=\s*"(.*)"', line)
                if mo:
                    variables["refnames"] = mo.group(1)
            if line.strip().startswith("git_full ="):
                mo = re.search(r'=\s*"(.*)"', line)
                if mo:
                    variables["full"] = mo.group(1)
    except EnvironmentError:
        pass
    return variables

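# Illustrative note (values assumed for the example): for a git-archive
# tarball of a tagged release, the returned dict might look like:
#
#   {"refnames": " (HEAD, myproject-1.2.0, master)",
#    "full": "1234567890abcdef1234567890abcdef12345678"}
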
def versions_from_expanded_variables(variables, tag_prefix, verbose=False):
    refnames = variables["refnames"].strip()
    if refnames.startswith("$Format"):
        if verbose:
            print("variables are unexpanded, not using")
        return {} # unexpanded, so not in an unpacked git-archive tarball
    refs = set([r.strip() for r in refnames.strip("()").split(",")])
    for ref in list(refs):
        if not re.search(r'\d', ref):
            if verbose:
                print("discarding '%s', no digits" % ref)
            refs.discard(ref)
            # Assume all version tags have a digit. git's %d expansion
            # behaves like git log --decorate=short and strips out the
            # refs/heads/ and refs/tags/ prefixes that would let us
            # distinguish between branches and tags. By ignoring refnames
            # without digits, we filter out many common branch names like
            # "release" and "stabilization", as well as "HEAD" and "master".
    if verbose:
        print("remaining refs: %s" % ",".join(sorted(refs)))
    for ref in sorted(refs):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if ref.startswith(tag_prefix):
            r = ref[len(tag_prefix):]
            if verbose:
                print("picking %s" % r)
            return { "version": r,
                     "full": variables["full"].strip() }
    # no suitable tags, so we use the full revision id
    if verbose:
        print("no suitable tags, using full revision id")
    return { "version": variables["full"].strip(),
             "full": variables["full"].strip() }

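# Illustrative sketch (values assumed, continuing the example above): feeding
# that dict through this function with tag_prefix='myproject-' discards
# "HEAD" and "master" (no digits), strips the prefix from the remaining tag,
# and returns:
#
#   {"version": "1.2.0",
#    "full": "1234567890abcdef1234567890abcdef12345678"}
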
def versions_from_vcs(tag_prefix, versionfile_source, verbose=False):
    # this runs 'git' from the root of the source tree. That either means
    # someone ran a setup.py command (and this code is in versioneer.py, so
    # IN_LONG_VERSION_PY=False, thus the containing directory is the root of
    # the source tree), or someone ran a project-specific entry point (and
    # this code is in _version.py, so IN_LONG_VERSION_PY=True, thus the
    # containing directory is somewhere deeper in the source tree). This only
    # gets called if the git-archive 'subst' variables were *not* expanded,
    # and _version.py hasn't already been rewritten with a short version
    # string, meaning we're inside a checked out source tree.

    try:
        here = os.path.abspath(__file__)
    except NameError:
        # some py2exe/bbfreeze/non-CPython implementations don't do __file__
        return {} # not always correct

    # versionfile_source is the relative path from the top of the source tree
    # (where the .git directory might live) to this file. Invert this to find
    # the root from __file__.
    root = here
    if IN_LONG_VERSION_PY:
        for i in range(len(versionfile_source.split("/"))):
            root = os.path.dirname(root)
    else:
        root = os.path.dirname(here)
    if not os.path.exists(os.path.join(root, ".git")):
        if verbose:
            print("no .git in %s" % root)
        return {}

    GIT = "git"
    if sys.platform == "win32":
        GIT = "git.cmd"
    stdout = run_command([GIT, "describe", "--tags", "--dirty", "--always"],
                         cwd=root)
    if stdout is None:
        return {}
    if not stdout.startswith(tag_prefix):
        if verbose:
            print("tag '%s' doesn't start with prefix '%s'" % (stdout, tag_prefix))
        return {}
    tag = stdout[len(tag_prefix):]
    stdout = run_command([GIT, "rev-parse", "HEAD"], cwd=root)
    if stdout is None:
        return {}
    full = stdout.strip()
    if tag.endswith("-dirty"):
        full += "-dirty"
    return {"version": tag, "full": full}

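# Illustrative note (tag and hash assumed for the example): in a checkout
# that is two commits past the tag myproject-1.2.0 with uncommitted changes,
# 'git describe --tags --dirty --always' prints something like:
#
#   myproject-1.2.0-2-g1234567-dirty
#
# so with tag_prefix='myproject-' this function reports
# version='1.2.0-2-g1234567-dirty' and appends '-dirty' to the full
# revision id as well.
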
def versions_from_parentdir(parentdir_prefix, versionfile_source, verbose=False):
    if IN_LONG_VERSION_PY:
        # We're running from _version.py. If it's from a source tree
        # (execute-in-place), we can work upwards to find the root of the
        # tree, and then check the parent directory for a version string. If
        # it's in an installed application, there's no hope.
        try:
            here = os.path.abspath(__file__)
        except NameError:
            # py2exe/bbfreeze/non-CPython don't have __file__
            return {} # without __file__, we have no hope
        # versionfile_source is the relative path from the top of the source
        # tree to _version.py. Invert this to find the root from __file__.
        root = here
        for i in range(len(versionfile_source.split("/"))):
            root = os.path.dirname(root)
    else:
        # we're running from versioneer.py, which means we're running from
        # the setup.py in a source tree. sys.argv[0] is setup.py in the root.
        here = os.path.abspath(sys.argv[0])
        root = os.path.dirname(here)

    # Source tarballs conventionally unpack into a directory that includes
    # both the project name and a version string.
    dirname = os.path.basename(root)
    if not dirname.startswith(parentdir_prefix):
        if verbose:
            print("guessing rootdir is '%s', but '%s' doesn't start with prefix '%s'" %
                  (root, dirname, parentdir_prefix))
        return None
    return {"version": dirname[len(parentdir_prefix):], "full": ""}

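# Illustrative note (path assumed for the example): if a release tarball has
# been unpacked into /tmp/myproject-1.2.0/, then with
# parentdir_prefix='myproject-' this function returns
# {"version": "1.2.0", "full": ""}; the full revision id cannot be recovered
# from a directory name alone.
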
import sys

def do_vcs_install(versionfile_source, ipy):
    GIT = "git"
    if sys.platform == "win32":
        GIT = "git.cmd"
    run_command([GIT, "add", "versioneer.py"])
    run_command([GIT, "add", versionfile_source])
    run_command([GIT, "add", ipy])
    present = False
    try:
        f = open(".gitattributes", "r")
        for line in f.readlines():
            if line.strip().startswith(versionfile_source):
                if "export-subst" in line.strip().split()[1:]:
                    present = True
        f.close()
    except EnvironmentError:
        pass
    if not present:
        f = open(".gitattributes", "a+")
        f.write("%s export-subst\n" % versionfile_source)
        f.close()
        run_command([GIT, "add", ".gitattributes"])

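# Illustrative note (value assumed for the example): with
# versionfile_source='src/myproject/_version.py', do_vcs_install ensures
# .gitattributes contains the line:
#
#   src/myproject/_version.py export-subst
#
# which tells 'git archive' to expand the $Format$ placeholders in that file
# when building tarballs.
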
SHORT_VERSION_PY = """
|
||||||
|
# This file was generated by 'versioneer.py' (0.7+) from
|
||||||
|
# revision-control system data, or from the parent directory name of an
|
||||||
|
# unpacked source archive. Distribution tarballs contain a pre-generated copy
|
||||||
|
# of this file.
|
||||||
|
|
||||||
|
version_version = '%(version)s'
|
||||||
|
version_full = '%(full)s'
|
||||||
|
def get_versions(default={}, verbose=False):
|
||||||
|
return {'version': version_version, 'full': version_full}
|
||||||
|
|
||||||
|
"""
|
||||||
|
|
||||||
|
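# Illustrative note (values assumed): rendering SHORT_VERSION_PY with, say,
# {"version": "1.2.0", "full": "1234567890abcdef..."} produces a tiny
# _version.py whose get_versions() simply returns those two strings, with no
# git invocation at all.
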
DEFAULT = {"version": "unknown", "full": "unknown"}

def versions_from_file(filename):
    versions = {}
    try:
        f = open(filename)
    except EnvironmentError:
        return versions
    for line in f.readlines():
        mo = re.match("version_version = '([^']+)'", line)
        if mo:
            versions["version"] = mo.group(1)
        mo = re.match("version_full = '([^']+)'", line)
        if mo:
            versions["full"] = mo.group(1)
    f.close()
    return versions

def write_to_version_file(filename, versions):
    f = open(filename, "w")
    f.write(SHORT_VERSION_PY % versions)
    f.close()
    print("set %s to '%s'" % (filename, versions["version"]))


def get_best_versions(versionfile, tag_prefix, parentdir_prefix,
                      default=DEFAULT, verbose=False):
    # returns dict with two keys: 'version' and 'full'
    #
    # extract version from first of _version.py, 'git describe', parentdir.
    # This is meant to work for developers using a source checkout, for users
    # of a tarball created by 'setup.py sdist', and for users of a
    # tarball/zipball created by 'git archive' or github's download-from-tag
    # feature.

    variables = get_expanded_variables(versionfile_source)
    if variables:
        ver = versions_from_expanded_variables(variables, tag_prefix)
        if ver:
            if verbose: print("got version from expanded variable %s" % ver)
            return ver

    ver = versions_from_file(versionfile)
    if ver:
        if verbose: print("got version from file %s %s" % (versionfile, ver))
        return ver

    ver = versions_from_vcs(tag_prefix, versionfile_source, verbose)
    if ver:
        if verbose: print("got version from git %s" % ver)
        return ver

    ver = versions_from_parentdir(parentdir_prefix, versionfile_source, verbose)
    if ver:
        if verbose: print("got version from parentdir %s" % ver)
        return ver

    if verbose: print("got version from default %s" % default)
    return default

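# Illustrative note: the fallback order above means a developer checkout
# answers via 'git describe', an sdist tarball answers via the rewritten
# short _version.py, and a 'git archive' tarball answers via the expanded
# $Format$ variables; only if all of those fail does the default
# {"version": "unknown", "full": "unknown"} apply.
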
def get_versions(default=DEFAULT, verbose=False):
    assert versionfile_source is not None, "please set versioneer.versionfile_source"
    assert tag_prefix is not None, "please set versioneer.tag_prefix"
    assert parentdir_prefix is not None, "please set versioneer.parentdir_prefix"
    return get_best_versions(versionfile_source, tag_prefix, parentdir_prefix,
                             default=default, verbose=verbose)

def get_version(verbose=False):
    return get_versions(verbose=verbose)["version"]

class cmd_version(Command):
    description = "report generated version string"
    user_options = []
    boolean_options = []
    def initialize_options(self):
        pass
    def finalize_options(self):
        pass
    def run(self):
        ver = get_version(verbose=True)
        print("Version is currently: %s" % ver)

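# Illustrative note (tag assumed for the example): once wired into setup.py,
# 'python setup.py version' runs the command above and, in a checkout tagged
# myproject-1.2.0, ends with a line like:
#
#   Version is currently: 1.2.0
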
class cmd_build_py(_build_py):
    def run(self):
        versions = get_versions(verbose=True)
        _build_py.run(self)
        # now locate _version.py in the new build/ directory and replace it
        # with an updated value
        target_versionfile = os.path.join(self.build_lib, versionfile_build)
        print("UPDATING %s" % target_versionfile)
        os.unlink(target_versionfile)
        f = open(target_versionfile, "w")
        f.write(SHORT_VERSION_PY % versions)
        f.close()

class cmd_sdist(_sdist):
    def run(self):
        versions = get_versions(verbose=True)
        self._versioneer_generated_versions = versions
        # unless we update this, the command will keep using the old version
        self.distribution.metadata.version = versions["version"]
        return _sdist.run(self)

    def make_release_tree(self, base_dir, files):
        _sdist.make_release_tree(self, base_dir, files)
        # now locate _version.py in the new base_dir directory (remembering
        # that it may be a hardlink) and replace it with an updated value
        target_versionfile = os.path.join(base_dir, versionfile_source)
        print("UPDATING %s" % target_versionfile)
        os.unlink(target_versionfile)
        f = open(target_versionfile, "w")
        f.write(SHORT_VERSION_PY % self._versioneer_generated_versions)
        f.close()

INIT_PY_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
"""

class cmd_update_files(Command):
    description = "modify __init__.py and create _version.py"
    user_options = []
    boolean_options = []
    def initialize_options(self):
        pass
    def finalize_options(self):
        pass
    def run(self):
        ipy = os.path.join(os.path.dirname(versionfile_source), "__init__.py")
        print(" creating %s" % versionfile_source)
        f = open(versionfile_source, "w")
        f.write(LONG_VERSION_PY % {"DOLLAR": "$",
                                   "TAG_PREFIX": tag_prefix,
                                   "PARENTDIR_PREFIX": parentdir_prefix,
                                   "VERSIONFILE_SOURCE": versionfile_source,
                                   })
        f.close()
        try:
            old = open(ipy, "r").read()
        except EnvironmentError:
            old = ""
        if INIT_PY_SNIPPET not in old:
            print(" appending to %s" % ipy)
            f = open(ipy, "a")
            f.write(INIT_PY_SNIPPET)
            f.close()
        else:
            print(" %s unmodified" % ipy)
        do_vcs_install(versionfile_source, ipy)

def get_cmdclass():
    return {'version': cmd_version,
            'update_files': cmd_update_files,
            'build_py': cmd_build_py,
            'sdist': cmd_sdist,
            }
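
# Illustrative note: get_cmdclass() is the hook that makes the custom
# commands available to distutils. With the setup.py sketch near the top of
# this file, cmdclass=versioneer.get_cmdclass() means 'setup.py sdist' and
# 'setup.py build_py' transparently rewrite _version.py, and the extra
# 'setup.py version' and 'setup.py update_files' commands become available.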