Compare commits: nilmdb-0.2 ... nilmdb-1.1

42 commits:

5dce851bef
1431e41d16
a49c655816
30e3ffc0e9
db7211c3a9
c6d57cf5c3
ca5253ddee
e19da84b2e
3e8e3542fd
2f7365412d
bba9ad131e
ee24380d1f
bfcd91acf8
d97291d4d3
a61fbbcf45
5adc8fd0a7
251a486c28
1edb96a0bd
52e674a192
e241c13bf1
b53ff31212
2045e89f24
841b2dab5c
d634f7d3cf
1593e181a3
8e781506de
f6a2c7620a
6c30e5ab2f
810eac4e61
d9bb3ab7ab
21d0e90bd9
f071d749ce
d95c354595
9bcd8183f6
5c531d8273
3fe3e2ca95
f01e781469
e6180a5a81
a9d31b46ed
b01f23ed99
842bf21411
750d9e3c38
@@ -7,4 +7,4 @@
exclude_lines =
    pragma: no cover
    if 0:
omit = nilmdb/utils/datetime_tz*
omit = nilmdb/utils/datetime_tz*,nilmdb/scripts,nilmdb/_version.py
.gitattributes (vendored, 1 line, new file)
@@ -0,0 +1 @@
nilmdb/_version.py export-subst
.gitignore (vendored, 4 changed lines)
@@ -18,6 +18,10 @@ nilmdb/server/rbtree.so
dist/
nilmdb.egg-info/

# This gets generated as needed by setup.py
MANIFEST.in
MANIFEST

# Misc
timeit*out
.pylintrc (250 lines, new file)
@@ -0,0 +1,250 @@
# -*- conf -*-
[MASTER]

# Specify a configuration file.
#rcfile=

# Python code to execute, usually for sys.path manipulation such as
# pygtk.require().
#init-hook=

# Profiled execution.
profile=no

# Add files or directories to the blacklist. They should be base names, not
# paths.
ignore=datetime_tz

# Pickle collected data for later comparisons.
persistent=no

# List of plugins (as comma separated values of python modules names) to load,
# usually to register additional checkers.
load-plugins=


[MESSAGES CONTROL]

# Enable the message, report, category or checker with the given id(s). You can
# either give multiple identifier separated by comma (,) or put this option
# multiple time.
#enable=

# Disable the message, report, category or checker with the given id(s). You
# can either give multiple identifier separated by comma (,) or put this option
# multiple time (only on the command line, not in the configuration file where
# it should appear only once).
disable=C0111,R0903,R0201,R0914,R0912,W0142,W0703,W0702


[REPORTS]

# Set the output format. Available formats are text, parseable, colorized, msvs
# (visual studio) and html
output-format=parseable

# Include message's id in output
include-ids=yes

# Put messages in a separate file for each module / package specified on the
# command line instead of printing them on stdout. Reports (if any) will be
# written in a file name "pylint_global.[txt|html]".
files-output=no

# Tells whether to display a full report or only the messages
reports=yes

# Python expression which should return a note less than 10 (10 is the highest
# note). You have access to the variables errors warning, statement which
# respectively contain the number of errors / warnings messages and the total
# number of statements analyzed. This is used by the global evaluation report
# (RP0004).
evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)

# Add a comment according to your evaluation note. This is used by the global
# evaluation report (RP0004).
comment=no


[SIMILARITIES]

# Minimum lines number of a similarity.
min-similarity-lines=4

# Ignore comments when computing similarities.
ignore-comments=yes

# Ignore docstrings when computing similarities.
ignore-docstrings=yes


[TYPECHECK]

# Tells whether missing members accessed in mixin class should be ignored. A
# mixin class is detected if its name ends with "mixin" (case insensitive).
ignore-mixin-members=yes

# List of classes names for which member attributes should not be checked
# (useful for classes with attributes dynamically set).
ignored-classes=SQLObject

# When zope mode is activated, add a predefined set of Zope acquired attributes
# to generated-members.
zope=no

# List of members which are set dynamically and missed by pylint inference
# system, and so shouldn't trigger E0201 when accessed. Python regular
# expressions are accepted.
generated-members=REQUEST,acl_users,aq_parent


[FORMAT]

# Maximum number of characters on a single line.
max-line-length=80

# Maximum number of lines in a module
max-module-lines=1000

# String used as indentation unit. This is usually "    " (4 spaces) or "\t" (1
# tab).
indent-string='    '


[MISCELLANEOUS]

# List of note tags to take in consideration, separated by a comma.
notes=FIXME,XXX,TODO


[VARIABLES]

# Tells whether we should check for unused import in __init__ files.
init-import=no

# A regular expression matching the beginning of the name of dummy variables
# (i.e. not used).
dummy-variables-rgx=_|dummy

# List of additional names supposed to be defined in builtins. Remember that
# you should avoid to define new builtins when possible.
additional-builtins=


[BASIC]

# Required attributes for module, separated by a comma
required-attributes=

# List of builtins function names that should not be used, separated by a comma
bad-functions=apply,input

# Regular expression which should only match correct module names
module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$

# Regular expression which should only match correct module level names
const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__)|version)$

# Regular expression which should only match correct class names
class-rgx=[A-Z_][a-zA-Z0-9]+$

# Regular expression which should only match correct function names
function-rgx=[a-z_][a-z0-9_]{0,30}$

# Regular expression which should only match correct method names
method-rgx=[a-z_][a-z0-9_]{0,30}$

# Regular expression which should only match correct instance attribute names
attr-rgx=[a-z_][a-z0-9_]{0,30}$

# Regular expression which should only match correct argument names
argument-rgx=[a-z_][a-z0-9_]{0,30}$

# Regular expression which should only match correct variable names
variable-rgx=[a-z_][a-z0-9_]{0,30}$

# Regular expression which should only match correct list comprehension /
# generator expression variable names
inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$

# Good variable names which should always be accepted, separated by a comma
good-names=i,j,k,ex,Run,_

# Bad variable names which should always be refused, separated by a comma
bad-names=foo,bar,baz,toto,tutu,tata

# Regular expression which should only match functions or classes name which do
# not require a docstring
no-docstring-rgx=__.*__


[CLASSES]

# List of interface methods to ignore, separated by a comma. This is used for
# instance to not check methods defines in Zope's Interface base class.
ignore-iface-methods=isImplementedBy,deferred,extends,names,namesAndDescriptions,queryDescriptionFor,getBases,getDescriptionFor,getDoc,getName,getTaggedValue,getTaggedValueTags,isEqualOrExtendedBy,setTaggedValue,isImplementedByInstancesOf,adaptWith,is_implemented_by

# List of method names used to declare (i.e. assign) instance attributes.
defining-attr-methods=__init__,__new__,setUp

# List of valid names for the first argument in a class method.
valid-classmethod-first-arg=cls


[DESIGN]

# Maximum number of arguments for function / method
max-args=5

# Argument names that match this expression will be ignored. Default to name
# with leading underscore
ignored-argument-names=_.*

# Maximum number of locals for function / method body
max-locals=15

# Maximum number of return / yield for function / method body
max-returns=6

# Maximum number of branch for function / method body
max-branchs=12

# Maximum number of statements in function / method body
max-statements=50

# Maximum number of parents for a class (see R0901).
max-parents=7

# Maximum number of attributes for a class (see R0902).
max-attributes=7

# Minimum number of public methods for a class (see R0903).
min-public-methods=2

# Maximum number of public methods for a class (see R0904).
max-public-methods=20


[IMPORTS]

# Deprecated modules which should not be used, separated by a comma
deprecated-modules=regsub,string,TERMIOS,Bastion,rexec

# Create a graph of every (i.e. internal and external) dependencies in the
# given file (report RP0402 must not be disabled)
import-graph=

# Create a graph of external dependencies in the given file (report RP0402 must
# not be disabled)
ext-import-graph=

# Create a graph of internal dependencies in the given file (report RP0402 must
# not be disabled)
int-import-graph=


[EXCEPTIONS]

# Exceptions that will emit a warning when being caught. Defaults to
# "Exception"
overgeneral-exceptions=Exception
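The Makefile change below wires this configuration into the lint target; the same check can be run by hand:

    pylint --rcfile=.pylintrc nilmdb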
Makefile (28 changed lines)
@@ -1,12 +1,36 @@
# By default, run the tests.
all: test

version:
	python setup.py version

build:
	python setup.py build_ext --inplace

dist: sdist
sdist:
	python setup.py sdist

install:
	python setup.py install

docs:
	make -C docs

lint:
	pylint -f parseable nilmdb
	pylint --rcfile=.pylintrc nilmdb

test:
	python runtests.py
	python tests/runtests.py

clean::
	find . -name '*pyc' | xargs rm -f
	rm -f .coverage
	rm -rf tests/*testdb*
	rm -rf nilmdb.egg-info/ build/ nilmdb/server/*.so MANIFEST.in
	make -C docs clean

gitclean::
	git clean -dXf

.PHONY: all build dist sdist install docs lint test clean
README.txt (14 changed lines)
@@ -3,8 +3,20 @@ by Jim Paris <jim@jtan.com>

Prerequisites:

  sudo apt-get install python2.7 python-cherrypy3 python-decorator python-nose python-coverage python-setuptools

  # Runtime and build environments
  sudo apt-get install python2.7 python2.7-dev python-setuptools cython

  # Base NilmDB dependencies
  sudo apt-get install python-cherrypy3 python-decorator python-simplejson python-pycurl python-dateutil python-tz python-psutil

  # Tools for running tests
  sudo apt-get install python-nose python-coverage

Install:

  python setup.py install

Usage:

  nilmdb-server --help
  nilmtool --help
@@ -1,4 +1,8 @@
"""Main NilmDB import"""

from server import NilmDB, Server
from client import Client
from nilmdb.server import NilmDB, Server
from nilmdb.client import Client

from nilmdb._version import get_versions
__version__ = get_versions()['version']
del get_versions
nilmdb/_version.py (197 lines, new file)
@@ -0,0 +1,197 @@
IN_LONG_VERSION_PY = True
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (build by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.

# This file is released into the public domain. Generated by
# versioneer-0.7+ (https://github.com/warner/python-versioneer)

# these strings will be replaced by git during git-archive
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"


import subprocess
import sys

def run_command(args, cwd=None, verbose=False):
    try:
        # remember shell=False, so use git.cmd on windows, not just git
        p = subprocess.Popen(args, stdout=subprocess.PIPE, cwd=cwd)
    except EnvironmentError:
        e = sys.exc_info()[1]
        if verbose:
            print("unable to run %s" % args[0])
            print(e)
        return None
    stdout = p.communicate()[0].strip()
    if sys.version >= '3':
        stdout = stdout.decode()
    if p.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % args[0])
        return None
    return stdout


import sys
import re
import os.path

def get_expanded_variables(versionfile_source):
    # the code embedded in _version.py can just fetch the value of these
    # variables. When used from setup.py, we don't want to import
    # _version.py, so we do it with a regexp instead. This function is not
    # used from _version.py.
    variables = {}
    try:
        for line in open(versionfile_source,"r").readlines():
            if line.strip().startswith("git_refnames ="):
                mo = re.search(r'=\s*"(.*)"', line)
                if mo:
                    variables["refnames"] = mo.group(1)
            if line.strip().startswith("git_full ="):
                mo = re.search(r'=\s*"(.*)"', line)
                if mo:
                    variables["full"] = mo.group(1)
    except EnvironmentError:
        pass
    return variables

def versions_from_expanded_variables(variables, tag_prefix, verbose=False):
    refnames = variables["refnames"].strip()
    if refnames.startswith("$Format"):
        if verbose:
            print("variables are unexpanded, not using")
        return {} # unexpanded, so not in an unpacked git-archive tarball
    refs = set([r.strip() for r in refnames.strip("()").split(",")])
    for ref in list(refs):
        if not re.search(r'\d', ref):
            if verbose:
                print("discarding '%s', no digits" % ref)
            refs.discard(ref)
            # Assume all version tags have a digit. git's %d expansion
            # behaves like git log --decorate=short and strips out the
            # refs/heads/ and refs/tags/ prefixes that would let us
            # distinguish between branches and tags. By ignoring refnames
            # without digits, we filter out many common branch names like
            # "release" and "stabilization", as well as "HEAD" and "master".
    if verbose:
        print("remaining refs: %s" % ",".join(sorted(refs)))
    for ref in sorted(refs):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if ref.startswith(tag_prefix):
            r = ref[len(tag_prefix):]
            if verbose:
                print("picking %s" % r)
            return { "version": r,
                     "full": variables["full"].strip() }
    # no suitable tags, so we use the full revision id
    if verbose:
        print("no suitable tags, using full revision id")
    return { "version": variables["full"].strip(),
             "full": variables["full"].strip() }

def versions_from_vcs(tag_prefix, versionfile_source, verbose=False):
    # this runs 'git' from the root of the source tree. That either means
    # someone ran a setup.py command (and this code is in versioneer.py, so
    # IN_LONG_VERSION_PY=False, thus the containing directory is the root of
    # the source tree), or someone ran a project-specific entry point (and
    # this code is in _version.py, so IN_LONG_VERSION_PY=True, thus the
    # containing directory is somewhere deeper in the source tree). This only
    # gets called if the git-archive 'subst' variables were *not* expanded,
    # and _version.py hasn't already been rewritten with a short version
    # string, meaning we're inside a checked out source tree.

    try:
        here = os.path.abspath(__file__)
    except NameError:
        # some py2exe/bbfreeze/non-CPython implementations don't do __file__
        return {} # not always correct

    # versionfile_source is the relative path from the top of the source tree
    # (where the .git directory might live) to this file. Invert this to find
    # the root from __file__.
    root = here
    if IN_LONG_VERSION_PY:
        for i in range(len(versionfile_source.split("/"))):
            root = os.path.dirname(root)
    else:
        root = os.path.dirname(here)
    if not os.path.exists(os.path.join(root, ".git")):
        if verbose:
            print("no .git in %s" % root)
        return {}

    GIT = "git"
    if sys.platform == "win32":
        GIT = "git.cmd"
    stdout = run_command([GIT, "describe", "--tags", "--dirty", "--always"],
                         cwd=root)
    if stdout is None:
        return {}
    if not stdout.startswith(tag_prefix):
        if verbose:
            print("tag '%s' doesn't start with prefix '%s'" % (stdout, tag_prefix))
        return {}
    tag = stdout[len(tag_prefix):]
    stdout = run_command([GIT, "rev-parse", "HEAD"], cwd=root)
    if stdout is None:
        return {}
    full = stdout.strip()
    if tag.endswith("-dirty"):
        full += "-dirty"
    return {"version": tag, "full": full}


def versions_from_parentdir(parentdir_prefix, versionfile_source, verbose=False):
    if IN_LONG_VERSION_PY:
        # We're running from _version.py. If it's from a source tree
        # (execute-in-place), we can work upwards to find the root of the
        # tree, and then check the parent directory for a version string. If
        # it's in an installed application, there's no hope.
        try:
            here = os.path.abspath(__file__)
        except NameError:
            # py2exe/bbfreeze/non-CPython don't have __file__
            return {} # without __file__, we have no hope
        # versionfile_source is the relative path from the top of the source
        # tree to _version.py. Invert this to find the root from __file__.
        root = here
        for i in range(len(versionfile_source.split("/"))):
            root = os.path.dirname(root)
    else:
        # we're running from versioneer.py, which means we're running from
        # the setup.py in a source tree. sys.argv[0] is setup.py in the root.
        here = os.path.abspath(sys.argv[0])
        root = os.path.dirname(here)

    # Source tarballs conventionally unpack into a directory that includes
    # both the project name and a version string.
    dirname = os.path.basename(root)
    if not dirname.startswith(parentdir_prefix):
        if verbose:
            print("guessing rootdir is '%s', but '%s' doesn't start with prefix '%s'" %
                  (root, dirname, parentdir_prefix))
        return None
    return {"version": dirname[len(parentdir_prefix):], "full": ""}

tag_prefix = "nilmdb-"
parentdir_prefix = "nilmdb-"
versionfile_source = "nilmdb/_version.py"

def get_versions(default={"version": "unknown", "full": ""}, verbose=False):
    variables = { "refnames": git_refnames, "full": git_full }
    ver = versions_from_expanded_variables(variables, tag_prefix, verbose)
    if not ver:
        ver = versions_from_vcs(tag_prefix, versionfile_source, verbose)
    if not ver:
        ver = versions_from_parentdir(parentdir_prefix, versionfile_source,
                                      verbose)
    if not ver:
        ver = default
    return ver
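get_versions() tries the three strategies in order (expanded git-archive variables, a live git checkout, then the parent directory name) before falling back to the default. A minimal sketch of consuming it, mirroring the nilmdb/__init__.py hunk earlier in this diff:

    # Sketch: how a package picks up its version from this module
    # (this mirrors the nilmdb/__init__.py change above).
    from nilmdb._version import get_versions
    __version__ = get_versions()['version']  # e.g. "1.1" for tag "nilmdb-1.1"
    del get_versions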
@@ -1,4 +1,4 @@
"""nilmdb.client"""

from .client import Client
from .errors import *
from nilmdb.client.client import Client
from nilmdb.client.errors import ClientError, ServerError, Error
@@ -5,34 +5,38 @@
import nilmdb
import nilmdb.utils
import nilmdb.client.httpclient
from nilmdb.utils.printf import *

import time
import sys
import re
import os
import simplejson as json
import itertools

version = "1.0"
import contextlib

def float_to_string(f):
    # Use repr to maintain full precision in the string output.
    """Use repr to maintain full precision in the string output."""
    return repr(float(f))

def extract_timestamp(line):
    """Extract just the timestamp from a line of data text"""
    return float(line.split()[0])

class Client(object):
    """Main client interface to the Nilm database."""

    client_version = version

    def __init__(self, url):
        self.http = nilmdb.client.httpclient.HTTPClient(url)

    # __enter__/__exit__ allow this class to be a context manager
    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()

    def _json_param(self, data):
        """Return compact json-encoded version of parameter"""
        return json.dumps(data, separators=(',',':'))

    def close(self):
        """Close the connection; safe to call multiple times"""
        self.http.close()

    def geturl(self):
@@ -43,13 +47,10 @@ class Client(object):
        """Return server version"""
        return self.http.get("version")

    def dbpath(self):
        """Return server database path"""
        return self.http.get("dbpath")

    def dbsize(self):
        """Return server database size as human readable string"""
        return self.http.get("dbsize")
    def dbinfo(self):
        """Return server database info (path, size, free space)
        as a dictionary."""
        return self.http.get("dbinfo")

    def stream_list(self, path = None, layout = None):
        params = {}
@@ -104,77 +105,45 @@ class Client(object):
            params["end"] = float_to_string(end)
        return self.http.get("stream/remove", params)

    def stream_insert(self, path, data, start = None, end = None):
        """Insert data into a stream. data should be a file-like object
        that provides ASCII data that matches the database layout for path.
    @contextlib.contextmanager
    def stream_insert_context(self, path, start = None, end = None):
        """Return a context manager that allows data to be efficiently
        inserted into a stream in a piecewise manner. Data is provided
        as single lines, and is aggregated and sent to the server in larger
        chunks as necessary. Data lines must match the database layout for
        the given path, and end with a newline.

        start and end are the starting and ending timestamp of this
        stream; all timestamps t in the data must satisfy 'start <= t
        < end'. If left unspecified, 'start' is the timestamp of the
        first line of data, and 'end' is the timestamp on the last line
        of data, plus a small delta of 1μs.

        Example:
            with client.stream_insert_context('/path', start, end) as ctx:
                ctx.insert_line('1234567890.0 1 2 3 4\\n')
                ctx.insert_line('1234567891.0 1 2 3 4\\n')

        For more details, see help for nilmdb.client.client.StreamInserter

        This may make multiple requests to the server, if the data is
        large enough or enough time has passed between insertions.
        """
        params = { "path": path }
        ctx = StreamInserter(self, path, start, end)
        yield ctx
        ctx.finalize()

        # See design.md for a discussion of how much data to send.
        # These are soft limits -- actual data might be rounded up.
        max_data = 1048576
        max_time = 30
        end_epsilon = 1e-6
    def stream_insert(self, path, data, start = None, end = None):
        """Insert rows of data into a stream. data should be an
        iterable object that provides ASCII data that matches the
        database layout for path. See stream_insert_context for
        details on the 'start' and 'end' parameters."""
        with self.stream_insert_context(path, start, end) as ctx:
            ctx.insert_iter(data)
        return ctx.last_response

        def extract_timestamp(line):
            return float(line.split()[0])

        def sendit():
            # If we have more data after this, use the timestamp of
            # the next line as the end. Otherwise, use the given
            # overall end time, or add end_epsilon to the last data
            # point.
            if nextline:
                block_end = extract_timestamp(nextline)
                if end and block_end > end:
                    # This is unexpected, but we'll defer to the server
                    # to return an error in this case.
                    block_end = end
            elif end:
                block_end = end
            else:
                block_end = extract_timestamp(line) + end_epsilon

            # Send it
            params["start"] = float_to_string(block_start)
            params["end"] = float_to_string(block_end)
            return self.http.put("stream/insert", block_data, params)

        clock_start = time.time()
        block_data = ""
        block_start = start
        result = None
        for (line, nextline) in nilmdb.utils.misc.pairwise(data):
            # If we don't have a starting time, extract it from the first line
            if block_start is None:
                block_start = extract_timestamp(line)

            clock_elapsed = time.time() - clock_start
            block_data += line

            # If we have enough data, or enough time has elapsed,
            # send this block to the server, and empty things out
            # for the next block.
            if (len(block_data) > max_data) or (clock_elapsed > max_time):
                result = sendit()
                block_start = None
                block_data = ""
                clock_start = time.time()

        # One last block?
        if len(block_data):
            result = sendit()

        # Return the most recent JSON result we got back, or None if
        # we didn't make any requests.
        return result
    def stream_insert_block(self, path, block, start, end):
        """Insert an entire block of data into a stream. Like
        stream_insert, except 'block' contains multiple lines of ASCII
        text and is sent in one single chunk."""
        params = { "path": path,
                   "start": float_to_string(start),
                   "end": float_to_string(end) }
        return self.http.put("stream/insert", block, params)
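The superseded stream_insert body above leans on a nilmdb.utils.misc.pairwise() helper that is not shown in this diff; judging from how (line, nextline) is consumed, with nextline falsy for the final element, it presumably behaves like the standard recipe below (an assumption, not code from the repository):

    # Assumed shape of nilmdb.utils.misc.pairwise (hypothetical sketch):
    # yields (item, next_item), with next_item = None for the last item.
    import itertools

    def pairwise(iterable):
        a, b = itertools.tee(iterable)
        next(b, None)
        return itertools.izip_longest(a, b)  # itertools.zip_longest on Python 3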
    def stream_intervals(self, path, start = None, end = None):
        """
@@ -195,8 +164,8 @@ class Client(object):
        lines of ASCII-formatted data that matches the database
        layout for the given path.

        Specify count=True to just get a count of values rather than
        the actual data.
        Specify count = True to return a count of matching data points
        rather than the actual data. The output format is unchanged.
        """
        params = {
            "path": path,
@@ -209,3 +178,202 @@ class Client(object):
            params["count"] = 1

        return self.http.get_gen("stream/extract", params, retjson = False)

    def stream_count(self, path, start = None, end = None):
        """
        Return the number of rows of data in the stream that satisfy
        the given timestamps.
        """
        counts = list(self.stream_extract(path, start, end, count = True))
        return int(counts[0])

class StreamInserter(object):
    """Object returned by stream_insert_context() that manages
    the insertion of rows of data into a particular path.

    The basic data flow is that we are filling a contiguous interval
    on the server, with no gaps, that extends from timestamp 'start'
    to timestamp 'end'. Data timestamps satisfy 'start <= t < end'.
    Data is provided by the user one line at a time with
    .insert_line() or .insert_iter().

    1. The first inserted line begins a new interval that starts at
    'start'. If 'start' is not given, it is deduced from the first
    line's timestamp.

    2. Subsequent lines go into the same contiguous interval. As lines
    are inserted, this routine may make multiple insertion requests to
    the server, but will structure the timestamps to leave no gaps.

    3. The current contiguous interval can be completed by manually
    calling .finalize(), which the context manager will also do
    automatically. This will send any remaining data to the server,
    using the 'end' timestamp to end the interval.

    After a .finalize(), inserting new data goes back to step 1.

    .update_start() can be called before step 1 to change the start
    time for the interval. .update_end() can be called before step 3
    to change the end time for the interval.
    """

    # See design.md for a discussion of how much data to send.
    # These are soft limits -- actual data might be rounded up.
    # We send when we have a certain amount of data queued, or
    # when a certain amount of time has passed since the last send.
    _max_data = 1048576
    _max_time = 30

    # Delta to add to the final timestamp, if "end" wasn't given
    _end_epsilon = 1e-6

    def __init__(self, client, path, start = None, end = None):
        """'client' is the client object. 'path' is the database
        path to insert to. 'start' and 'end' are used for the first
        contiguous interval."""
        self.last_response = None

        self._client = client
        self._path = path

        # Start and end for the overall contiguous interval we're
        # filling
        self._interval_start = start
        self._interval_end = end

        # Data for the specific block we're building up to send
        self._block_data = []
        self._block_len = 0
        self._block_start = None

        # Time of last request
        self._last_time = time.time()

        # We keep a buffer of the two most recently inserted lines.
        # Only the older one actually gets processed; the newer one
        # is used to "look ahead" to the next timestamp if we need
        # to internally split an insertion into two requests.
        self._line_old = None
        self._line_new = None

    def insert_iter(self, iter):
        """Insert all lines of ASCII formatted data from the given
        iterable. Lines must be terminated with '\\n'."""
        for line in iter:
            self.insert_line(line)

    def insert_line(self, line, allow_intermediate = True):
        """Insert a single line of ASCII formatted data. Line
        must be terminated with '\\n'."""
        if line and (len(line) < 1 or line[-1] != '\n'):
            raise ValueError("lines must end with a newline character")

        # Store this new line, but process the previous (old) one.
        # This lets us "look ahead" to the next line.
        self._line_old = self._line_new
        self._line_new = line
        if self._line_old is None:
            return

        # If starting a new block, pull out the timestamp if needed.
        if self._block_start is None:
            if self._interval_start is not None:
                # User provided a start timestamp. Use it once, then
                # clear it for the next block.
                self._block_start = self._interval_start
                self._interval_start = None
            else:
                # Extract timestamp from the first row
                self._block_start = extract_timestamp(self._line_old)

        # Save the line
        self._block_data.append(self._line_old)
        self._block_len += len(self._line_old)

        if allow_intermediate:
            # Send an intermediate block to the server if needed.
            elapsed = time.time() - self._last_time
            if (self._block_len > self._max_data) or (elapsed > self._max_time):
                self._send_block_intermediate()

    def update_start(self, start):
        """Update the start time for the next contiguous interval.
        Call this before starting to insert data for a new interval,
        for example, after .finalize()"""
        self._interval_start = start

    def update_end(self, end):
        """Update the end time for the current contiguous interval.
        Call this before .finalize()"""
        self._interval_end = end

    def finalize(self):
        """Stop filling the current contiguous interval.
        All outstanding data will be sent, and the end time of the
        interval will be taken from the 'end' argument used when
        initializing this class, or the most recent value passed to
        update_end(), or the last timestamp plus a small epsilon
        value if no other endpoint was provided.

        If more data is inserted after a finalize(), it will become
        part of a new interval and there may be a gap left in-between."""
        # Special marker tells insert_line that this is the end
        self.insert_line(None, allow_intermediate = False)

        if self._block_len > 0:
            # We have data pending, so send the final block
            self._send_block_final()
        elif None not in (self._interval_start, self._interval_end):
            # We have no data, but enough information to create an
            # empty interval.
            self._block_start = self._interval_start
            self._interval_start = None
            self._send_block_final()
        else:
            # No data, and no timestamps to use to create an empty
            # interval.
            pass

        # Make sure both timestamps are emptied for future intervals.
        self._interval_start = None
        self._interval_end = None

    def _send_block_intermediate(self):
        """Send data, when we still have more data to send.
        Use the timestamp from the next line, so that the blocks
        are contiguous."""
        block_end = extract_timestamp(self._line_new)
        if self._interval_end is not None and block_end > self._interval_end:
            # Something's fishy -- the timestamp we found is after
            # the user's specified end. Limit it here, and the
            # server will return an error.
            block_end = self._interval_end
        self._send_block(block_end)

    def _send_block_final(self):
        """Send data, when this is the last block for the interval.
        There is no next line, so figure out the actual interval end
        using interval_end or end_epsilon."""
        if self._interval_end is not None:
            # Use the user's specified end timestamp
            block_end = self._interval_end
            # Clear it in case we send more intervals in the future.
            self._interval_end = None
        else:
            # Add an epsilon to the last timestamp we saw
            block_end = extract_timestamp(self._line_old) + self._end_epsilon
        self._send_block(block_end)

    def _send_block(self, block_end):
        """Send current block to the server"""
        self.last_response = self._client.stream_insert_block(
            self._path, "".join(self._block_data),
            self._block_start, block_end)

        # Clear out the block
        self._block_data = []
        self._block_len = 0
        self._block_start = None

        # Note when we sent it
        self._last_time = time.time()
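A short usage sketch of the lifecycle described in the docstring, assuming a reachable server and an existing stream; the path and timestamps are illustrative only:

    # Two contiguous intervals from one inserter, with a deliberate gap.
    with client.stream_insert_context("/test/raw") as ctx:
        ctx.insert_line("100.0 1 2 3 4\n")
        ctx.insert_line("101.0 1 2 3 4\n")
        ctx.update_end(102.0)    # first interval becomes [100.0, 102.0)
        ctx.finalize()           # flushes the block and ends the interval
        ctx.update_start(200.0)  # start a second interval after a gap
        ctx.insert_line("200.0 1 2 3 4\n")
        # exiting the context calls finalize(); with no update_end(),
        # the end is the last timestamp plus _end_epsilon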
@@ -2,13 +2,8 @@

import nilmdb
import nilmdb.utils
from nilmdb.utils.printf import *
from nilmdb.client.errors import *
from nilmdb.client.errors import ClientError, ServerError, Error

import time
import sys
import re
import os
import simplejson as json
import urlparse
import pycurl
@@ -38,14 +33,28 @@ class HTTPClient(object):
        self.curl.setopt(pycurl.URL, url)
        self.url = url

    def _check_busy_and_set_upload(self, upload):
        """Sets the pycurl.UPLOAD option, but also raises a more
        friendly exception if the client is already serving a request."""
        try:
            self.curl.setopt(pycurl.UPLOAD, upload)
        except pycurl.error as e:
            if "is currently running" in str(e):
                raise Exception("Client is already performing a request, and "
                                "nesting calls is not supported.")
            else: # pragma: no cover (shouldn't happen)
                raise

    def _check_error(self, body = None):
        code = self.curl.getinfo(pycurl.RESPONSE_CODE)
        if code == 200:
            return
        # Default variables for exception
        # Default variables for exception. We use the entire body as
        # the default message, in case we can't extract it from a JSON
        # response.
        args = { "url" : self.url,
                 "status" : str(code),
                 "message" : None,
                 "message" : body,
                 "traceback" : None }
        try:
            # Fill with server-provided data if we can
@@ -83,11 +92,11 @@ class HTTPClient(object):
                self._status = int(data.split(" ")[1])
            self._headers += data
        self.curl.setopt(pycurl.HEADERFUNCTION, header_callback)
        def func(callback):
        def perform(callback):
            self.curl.setopt(pycurl.WRITEFUNCTION, callback)
            self.curl.perform()
        try:
            with nilmdb.utils.Iteratorizer(func, curl_hack = True) as it:
            with nilmdb.utils.Iteratorizer(perform, curl_hack = True) as it:
                for i in it:
                    if self._status == 200:
                        # If we had a 200 response, yield the data to caller.
@@ -159,12 +168,12 @@ class HTTPClient(object):

    def get(self, url, params = None, retjson = True):
        """Simple GET"""
        self.curl.setopt(pycurl.UPLOAD, 0)
        self._check_busy_and_set_upload(0)
        return self._doreq(url, params, retjson)

    def put(self, url, postdata, params = None, retjson = True):
        """Simple PUT"""
        self.curl.setopt(pycurl.UPLOAD, 1)
        self._check_busy_and_set_upload(1)
        self._setup_url(url, params)
        data = cStringIO.StringIO(postdata)
        self.curl.setopt(pycurl.READFUNCTION, data.read)
@@ -187,12 +196,12 @@ class HTTPClient(object):

    def get_gen(self, url, params = None, retjson = True):
        """Simple GET, returning a generator"""
        self.curl.setopt(pycurl.UPLOAD, 0)
        self._check_busy_and_set_upload(0)
        return self._doreq_gen(url, params, retjson)

    def put_gen(self, url, postdata, params = None, retjson = True):
        """Simple PUT, returning a generator"""
        self.curl.setopt(pycurl.UPLOAD, 1)
        self._check_busy_and_set_upload(1)
        self._setup_url(url, params)
        data = cStringIO.StringIO(postdata)
        self.curl.setopt(pycurl.READFUNCTION, data.read)
@@ -1,3 +1,3 @@
"""nilmdb.cmdline"""

from .cmdline import Cmdline
from nilmdb.cmdline.cmdline import Cmdline
@@ -4,21 +4,17 @@ import nilmdb
from nilmdb.utils.printf import *
from nilmdb.utils import datetime_tz

import dateutil.parser
import sys
import re
import argparse
from argparse import ArgumentDefaultsHelpFormatter as def_form

version = "1.0"

# Valid subcommands. Defined in separate files just to break
# things up -- they're still called with Cmdline as self.
subcommands = [ "info", "create", "list", "metadata", "insert", "extract",
                "remove", "destroy" ]

# Import the subcommand modules. Equivalent way of doing this would be
#   from . import info as cmd_info
# Import the subcommand modules
subcmd_mods = {}
for cmd in subcommands:
    subcmd_mods[cmd] = __import__("nilmdb.cmdline." + cmd, fromlist = [ cmd ])
@@ -30,8 +26,8 @@ class JimArgumentParser(argparse.ArgumentParser):

class Cmdline(object):

    def __init__(self, argv):
        self.argv = argv
    def __init__(self, argv = None):
        self.argv = argv or sys.argv[1:]
        self.client = None

    def arg_time(self, toparse):
@@ -95,9 +91,6 @@ class Cmdline(object):
        return dt.strftime("%a, %d %b %Y %H:%M:%S.%f %z")

    def parser_setup(self):
        version_string = sprintf("nilmtool %s, client library %s",
                                 version, nilmdb.Client.client_version)

        self.parser = JimArgumentParser(add_help = False,
                                        formatter_class = def_form)

@@ -105,7 +98,7 @@ class Cmdline(object):
        group.add_argument("-h", "--help", action='help',
                           help='show this help message and exit')
        group.add_argument("-V", "--version", action="version",
                           version=version_string)
                           version = nilmdb.__version__)

        group = self.parser.add_argument_group("Server")
        group.add_argument("-u", "--url", action="store",
@@ -1,9 +1,7 @@
from nilmdb.utils.printf import *
import nilmdb
import nilmdb.client
import textwrap

from argparse import ArgumentDefaultsHelpFormatter as def_form
from argparse import RawDescriptionHelpFormatter as raw_form

def setup(self, sub):
@@ -1,8 +1,6 @@
from __future__ import print_function
from nilmdb.utils.printf import *
import nilmdb
import nilmdb.client
import sys

def setup(self, sub):
    cmd = sub.add_parser("extract", help="Extract data",
@@ -1,4 +1,6 @@
import nilmdb
from nilmdb.utils.printf import *
from nilmdb.utils import human_size

from argparse import ArgumentDefaultsHelpFormatter as def_form

@@ -13,8 +15,10 @@ def setup(self, sub):

def cmd_info(self):
    """Print info about the server"""
    printf("Client library version: %s\n", self.client.client_version)
    printf("Client version: %s\n", nilmdb.__version__)
    printf("Server version: %s\n", self.client.version())
    printf("Server URL: %s\n", self.client.geturl())
    printf("Server database path: %s\n", self.client.dbpath())
    printf("Server database size: %s\n", self.client.dbsize())
    dbinfo = self.client.dbinfo()
    printf("Server database path: %s\n", dbinfo["path"])
    printf("Server database size: %s\n", human_size(dbinfo["size"]))
    printf("Server database free space: %s\n", human_size(dbinfo["free"]))
@@ -53,8 +53,6 @@ def cmd_insert(self):
    if len(streams) != 1:
        self.die("error getting stream info for path %s", self.args.path)

    layout = streams[0][1]

    if self.args.start and len(self.args.file) != 1:
        self.die("error: --start can only be used with one input file")

@@ -93,7 +91,7 @@ def cmd_insert(self):

    # Insert the data
    try:
        result = self.client.stream_insert(self.args.path, ts)
        self.client.stream_insert(self.args.path, ts)
    except nilmdb.client.Error as e:
        # TODO: It would be nice to be able to offer better errors
        # here, particularly in the case of overlap, which just shows
@@ -1,6 +1,4 @@
from nilmdb.utils.printf import *
import nilmdb
import nilmdb.client

import fnmatch
import argparse
@@ -49,8 +47,8 @@ def cmd_list_verify(self):
        self.args.path = self.args.path_positional

    if self.args.start is not None and self.args.end is not None:
        if self.args.start > self.args.end:
            self.parser.error("start is after end")
        if self.args.start >= self.args.end:
            self.parser.error("start must precede end")

def cmd_list(self):
    """List available streams"""
@@ -1,7 +1,6 @@
from nilmdb.utils.printf import *
import nilmdb
import nilmdb.client
import sys

def setup(self, sub):
    cmd = sub.add_parser("remove", help="Remove data",
@@ -9,8 +8,7 @@ def setup(self, sub):
                         Remove all data from a specified time range within a
                         stream.
                         """)
    cmd.set_defaults(verify = cmd_remove_verify,
                     handler = cmd_remove)
    cmd.set_defaults(handler = cmd_remove)

    group = cmd.add_argument_group("Data selection")
    group.add_argument("path",
@@ -26,11 +24,6 @@ def setup(self, sub):
    group.add_argument("-c", "--count", action="store_true",
                       help="Output number of data points removed")

def cmd_remove_verify(self):
    if self.args.start is not None and self.args.end is not None:
        if self.args.start > self.args.end:
            self.parser.error("start is after end")

def cmd_remove(self):
    try:
        count = self.client.stream_remove(self.args.path,
nilmdb/scripts/__init__.py (1 line, new file)
@@ -0,0 +1 @@
# Command line scripts
nilmdb/scripts/nilmdb_server.py (81 lines, new executable file)
@@ -0,0 +1,81 @@
#!/usr/bin/python

import nilmdb.server
import argparse
import os
import socket

def main():
    """Main entry point for the 'nilmdb-server' command line script"""

    parser = argparse.ArgumentParser(
        description = 'Run the NilmDB server',
        formatter_class = argparse.ArgumentDefaultsHelpFormatter)

    parser.add_argument("-V", "--version", action="version",
                        version = nilmdb.__version__)

    group = parser.add_argument_group("Standard options")
    group.add_argument('-a', '--address',
                       help = 'Only listen on the given address',
                       default = '0.0.0.0')
    group.add_argument('-p', '--port', help = 'Listen on the given port',
                       type = int, default = 12380)
    group.add_argument('-d', '--database', help = 'Database directory',
                       default = os.path.join(os.getcwd(), "db"))
    group.add_argument('-q', '--quiet', help = 'Silence output',
                       action = 'store_true')

    group = parser.add_argument_group("Debug options")
    group.add_argument('-y', '--yappi', help = 'Run under yappi profiler and '
                       'invoke interactive shell afterwards',
                       action = 'store_true')

    args = parser.parse_args()

    # Create database object
    db = nilmdb.server.NilmDB(args.database)

    # Configure the server
    if args.quiet:
        embedded = True
    else:
        embedded = False
    server = nilmdb.server.Server(db,
                                  host = args.address,
                                  port = args.port,
                                  embedded = embedded)

    # Print info
    if not args.quiet:
        print "Database: %s" % (os.path.realpath(args.database))
        if args.address == '0.0.0.0' or args.address == '::':
            host = socket.getfqdn()
        else:
            host = args.address
        print "Server URL: http://%s:%d/" % ( host, args.port)
        print "----"

    # Run it
    if args.yappi:
        print "Running in yappi"
        try:
            import yappi
            yappi.start()
            server.start(blocking = True)
        finally:
            yappi.stop()
            yappi.print_stats(sort_type = yappi.SORTTYPE_TTOT, limit = 50)
            from IPython import embed
            embed(header = "Use the yappi object to explore further, "
                           "quit to exit")
    else:
        server.start(blocking = True)

    # Clean up
    if not args.quiet:
        print "Closing database"
    db.close()

if __name__ == "__main__":
    main()
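Assuming setup.py registers this module as a console-script entry point (the entry point itself is not shown in this diff), the server can then be started with the flags defined above:

    nilmdb-server --database /home/user/db --address 127.0.0.1 --port 12380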
nilmdb/scripts/nilmtool.py (10 lines, new executable file)
@@ -0,0 +1,10 @@
#!/usr/bin/python

import nilmdb.cmdline

def main():
    """Main entry point for the 'nilmtool' command line script"""
    nilmdb.cmdline.Cmdline().run()

if __name__ == "__main__":
    main()
@@ -1,15 +1,22 @@
"""nilmdb.server"""

from __future__ import absolute_import

# Try to set up pyximport to automatically rebuild Cython modules. If
# this doesn't work, it's OK, as long as the modules were built externally.
# (e.g. python setup.py build_ext --inplace)
try:
    import Cython
    import distutils.version
    if (distutils.version.LooseVersion(Cython.__version__) <
        distutils.version.LooseVersion("0.16")): # pragma: no cover
        raise ImportError("Cython version too old")
    import pyximport
    pyximport.install()
    import layout
except: # pragma: no cover
    pyximport.install(inplace = True, build_in_temp = False)
except ImportError: # pragma: no cover
    pass

from .nilmdb import NilmDB
from .server import Server
from .errors import *
import nilmdb.server.layout
from nilmdb.server.nilmdb import NilmDB
from nilmdb.server.server import Server
from nilmdb.server.errors import NilmDBError, StreamError, OverlapError
@@ -8,13 +8,21 @@ import nilmdb
|
||||
from nilmdb.utils.printf import *
|
||||
|
||||
import os
|
||||
import sys
|
||||
import cPickle as pickle
|
||||
import struct
|
||||
import fnmatch
|
||||
import mmap
|
||||
import re
|
||||
|
||||
# If we have the faulthandler module, use it. All of the mmap stuff
|
||||
# might trigger a SIGSEGV or SIGBUS if we're not careful, and
|
||||
# faulthandler will give a traceback in that case. (the Python
|
||||
# interpreter will still die either way).
|
||||
try: # pragma: no cover
|
||||
import faulthandler
|
||||
faulthandler.enable()
|
||||
except: # pragma: no cover
|
||||
pass
|
||||
|
||||
# Up to 256 open file descriptors at any given time.
|
||||
# These variables are global so they can be used in the decorator arguments.
|
||||
table_cache_size = 16
|
||||
@@ -91,8 +99,7 @@ class BulkData(object):
|
||||
"float32": 'f',
|
||||
"float64": 'd',
|
||||
}
|
||||
for n in range(layout.count):
|
||||
struct_fmt += struct_mapping[layout.datatype]
|
||||
struct_fmt += struct_mapping[layout.datatype] * layout.count
|
||||
except KeyError:
|
||||
raise ValueError("no such layout, or bad data types")
|
||||
|
||||
@@ -164,6 +171,52 @@ class BulkData(object):
|
||||
ospath = os.path.join(self.root, *elements)
|
||||
return Table(ospath)
|
||||
|
||||
@nilmdb.utils.must_close(wrap_verify = True)
|
||||
class File(object):
|
||||
"""Object representing a single file on disk. Data can be appended,
|
||||
or the self.mmap handle can be used for random reads."""
|
||||
|
||||
def __init__(self, root, subdir, filename):
|
||||
# Create path if it doesn't exist
|
||||
try:
|
||||
os.mkdir(os.path.join(root, subdir))
|
||||
except OSError:
|
||||
pass
|
||||
|
||||
# Open/create file
|
||||
self._f = open(os.path.join(root, subdir, filename), "a+b", 0)
|
||||
|
||||
# Seek to end, and get size
|
||||
self._f.seek(0, 2)
|
||||
self.size = self._f.tell()
|
||||
|
||||
# Open mmap object
|
||||
self.mmap = None
|
||||
self._mmap_reopen()
|
||||
|
||||
def _mmap_reopen(self):
|
||||
if self.size == 0:
|
||||
# Don't mmap if the file is empty; it would fail
|
||||
pass
|
||||
elif self.mmap is None:
|
||||
# Not opened yet, so open it
|
||||
self.mmap = mmap.mmap(self._f.fileno(), 0)
|
||||
else:
|
||||
# Already opened, so just resize it
|
||||
self.mmap.resize(self.size)
|
||||
|
||||
def close(self):
|
||||
if self.mmap is not None:
|
||||
self.mmap.close()
|
||||
self._f.close()
|
||||
|
||||
def append(self, data):
|
||||
# Write data, flush it, and resize our mmap accordingly
|
||||
self._f.write(data)
|
||||
self._f.flush()
|
||||
self.size += len(data)
|
||||
self._mmap_reopen()
|
||||
|
||||
@nilmdb.utils.must_close(wrap_verify = True)
|
||||
class Table(object):
|
||||
"""Tools to help access a single table (data at a specific OS path)."""
|
||||
@@ -185,12 +238,12 @@ class Table(object):
|
||||
packer = struct.Struct(struct_fmt)
|
||||
rows_per_file = max(file_size // packer.size, 1)
|
||||
|
||||
format = { "rows_per_file": rows_per_file,
|
||||
fmt = { "rows_per_file": rows_per_file,
|
||||
"files_per_dir": files_per_dir,
|
||||
"struct_fmt": struct_fmt,
|
||||
"version": 1 }
|
||||
with open(os.path.join(root, "_format"), "wb") as f:
|
||||
pickle.dump(format, f, 2)
|
||||
pickle.dump(fmt, f, 2)
|
||||
|
||||
# Normal methods
|
||||
def __init__(self, root):
|
||||
@@ -199,22 +252,22 @@ class Table(object):
|
||||
|
||||
# Load the format and build packer
|
||||
with open(os.path.join(self.root, "_format"), "rb") as f:
|
||||
format = pickle.load(f)
|
||||
fmt = pickle.load(f)
|
||||
|
||||
if format["version"] != 1: # pragma: no cover (just future proofing)
|
||||
raise NotImplementedError("version " + format["version"] +
|
||||
if fmt["version"] != 1: # pragma: no cover (just future proofing)
|
||||
raise NotImplementedError("version " + fmt["version"] +
|
||||
" bulk data store not supported")
|
||||
|
||||
self.rows_per_file = format["rows_per_file"]
|
||||
self.files_per_dir = format["files_per_dir"]
|
||||
self.packer = struct.Struct(format["struct_fmt"])
|
||||
self.rows_per_file = fmt["rows_per_file"]
|
||||
self.files_per_dir = fmt["files_per_dir"]
|
||||
self.packer = struct.Struct(fmt["struct_fmt"])
|
||||
self.file_size = self.packer.size * self.rows_per_file
|
||||
|
||||
# Find nrows
|
||||
self.nrows = self._get_nrows()
|
||||
|
||||
def close(self):
|
||||
self.mmap_open.cache_remove_all()
|
||||
self.file_open.cache_remove_all()
|
||||
|
||||
# Internal helpers
|
||||
def _get_nrows(self):
|
||||
@@ -278,37 +331,11 @@ class Table(object):
|
||||
|
||||
# Cache open files
|
||||
@nilmdb.utils.lru_cache(size = fd_cache_size,
|
||||
keys = slice(0,3), # exclude newsize
|
||||
onremove = lambda x: x.close())
|
||||
def mmap_open(self, subdir, filename, newsize = None):
|
||||
onremove = lambda f: f.close())
|
||||
def file_open(self, subdir, filename):
|
||||
"""Open and map a given 'subdir/filename' (relative to self.root).
|
||||
Will be automatically closed when evicted from the cache.
|
||||
|
||||
If 'newsize' is provided, the file is truncated to the given
|
||||
size before the mapping is returned. (Note that the LRU cache
|
||||
on this function means the truncate will only happen if the
|
||||
object isn't already cached; mmap.resize should be used too.)"""
|
||||
try:
|
||||
os.mkdir(os.path.join(self.root, subdir))
|
||||
except OSError:
|
||||
pass
|
||||
f = open(os.path.join(self.root, subdir, filename), "a+", 0)
|
||||
if newsize is not None:
|
||||
# mmap can't map a zero-length file, so this allows the
|
||||
# caller to set the filesize between file creation and
|
||||
# mmap.
|
||||
f.truncate(newsize)
|
||||
mm = mmap.mmap(f.fileno(), 0)
|
||||
return mm
|
||||
|
||||
def mmap_open_resize(self, subdir, filename, newsize):
|
||||
"""Open and map a given 'subdir/filename' (relative to self.root).
|
||||
The file is resized to the given size."""
|
||||
# Pass new size to mmap_open
|
||||
mm = self.mmap_open(subdir, filename, newsize)
|
||||
# In case we got a cached copy, need to call mm.resize too.
|
||||
mm.resize(newsize)
|
||||
return mm
|
||||
Will be automatically closed when evicted from the cache."""
|
||||
return File(self.root, subdir, filename)
|
||||
|
||||
def append(self, data):
|
||||
"""Append the data and flush it to disk.
|
||||
@@ -320,14 +347,13 @@ class Table(object):
|
||||
(subdir, fname, offset, count) = self._offset_from_row(self.nrows)
|
||||
if count > remaining:
|
||||
count = remaining
|
||||
newsize = offset + count * self.packer.size
|
||||
mm = self.mmap_open_resize(subdir, fname, newsize)
|
||||
mm.seek(offset)
|
||||
|
||||
f = self.file_open(subdir, fname)
|
||||
|
||||
# Write the data
|
||||
for i in xrange(count):
|
||||
row = dataiter.next()
|
||||
mm.write(self.packer.pack(*row))
|
||||
f.append(self.packer.pack(*row))
|
||||
remaining -= count
|
||||
self.nrows += count
|
||||
|
||||
@@ -354,7 +380,7 @@ class Table(object):
|
||||
(subdir, filename, offset, count) = self._offset_from_row(row)
|
||||
if count > remaining:
|
||||
count = remaining
|
||||
mm = self.mmap_open(subdir, filename)
|
||||
mm = self.file_open(subdir, filename).mmap
|
||||
for i in xrange(count):
|
||||
ret.append(list(self.packer.unpack_from(mm, offset)))
|
||||
offset += self.packer.size
|
||||
@@ -366,7 +392,7 @@ class Table(object):
|
||||
if key < 0 or key >= self.nrows:
|
||||
raise IndexError("Index out of range")
|
||||
(subdir, filename, offset, count) = self._offset_from_row(key)
|
||||
mm = self.mmap_open(subdir, filename)
|
||||
mm = self.file_open(subdir, filename).mmap
|
||||
# unpack_from ignores the mmap object's current seek position
|
||||
return list(self.packer.unpack_from(mm, offset))
|
||||
|
||||
@@ -413,8 +439,8 @@ class Table(object):
|
||||
# are generally easier if we don't have to special-case that.
|
||||
if (len(merged) == 1 and
|
||||
merged[0][0] == 0 and merged[0][1] == self.rows_per_file):
|
||||
# Close potentially open file in mmap_open LRU cache
|
||||
self.mmap_open.cache_remove(self, subdir, filename)
|
||||
# Close potentially open file in file_open LRU cache
|
||||
self.file_open.cache_remove(self, subdir, filename)
|
||||
|
||||
# Delete files
|
||||
os.remove(datafile)
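
The file_open() method above pairs an LRU cache with an eviction hook that closes whatever falls out of the cache. A minimal sketch of that pattern using only the standard library (nilmdb's real decorator is nilmdb.utils.lru_cache, which additionally supports the 'keys' slice shown above; all names below are illustrative, not nilmdb's code):

    from collections import OrderedDict

    def lru_cache_sketch(size, onremove):
        def wrap(func):
            cache = OrderedDict()
            def wrapper(*args):
                if args in cache:
                    value = cache.pop(args)   # pop and re-insert to mark as fresh
                else:
                    value = func(*args)
                    if len(cache) >= size:
                        (key, old) = cache.popitem(last = False)  # evict LRU entry
                        onremove(old)         # e.g. close the evicted file object
                cache[args] = value
                return value
            return wrapper
        return wrap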

@@ -36,7 +36,7 @@ cdef class Interval:
"""
'start' and 'end' are arbitrary floats that represent time
"""
if start > end:
if start >= end:
# Explicitly disallow zero-width intervals (since they're half-open)
raise IntervalError("start %s must precede end %s" % (start, end))
self.start = float(start)
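
Tightening the check from 'start > end' to 'start >= end' also rejects zero-width intervals: since intervals are half-open, [t, t) can never contain a point. A small illustration of the [start, end) convention (not nilmdb code):

    def contains(start, end, t):
        # Half-open: the start is included, the end is excluded.
        return start <= t < end

    print contains(100.0, 200.0, 100.0)   # True
    print contains(100.0, 200.0, 200.0)   # False
    print contains(100.0, 100.0, 100.0)   # False; a zero-width interval is empty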

@@ -15,11 +15,9 @@ from nilmdb.utils.printf import *
from nilmdb.server.interval import (Interval, DBInterval,
IntervalSet, IntervalError)
from nilmdb.server import bulkdata
from nilmdb.server.errors import *
from nilmdb.server.errors import NilmDBError, StreamError, OverlapError

import sqlite3
import time
import sys
import os
import errno
import bisect
@@ -80,7 +78,10 @@ class NilmDB(object):
verbose = 0

def __init__(self, basepath, sync=True, max_results=None,
bulkdata_args={}):
bulkdata_args=None):
if bulkdata_args is None:
bulkdata_args = {}

# set up path
self.basepath = os.path.abspath(basepath)

@@ -141,6 +142,15 @@ class NilmDB(object):
with self.con:
cur.execute("PRAGMA user_version = {v:d}".format(v=version))

def _check_user_times(self, start, end):
if start is None:
start = -1e12
if end is None:
end = 1e12
if start >= end:
raise NilmDBError("start must precede end")
return (start, end)

@nilmdb.utils.lru_cache(size = 16)
def _get_intervals(self, stream_id):
"""
@@ -156,7 +166,7 @@ class NilmDB(object):
iset += DBInterval(start_time, end_time,
start_time, end_time,
start_pos, end_pos)
except IntervalError as e: # pragma: no cover
except IntervalError: # pragma: no cover
raise NilmDBError("unexpected overlap in ranges table!")

return iset
@@ -302,7 +312,8 @@ class NilmDB(object):
"""
stream_id = self._stream_id(path)
intervals = self._get_intervals(stream_id)
requested = Interval(start or 0, end or 1e12)
(start, end) = self._check_user_times(start, end)
requested = Interval(start, end)
result = []
for n, i in enumerate(intervals.intersection(requested)):
if n >= self.max_results:
@@ -395,7 +406,7 @@ class NilmDB(object):
path: Path at which to add the data
start: Starting timestamp
end: Ending timestamp
data: Rows of data, to be passed to PyTable's table.append
data: Rows of data, to be passed to bulkdata table.append
method. E.g. nilmdb.layout.Parser.data
"""
# First check for basic overlap using timestamp info given.
@@ -416,7 +427,7 @@ class NilmDB(object):
self._add_interval(stream_id, interval, row_start, row_end)

# And that's all
return "ok"
return

def _find_start(self, table, dbinterval):
"""
@@ -474,7 +485,8 @@ class NilmDB(object):
stream_id = self._stream_id(path)
table = self.data.getnode(path)
intervals = self._get_intervals(stream_id)
requested = Interval(start or 0, end or 1e12)
(start, end) = self._check_user_times(start, end)
requested = Interval(start, end)
result = []
matched = 0
remaining = self.max_results
@@ -520,12 +532,10 @@ class NilmDB(object):
stream_id = self._stream_id(path)
table = self.data.getnode(path)
intervals = self._get_intervals(stream_id)
to_remove = Interval(start or 0, end or 1e12)
(start, end) = self._check_user_times(start, end)
to_remove = Interval(start, end)
removed = 0

if start == end:
return 0

# Can't remove intervals from within the iterator, so we need to
# remember what's currently in the intersection now.
all_candidates = list(intervals.intersection(to_remove, orig = True))
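
The new _check_user_times() helper, called from the three query paths above, replaces the old 'start or 0' / 'end or 1e12' idiom (which mishandled legitimate zero and negative timestamps) with explicit None checks and sentinel bounds. A standalone mirror of its behavior, for illustration only:

    def check_user_times(start, end):
        if start is None:
            start = -1e12             # effectively unbounded on the left
        if end is None:
            end = 1e12                # effectively unbounded on the right
        if start >= end:
            raise ValueError("start must precede end")  # NilmDBError in the real code
        return (start, end)

    print check_user_times(None, None)    # (-1e12, 1e12)
    print check_user_times(-2.0, None)    # (-2.0, 1e12); negative times now work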

@@ -5,18 +5,17 @@
from __future__ import absolute_import
import nilmdb
from nilmdb.utils.printf import *
from nilmdb.server.errors import *
from nilmdb.server.errors import NilmDBError

import cherrypy
import sys
import time
import os
import simplejson as json
import decorator
import traceback
import psutil

try:
import cherrypy
cherrypy.tools.json_out
except: # pragma: no cover
sys.stderr.write("Cherrypy 3.2+ required\n")
@@ -26,8 +25,6 @@ class NilmApp(object):
def __init__(self, db):
self.db = db

version = "1.2"

# Decorators
def chunked_response(func):
"""Decorator to enable chunked responses."""
@@ -57,7 +54,7 @@ def workaround_cp_bug_1200(func, *args, **kwargs): # pragma: no cover
try:
for val in func(*args, **kwargs):
yield val
except (LookupError, UnicodeError) as e:
except (LookupError, UnicodeError):
raise Exception("bug workaround; real exception is:\n" +
traceback.format_exc())

@@ -84,9 +81,8 @@ def exception_to_httperror(*expected):
class Root(NilmApp):
"""Root application for NILM database"""

def __init__(self, db, version):
def __init__(self, db):
super(Root, self).__init__(db)
self.server_version = version

# /
@cherrypy.expose
@@ -102,19 +98,18 @@ class Root(NilmApp):
@cherrypy.expose
@cherrypy.tools.json_out()
def version(self):
return self.server_version
return nilmdb.__version__

# /dbpath
# /dbinfo
@cherrypy.expose
@cherrypy.tools.json_out()
def dbpath(self):
return self.db.get_basepath()

# /dbsize
@cherrypy.expose
@cherrypy.tools.json_out()
def dbsize(self):
return nilmdb.utils.du(self.db.get_basepath())
def dbinfo(self):
"""Return a dictionary with the database path,
size of the database in bytes, and free disk space in bytes"""
path = self.db.get_basepath()
return { "path": path,
"size": nilmdb.utils.du(path),
"free": psutil.disk_usage(path).free }

class Stream(NilmApp):
"""Stream-specific operations"""
@@ -182,7 +177,6 @@ class Stream(NilmApp):
dictionary"""
data_dict = json.loads(data)
self.db.stream_set_metadata(path, data_dict)
return "ok"

# /stream/update_metadata?path=/newton/prep&data=<json>
@cherrypy.expose
@@ -193,7 +187,6 @@ class Stream(NilmApp):
should be a json-encoded dictionary"""
data_dict = json.loads(data)
self.db.stream_update_metadata(path, data_dict)
return "ok"

# /stream/insert?path=/newton/prep
@cherrypy.expose
@@ -228,31 +221,29 @@ class Stream(NilmApp):
"error parsing input data: " +
e.message)

if (not parser.min_timestamp or not parser.max_timestamp or
not len(parser.data)):
raise cherrypy.HTTPError("400 Bad Request",
"no data provided")

# Check limits
start = float(start)
end = float(end)
if parser.min_timestamp < start:
if start >= end:
raise cherrypy.HTTPError("400 Bad Request",
"start must precede end")
if parser.min_timestamp is not None and parser.min_timestamp < start:
raise cherrypy.HTTPError("400 Bad Request", "Data timestamp " +
repr(parser.min_timestamp) +
" < start time " + repr(start))
if parser.max_timestamp >= end:
if parser.max_timestamp is not None and parser.max_timestamp >= end:
raise cherrypy.HTTPError("400 Bad Request", "Data timestamp " +
repr(parser.max_timestamp) +
" >= end time " + repr(end))

# Now do the nilmdb insert, passing it the parser full of data.
try:
result = self.db.stream_insert(path, start, end, parser.data)
self.db.stream_insert(path, start, end, parser.data)
except NilmDBError as e:
raise cherrypy.HTTPError("400 Bad Request", e.message)

# Done
return "ok"
return

# /stream/remove?path=/newton/prep
# /stream/remove?path=/newton/prep&start=1234567890.0&end=1234567899.0
@@ -270,9 +261,9 @@ class Stream(NilmApp):
if end is not None:
end = float(end)
if start is not None and end is not None:
if end < start:
if start >= end:
raise cherrypy.HTTPError("400 Bad Request",
"end before start")
"start must precede end")
return self.db.stream_remove(path, start, end)

# /stream/intervals?path=/newton/prep
@@ -297,9 +288,9 @@ class Stream(NilmApp):
end = float(end)

if start is not None and end is not None:
if end < start:
if start >= end:
raise cherrypy.HTTPError("400 Bad Request",
"end before start")
"start must precede end")

streams = self.db.stream_list(path = path)
if len(streams) != 1:
@@ -309,7 +300,7 @@ class Stream(NilmApp):
def content(start, end):
# Note: disable chunked responses to see tracebacks from here.
while True:
(intervals, restart) = self.db.stream_intervals(path,start,end)
(intervals, restart) = self.db.stream_intervals(path, start, end)
response = ''.join([ json.dumps(i) + "\n" for i in intervals ])
yield response
if restart == 0:
@@ -337,9 +328,9 @@ class Stream(NilmApp):

# Check parameters
if start is not None and end is not None:
if end < start:
if start >= end:
raise cherrypy.HTTPError("400 Bad Request",
"end before start")
"start must precede end")

# Check path and get layout
streams = self.db.stream_list(path = path)
@@ -387,31 +378,39 @@ class Server(object):
fast_shutdown = False, # don't wait for clients to disconn.
force_traceback = False # include traceback in all errors
):
self.version = version
# Save server version, just for verification during tests
self.version = nilmdb.__version__

# Need to wrap DB object in a serializer because we'll call
# into it from separate threads.
self.embedded = embedded
self.db = nilmdb.utils.Serializer(db)

# Build up global server configuration
cherrypy.config.update({
'server.socket_host': host,
'server.socket_port': port,
'engine.autoreload_on': False,
'server.max_request_body_size': 4*1024*1024,
'error_page.default': self.json_error_page,
})
if self.embedded:
cherrypy.config.update({ 'environment': 'embedded' })

# Build up application specific configuration
app_config = {}
app_config.update({
'error_page.default': self.json_error_page,
})

# Send a permissive Access-Control-Allow-Origin (CORS) header
# with all responses so that browsers can send cross-domain
# requests to this server.
cherrypy.config.update({ 'response.headers.Access-Control-Allow-Origin':
app_config.update({ 'response.headers.Access-Control-Allow-Origin':
'*' })

# Send tracebacks in error responses. They're hidden by the
# error_page function for client errors (code 400-499).
cherrypy.config.update({ 'request.show_tracebacks' : True })
app_config.update({ 'request.show_tracebacks' : True })
self.force_traceback = force_traceback

# Patch CherryPy error handler to never pad out error messages.
@@ -419,11 +418,13 @@ class Server(object):
# error messages.
cherrypy._cperror._ie_friendly_error_sizes = {}

cherrypy.tree.apps = {}
cherrypy.tree.mount(Root(self.db, self.version), "/")
cherrypy.tree.mount(Stream(self.db), "/stream")
# Build up the application and mount it
root = Root(self.db)
root.stream = Stream(self.db)
if stoppable:
cherrypy.tree.mount(Exiter(), "/exit")
root.exit = Exiter()
cherrypy.tree.apps = {}
cherrypy.tree.mount(root, "/", config = { "/" : app_config })

# Shutdowns normally wait for clients to disconnect. To speed
# up tests, set fast_shutdown = True
@@ -444,7 +445,7 @@ class Server(object):
if not self.force_traceback:
if code >= 400 and code <= 499:
errordata["traceback"] = ""
except Exception as e: # pragma: no cover
except Exception: # pragma: no cover
pass
# Override the response type, which was previously set to text/html
cherrypy.serving.response.headers['Content-Type'] = (

@@ -1,11 +1,10 @@
"""NilmDB utilities"""

from .timer import Timer
from .iteratorizer import Iteratorizer
from .serializer import Serializer
from .lrucache import lru_cache
from .diskusage import du
from .mustclose import must_close
from .urllib import urlencode
from . import misc
from . import atomic
from nilmdb.utils.timer import Timer
from nilmdb.utils.iteratorizer import Iteratorizer
from nilmdb.utils.serializer import Serializer
from nilmdb.utils.lrucache import lru_cache
from nilmdb.utils.diskusage import du, human_size
from nilmdb.utils.mustclose import must_close
from nilmdb.utils.urllib import urlencode
from nilmdb.utils import atomic

@@ -1,7 +1,7 @@
import os
from math import log

def sizeof_fmt(num):
def human_size(num):
"""Human friendly file size"""
unit_list = zip(['bytes', 'kiB', 'MiB', 'GiB', 'TiB'], [0, 0, 1, 2, 2])
if num > 1:
@@ -15,15 +15,11 @@ def sizeof_fmt(num):
if num == 1: # pragma: no cover
return '1 byte'

def du_bytes(path):
def du(path):
"""Like du -sb, returns total size of path in bytes."""
size = os.path.getsize(path)
if os.path.isdir(path):
for file in os.listdir(path):
filepath = os.path.join(path, file)
size += du_bytes(filepath)
for thisfile in os.listdir(path):
filepath = os.path.join(path, thisfile)
size += du(filepath)
return size

def du(path):
"""Like du -sh, returns total size of path as a human-readable string."""
return sizeof_fmt(du_bytes(path))
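
After this rename, du() always returns a byte count, and callers wanting the old human-readable behavior compose the two functions themselves, e.g. (the path is an example):

    from nilmdb.utils import du, human_size
    print human_size(du("/tmp/nilmdb-test"))   # e.g. "2.8 MiB"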

@@ -95,5 +95,5 @@ def Iteratorizer(function, curl_hack = False):
while thread.isAlive():
try:
queue.get(True, 0.01)
except:
except: # pragma: no cover
pass

@@ -5,7 +5,6 @@

import collections
import decorator
import warnings

def lru_cache(size = 10, onremove = None, keys = slice(None)):
"""Least-recently-used cache decorator.

@@ -1,8 +0,0 @@
import itertools

def pairwise(iterable):
"s -> (s0,s1), (s1,s2), ..., (sn,None)"
a, b = itertools.tee(iterable)
next(b, None)
return itertools.izip_longest(a, b)

@@ -38,6 +38,7 @@ def must_close(errorfile = sys.stderr, wrap_verify = False):

@wrap_class_method
def close(orig, self, *args, **kwargs):
if "_must_close" in self.__dict__:
del self._must_close
return orig(self, *args, **kwargs)


@@ -2,7 +2,7 @@

# Simple timer to time a block of code, for optimization debugging
# use like:
# with nilmdb.Timer("flush"):
# with nilmdb.utils.Timer("flush"):
# foo.flush()

from __future__ import print_function

@@ -3,19 +3,16 @@
from nilmdb.utils.printf import *
from nilmdb.utils import datetime_tz

import time
import os

class Timestamper(object):
"""A file-like object that adds timestamps to lines of an input file."""
def __init__(self, file, ts_iter):
def __init__(self, infile, ts_iter):
"""file: filename, or another file-like object
ts_iter: iterator that returns a timestamp string for
each line of the file"""
if isinstance(file, basestring):
self.file = open(file, "r")
if isinstance(infile, basestring):
self.file = open(infile, "r")
else:
self.file = file
self.file = infile
self.ts_iter = ts_iter

def close(self):
@@ -54,7 +51,7 @@ class Timestamper(object):

class TimestamperRate(Timestamper):
"""Timestamper that uses a start time and a fixed rate"""
def __init__(self, file, start, rate, end = None):
def __init__(self, infile, start, rate, end = None):
"""
file: file name or object

@@ -76,7 +73,7 @@ class TimestamperRate(Timestamper):
# Handle case where we're passed a datetime or datetime_tz object
if "totimestamp" in dir(start):
start = start.totimestamp()
Timestamper.__init__(self, file, iterator(start, rate, end))
Timestamper.__init__(self, infile, iterator(start, rate, end))
self.start = start
self.rate = rate
def __str__(self):
@@ -87,21 +84,21 @@ class TimestamperRate(Timestamper):

class TimestamperNow(Timestamper):
"""Timestamper that uses current time"""
def __init__(self, file):
def __init__(self, infile):
def iterator():
while True:
now = datetime_tz.datetime_tz.utcnow().totimestamp()
yield sprintf("%.6f ", now)
Timestamper.__init__(self, file, iterator())
Timestamper.__init__(self, infile, iterator())
def __str__(self):
return "TimestamperNow(...)"

class TimestamperNull(Timestamper):
"""Timestamper that adds nothing to each line"""
def __init__(self, file):
def __init__(self, infile):
def iterator():
while True:
yield ""
Timestamper.__init__(self, file, iterator())
Timestamper.__init__(self, infile, iterator())
def __str__(self):
return "TimestamperNull(...)"

@@ -1,6 +0,0 @@
#!/usr/bin/python

import nilmdb
import sys

nilmdb.cmdline.Cmdline(sys.argv[1:]).run()

runserver.py
@@ -1,35 +0,0 @@
#!/usr/bin/python

import nilmdb
import argparse

formatter = argparse.ArgumentDefaultsHelpFormatter
parser = argparse.ArgumentParser(description='Run the NILM server',
formatter_class = formatter)
parser.add_argument('-p', '--port', help='Port number', type=int, default=12380)
parser.add_argument('-d', '--database', help='Database directory', default="db")
parser.add_argument('-y', '--yappi', help='Run with yappi profiler',
action='store_true')
args = parser.parse_args()

# Start web app on a custom port
db = nilmdb.NilmDB(args.database)
server = nilmdb.Server(db, host = "127.0.0.1",
port = args.port,
embedded = False)


if args.yappi:
print "Running in yappi"
try:
import yappi
yappi.start()
server.start(blocking = True)
finally:
yappi.stop()
print "Try: yappi.print_stats(sort_type=yappi.SORTTYPE_TTOT,limit=50)"
from IPython import embed
embed()
else:
server.start(blocking = True)
db.close()

setup.py
@@ -1,5 +1,11 @@
#!/usr/bin/python

# To release a new version, tag it:
# git tag -a nilmdb-1.1 -m "Version 1.1"
# git push --tags
# Then just package it up:
# python setup.py sdist

# This is supposed to be using Distribute:
#
# distutils provides a "setup" method.
@@ -9,32 +15,106 @@
# So we don't really know if this is using the old setuptools or the
# Distribute-provided version of setuptools.

from setuptools import setup, find_packages
from distutils.extension import Extension
import traceback
import sys
import os

from Cython.Build import cythonize
try:
from setuptools import setup, find_packages
from distutils.extension import Extension
import distutils.version
except ImportError:
traceback.print_exc()
print "Please install the prerequisites listed in README.txt"
sys.exit(1)

# Versioneer manages version numbers from git tags.
# https://github.com/warner/python-versioneer
import versioneer
versioneer.versionfile_source = 'nilmdb/_version.py'
versioneer.versionfile_build = 'nilmdb/_version.py'
versioneer.tag_prefix = 'nilmdb-'
versioneer.parentdir_prefix = 'nilmdb-'

# Hack to workaround logging/multiprocessing issue:
# https://groups.google.com/d/msg/nose-users/fnJ-kAUbYHQ/_UsLN786ygcJ
try: import multiprocessing
except: pass

# Build cython modules.
cython_modules = cythonize("**/*.pyx")
# Use Cython if it's new enough, otherwise use preexisting C files.
cython_modules = [ 'nilmdb.server.interval',
'nilmdb.server.layout',
'nilmdb.server.rbtree' ]
try:
import Cython
from Cython.Build import cythonize
if (distutils.version.LooseVersion(Cython.__version__) <
distutils.version.LooseVersion("0.16")):
print "Cython version", Cython.__version__, "is too old; not using it."
raise ImportError()
use_cython = True
except ImportError:
use_cython = False

ext_modules = []
for modulename in cython_modules:
filename = modulename.replace('.','/')
if use_cython:
ext_modules.extend(cythonize(filename + ".pyx"))
else:
cfile = filename + ".c"
if not os.path.exists(cfile):
raise Exception("Missing source file " + cfile + ". "
"Try installing cython >= 0.16.")
ext_modules.append(Extension(modulename, [ cfile ]))

# We need a MANIFEST.in. Generate it here rather than polluting the
# repository with yet another setup-related file.
with open("MANIFEST.in", "w") as m:
m.write("""
# Root
include README.txt
include setup.cfg
include setup.py
include versioneer.py
include Makefile
include .coveragerc
include .pylintrc

# Cython files -- include source.
recursive-include nilmdb/server *.pyx *.pyxdep *.pxd

# Tests
recursive-include tests *.py
recursive-include tests/data *
include tests/test.order

# Docs
recursive-include docs Makefile *.md
""")

# Run setup
setup(name='nilmdb',
version = '1.0',
version = versioneer.get_version(),
cmdclass = versioneer.get_cmdclass(),
url = 'https://git.jim.sh/jim/lees/nilmdb.git',
author = 'Jim Paris',
description = "NILM Database",
long_description = "NILM Database",
license = "Proprietary",
author_email = 'jim@jtan.com',
tests_require = [ 'nose',
'coverage',
],
setup_requires = [ 'cython',
setup_requires = [ 'distribute',
],
install_requires = [ 'distribute',
'decorator',
install_requires = [ 'decorator',
'cherrypy >= 3.2',
'simplejson',
'pycurl',
'python-dateutil',
'pytz',
'psutil >= 0.3.0',
],
packages = [ 'nilmdb',
'nilmdb.utils',
@@ -42,7 +122,14 @@ setup(name='nilmdb',
'nilmdb.server',
'nilmdb.client',
'nilmdb.cmdline',
'nilmdb.scripts',
],
ext_modules = cython_modules,
entry_points = {
'console_scripts': [
'nilmtool = nilmdb.scripts.nilmtool:main',
'nilmdb-server = nilmdb.scripts.nilmdb_server:main',
],
},
ext_modules = ext_modules,
zip_safe = False,
)
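
The Cython gate in setup.py relies on distutils version objects for its "0.16 or newer" test; LooseVersion compares dotted version strings numerically, component by component, where a plain string comparison would not:

    import distutils.version
    v = distutils.version.LooseVersion
    print v("0.17") >= v("0.16")   # True
    print v("0.9") >= v("0.16")   # False, although "0.9" >= "0.16" as strings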

@@ -6,6 +6,9 @@ import sys
import glob
from collections import OrderedDict

# Change into parent dir
os.chdir(os.path.dirname(os.path.realpath(__file__)) + "/..")

class JimOrderPlugin(nose.plugins.Plugin):
"""When searching for tests and encountering a directory that
contains a 'test.order' file, run tests listed in that file, in the

@@ -18,10 +18,12 @@ import simplejson as json
import unittest
import warnings
import resource
import time

from testutil.helpers import *

testdb = "tests/client-testdb"
testurl = "http://localhost:12380/"

def setup_module():
global test_server, test_db
@@ -44,39 +46,44 @@ def teardown_module():

class TestClient(object):

def test_client_1_basic(self):
def test_client_01_basic(self):
# Test a fake host
client = nilmdb.Client(url = "http://localhost:1/")
with assert_raises(nilmdb.client.ServerError):
client.version()
client.close()

# Trigger same error with a PUT request
client = nilmdb.Client(url = "http://localhost:1/")
with assert_raises(nilmdb.client.ServerError):
client.version()
client.close()

# Then a fake URL on a real host
client = nilmdb.Client(url = "http://localhost:12380/fake/")
with assert_raises(nilmdb.client.ClientError):
client.version()
client.close()

# Now a real URL with no http:// prefix
client = nilmdb.Client(url = "localhost:12380")
version = client.version()
client.close()

# Now use the real URL
client = nilmdb.Client(url = "http://localhost:12380/")
client = nilmdb.Client(url = testurl)
version = client.version()
eq_(distutils.version.StrictVersion(version),
distutils.version.StrictVersion(test_server.version))
eq_(distutils.version.LooseVersion(version),
distutils.version.LooseVersion(test_server.version))

# Bad URLs should give 404, not 500
with assert_raises(ClientError):
client.http.get("/stream/create")
client.close()

def test_client_2_createlist(self):
def test_client_02_createlist(self):
# Basic stream tests, like those in test_nilmdb:test_stream
client = nilmdb.Client(url = "http://localhost:12380/")
client = nilmdb.Client(url = testurl)

# Database starts empty
eq_(client.stream_list(), [])
@@ -101,8 +108,10 @@ class TestClient(object):
["/newton/zzz/rawnotch", "RawNotchedData"]
])
# Match just one type or one path
eq_(client.stream_list(layout="RawData"), [ ["/newton/raw", "RawData"] ])
eq_(client.stream_list(path="/newton/raw"), [ ["/newton/raw", "RawData"] ])
eq_(client.stream_list(layout="RawData"),
[ ["/newton/raw", "RawData"] ])
eq_(client.stream_list(path="/newton/raw"),
[ ["/newton/raw", "RawData"] ])

# Try messing with resource limits to trigger errors and get
# more coverage. Here, make it so we can only create files 1
@@ -114,9 +123,10 @@ class TestClient(object):
client.stream_create("/newton/hello", "RawData")
resource.setrlimit(resource.RLIMIT_FSIZE, limit)

client.close()

def test_client_3_metadata(self):
client = nilmdb.Client(url = "http://localhost:12380/")
def test_client_03_metadata(self):
client = nilmdb.Client(url = testurl)

# Set / get metadata
eq_(client.stream_get_metadata("/newton/prep"), {})
@@ -131,9 +141,10 @@ class TestClient(object):
client.stream_update_metadata("/newton/raw", meta3)
eq_(client.stream_get_metadata("/newton/prep"), meta1)
eq_(client.stream_get_metadata("/newton/raw"), meta1)
eq_(client.stream_get_metadata("/newton/raw", [ "description" ] ), meta2)
eq_(client.stream_get_metadata("/newton/raw", [ "description",
"v_scale" ] ), meta1)
eq_(client.stream_get_metadata("/newton/raw",
[ "description" ] ), meta2)
eq_(client.stream_get_metadata("/newton/raw",
[ "description", "v_scale" ] ), meta1)

# missing key
eq_(client.stream_get_metadata("/newton/raw", "descr"),
@@ -146,9 +157,10 @@ class TestClient(object):
client.stream_set_metadata("/newton/prep", [1,2,3])
with assert_raises(ClientError):
client.stream_update_metadata("/newton/prep", [1,2,3])
client.close()

def test_client_4_insert(self):
client = nilmdb.Client(url = "http://localhost:12380/")
def test_client_04_insert(self):
client = nilmdb.Client(url = testurl)

datetime_tz.localtz_set("America/New_York")

@@ -177,12 +189,33 @@ class TestClient(object):
result = client.stream_insert("/newton/prep", data)
eq_(result, None)

# Try forcing a server request with empty data
# It's OK to insert an empty interval
client.http.put("stream/insert", "", { "path": "/newton/prep",
"start": 1, "end": 2 })
eq_(list(client.stream_intervals("/newton/prep")), [[1, 2]])
client.stream_remove("/newton/prep")
eq_(list(client.stream_intervals("/newton/prep")), [])

# Timestamps can be negative too
client.http.put("stream/insert", "", { "path": "/newton/prep",
"start": -2, "end": -1 })
eq_(list(client.stream_intervals("/newton/prep")), [[-2, -1]])
client.stream_remove("/newton/prep")
eq_(list(client.stream_intervals("/newton/prep")), [])

# Intervals that end at zero shouldn't be any different
client.http.put("stream/insert", "", { "path": "/newton/prep",
"start": -1, "end": 0 })
eq_(list(client.stream_intervals("/newton/prep")), [[-1, 0]])
client.stream_remove("/newton/prep")
eq_(list(client.stream_intervals("/newton/prep")), [])

# Try forcing a server request with equal start and end
with assert_raises(ClientError) as e:
client.http.put("stream/insert", "", { "path": "/newton/prep",
"start": 0, "end": 0 })
in_("400 Bad Request", str(e.exception))
in_("no data provided", str(e.exception))
in_("start must precede end", str(e.exception))

# Specify start/end (starts too late)
data = timestamper.TimestamperRate(testfile, start, 120)
@@ -208,7 +241,6 @@ class TestClient(object):
data = timestamper.TimestamperRate(testfile, start, 120)
result = client.stream_insert("/newton/prep", data,
start, start + 119.999777)
eq_(result, "ok")

# Verify the intervals. Should be just one, even if the data
# was inserted in chunks, due to nilmdb interval concatenation.
@@ -222,20 +254,33 @@ class TestClient(object):
in_("400 Bad Request", str(e.exception))
in_("verlap", str(e.exception))

def test_client_5_extractremove(self):
# Misc tests for extract and remove. Most of them are in test_cmdline.
client = nilmdb.Client(url = "http://localhost:12380/")
client.close()

for x in client.stream_extract("/newton/prep", 123, 123):
def test_client_05_extractremove(self):
# Misc tests for extract and remove. Most of them are in test_cmdline.
client = nilmdb.Client(url = testurl)

for x in client.stream_extract("/newton/prep", 999123, 999124):
raise AssertionError("shouldn't be any data for this request")

with assert_raises(ClientError) as e:
client.stream_remove("/newton/prep", 123, 120)

def test_client_6_generators(self):
# Test the exception we get if we nest requests
with assert_raises(Exception) as e:
for data in client.stream_extract("/newton/prep"):
x = client.stream_intervals("/newton/prep")
in_("nesting calls is not supported", str(e.exception))

# Test count
eq_(client.stream_count("/newton/prep"), 14400)

client.close()

def test_client_06_generators(self):
# A lot of the client functionality is already tested by test_cmdline,
# but this gets a bit more coverage that cmdline misses.
client = nilmdb.Client(url = "http://localhost:12380/")
client = nilmdb.Client(url = testurl)

# Trigger a client error in generator
start = datetime_tz.datetime_tz.smartparse("20120323T2000")
@@ -246,7 +291,7 @@ class TestClient(object):
start.totimestamp(),
end.totimestamp()).next()
in_("400 Bad Request", str(e.exception))
in_("end before start", str(e.exception))
in_("start must precede end", str(e.exception))

# Trigger a curl error in generator
with assert_raises(ServerError) as e:
@@ -272,7 +317,7 @@ class TestClient(object):
{ "path": "/newton/prep",
"start": 0, "end": 0 }).next()
in_("400 Bad Request", str(e.exception))
in_("no data provided", str(e.exception))
in_("start must precede end", str(e.exception))

# Check 404 for missing streams
for function in [ client.stream_intervals, client.stream_extract ]:
@@ -281,13 +326,15 @@ class TestClient(object):
in_("404 Not Found", str(e.exception))
in_("No such stream", str(e.exception))

def test_client_7_headers(self):
client.close()

def test_client_07_headers(self):
# Make sure that /stream/intervals and /stream/extract
# properly return streaming, chunked, text/plain response.
# Pokes around in client.http internals a bit to look at the
# response headers.

client = nilmdb.Client(url = "http://localhost:12380/")
client = nilmdb.Client(url = testurl)
http = client.http

# Use a warning rather than returning a test failure, so that we can
@@ -307,7 +354,7 @@ class TestClient(object):
x = http.get("stream/extract",
{ "path": "/newton/prep",
"start": "123",
"end": "123" }, retjson=False)
"end": "124" }, retjson=False)
if "Transfer-Encoding: chunked" not in http._headers:
warnings.warn("Non-chunked HTTP response for /stream/extract")
if "Content-Type: text/plain;charset=utf-8" not in http._headers:
@@ -320,9 +367,11 @@ class TestClient(object):
"header in /stream/extract response:\n" +
http._headers)

def test_client_8_unicode(self):
client.close()

def test_client_08_unicode(self):
# Basic Unicode tests
client = nilmdb.Client(url = "http://localhost:12380/")
client = nilmdb.Client(url = testurl)

# Delete streams that exist
for stream in client.stream_list():
@@ -356,3 +405,174 @@ class TestClient(object):
eq_(client.stream_get_metadata(raw[0]), meta1)
eq_(client.stream_get_metadata(raw[0], [ "alpha" ]), meta2)
eq_(client.stream_get_metadata(raw[0], [ "alpha", "β" ]), meta1)

client.close()

def test_client_09_closing(self):
# Make sure we actually close sockets correctly. New
# connections will block for a while if they're not, since the
# server will stop accepting new connections.
for test in [1, 2]:
start = time.time()
for i in range(50):
if time.time() - start > 15:
raise AssertionError("Connections seem to be blocking... "
"probably not closing properly.")
if test == 1:
# explicit close
client = nilmdb.Client(url = testurl)
with assert_raises(ClientError) as e:
client.stream_remove("/newton/prep", 123, 120)
client.close() # remove this to see the failure
elif test == 2:
# use the context manager
with nilmdb.Client(url = testurl) as c:
with assert_raises(ClientError) as e:
c.stream_remove("/newton/prep", 123, 120)

def test_client_10_context(self):
# Test using the client's stream insertion context manager to
# insert data.
client = nilmdb.Client(testurl)

client.stream_create("/context/test", "uint16_1")
with client.stream_insert_context("/context/test") as ctx:
# override _max_data to trigger frequent server updates
ctx._max_data = 15

with assert_raises(ValueError):
ctx.insert_line("100 1")

ctx.insert_line("100 1\n")
ctx.insert_iter([ "101 1\n",
"102 1\n",
"103 1\n" ])
ctx.insert_line("104 1\n")
ctx.insert_line("105 1\n")
ctx.finalize()

ctx.insert_line("106 1\n")
ctx.update_end(106.5)
ctx.finalize()
ctx.update_start(106.8)
ctx.insert_line("107 1\n")
ctx.insert_line("108 1\n")
ctx.insert_line("109 1\n")
ctx.insert_line("110 1\n")
ctx.insert_line("111 1\n")
ctx.update_end(113)
ctx.insert_line("112 1\n")
ctx.update_end(114)
ctx.insert_line("113 1\n")
ctx.update_end(115)
ctx.insert_line("114 1\n")
ctx.finalize()

with assert_raises(ClientError):
with client.stream_insert_context("/context/test", 100, 200) as ctx:
ctx.insert_line("115 1\n")

with assert_raises(ClientError):
with client.stream_insert_context("/context/test", 200, 300) as ctx:
ctx.insert_line("115 1\n")

with client.stream_insert_context("/context/test", 200, 300) as ctx:
# make sure our override wasn't permanent
ne_(ctx._max_data, 15)
ctx.insert_line("225 1\n")
ctx.finalize()

eq_(list(client.stream_intervals("/context/test")),
[ [ 100, 105.000001 ],
[ 106, 106.5 ],
[ 106.8, 115 ],
[ 200, 300 ] ])

client.stream_destroy("/context/test")
client.close()

def test_client_11_emptyintervals(self):
# Empty intervals are ok! If recording detection events
# by inserting rows into the database, we want to be able to
# have an interval where no events occurred. Test them here.
client = nilmdb.Client(testurl)
client.stream_create("/empty/test", "uint16_1")

def info():
result = []
for interval in list(client.stream_intervals("/empty/test")):
result.append((client.stream_count("/empty/test", *interval),
interval))
return result

eq_(info(), [])

# Insert a region with just a few points
with client.stream_insert_context("/empty/test") as ctx:
ctx.update_start(100)
ctx.insert_line("140 1\n")
ctx.insert_line("150 1\n")
ctx.insert_line("160 1\n")
ctx.update_end(200)
ctx.finalize()

eq_(info(), [(3, [100, 200])])

# Delete chunk, which will leave one data point and two intervals
client.stream_remove("/empty/test", 145, 175)
eq_(info(), [(1, [100, 145]),
(0, [175, 200])])

# Try also creating a completely empty interval from scratch,
# in a few different ways.
client.stream_insert_block("/empty/test", "", 300, 350)
client.stream_insert("/empty/test", [], 400, 450)
with client.stream_insert_context("/empty/test", 500, 550):
pass

# If enough timestamps aren't provided, empty streams won't be created.
client.stream_insert("/empty/test", [])
with client.stream_insert_context("/empty/test"):
pass
client.stream_insert("/empty/test", [], start = 600)
with client.stream_insert_context("/empty/test", start = 700):
pass
client.stream_insert("/empty/test", [], end = 850)
with client.stream_insert_context("/empty/test", end = 950):
pass

# Try various things that might cause problems
with client.stream_insert_context("/empty/test", 1000, 1050):
ctx.finalize() # inserts [1000, 1050]
ctx.finalize() # nothing
ctx.finalize() # nothing
ctx.insert_line("1100 1\n")
ctx.finalize() # inserts [1100, 1100.000001]
ctx.update_start(1199)
ctx.insert_line("1200 1\n")
ctx.update_end(1250)
ctx.finalize() # inserts [1199, 1250]
ctx.update_start(1299)
ctx.finalize() # nothing
ctx.update_end(1350)
ctx.finalize() # nothing
ctx.update_start(1400)
ctx.update_end(1450)
ctx.finalize()
# implicit last finalize inserts [1400, 1450]

# Check everything
eq_(info(), [(1, [100, 145]),
(0, [175, 200]),
(0, [300, 350]),
(0, [400, 450]),
(0, [500, 550]),
(0, [1000, 1050]),
(1, [1100, 1100.000001]),
(1, [1199, 1250]),
(0, [1400, 1450]),
])

# Clean up
client.stream_destroy("/empty/test")
client.close()

@@ -194,9 +194,11 @@ class TestCmdline(object):
def test_02_info(self):
self.ok("info")
self.contain("Server URL: http://localhost:12380/")
self.contain("Client version: " + nilmdb.__version__)
self.contain("Server version: " + test_server.version)
self.contain("Server database path")
self.contain("Server database size")
self.contain("Server database free space")

def test_03_createlist(self):
# Basic stream tests, like those in test_client.
@@ -272,7 +274,7 @@ class TestCmdline(object):

# reversed range
self.fail("list /newton/prep --start 2020-01-01 --end 2000-01-01")
self.contain("start is after end")
self.contain("start must precede end")

def test_04_metadata(self):
# Set / get metadata
@@ -442,7 +444,7 @@ class TestCmdline(object):
self.contain("no intervals")

self.ok("list --detail --path *prep --start='23 Mar 2012 10:05:15.50'"
+ " --end='23 Mar 2012 10:05:15.50'")
+ " --end='23 Mar 2012 10:05:15.51'")
lines_(self.captured, 2)
self.contain("10:05:15.500")

@@ -471,29 +473,29 @@ class TestCmdline(object):

# empty ranges return error 2
self.fail("extract -a /newton/prep " +
"--start '23 Mar 2012 10:00:30' " +
"--end '23 Mar 2012 10:00:30'",
"--start '23 Mar 2012 20:00:30' " +
"--end '23 Mar 2012 20:00:31'",
exitcode = 2, require_error = False)
self.contain("no data")
self.fail("extract -a /newton/prep " +
"--start '23 Mar 2012 10:00:30.000001' " +
"--end '23 Mar 2012 10:00:30.000001'",
"--start '23 Mar 2012 20:00:30.000001' " +
"--end '23 Mar 2012 20:00:30.000002'",
exitcode = 2, require_error = False)
self.contain("no data")
self.fail("extract -a /newton/prep " +
"--start '23 Mar 2022 10:00:30' " +
"--end '23 Mar 2022 10:00:30'",
"--end '23 Mar 2022 10:00:31'",
exitcode = 2, require_error = False)
self.contain("no data")

# but are ok if we're just counting results
self.ok("extract --count /newton/prep " +
"--start '23 Mar 2012 10:00:30' " +
"--end '23 Mar 2012 10:00:30'")
"--start '23 Mar 2012 20:00:30' " +
"--end '23 Mar 2012 20:00:31'")
self.match("0\n")
self.ok("extract -c /newton/prep " +
"--start '23 Mar 2012 10:00:30.000001' " +
"--end '23 Mar 2012 10:00:30.000001'")
"--start '23 Mar 2012 20:00:30.000001' " +
"--end '23 Mar 2012 20:00:30.000002'")
self.match("0\n")

# Check various dumps against stored copies of how they should appear
@@ -540,31 +542,31 @@ class TestCmdline(object):
self.fail("remove /no/such/foo --start 2000-01-01 --end 2020-01-01")
self.contain("No stream at path")

# empty or backward ranges return errors
self.fail("remove /newton/prep --start 2020-01-01 --end 2000-01-01")
self.contain("start is after end")
self.contain("start must precede end")

# empty ranges return success, backwards ranges return error
self.ok("remove /newton/prep " +
self.fail("remove /newton/prep " +
"--start '23 Mar 2012 10:00:30' " +
"--end '23 Mar 2012 10:00:30'")
self.match("")
self.ok("remove /newton/prep " +
self.contain("start must precede end")
self.fail("remove /newton/prep " +
"--start '23 Mar 2012 10:00:30.000001' " +
"--end '23 Mar 2012 10:00:30.000001'")
self.match("")
self.ok("remove /newton/prep " +
self.contain("start must precede end")
self.fail("remove /newton/prep " +
"--start '23 Mar 2022 10:00:30' " +
"--end '23 Mar 2022 10:00:30'")
self.match("")
self.contain("start must precede end")

# Verbose
self.ok("remove -c /newton/prep " +
"--start '23 Mar 2012 10:00:30' " +
"--end '23 Mar 2012 10:00:30'")
"--start '23 Mar 2022 20:00:30' " +
"--end '23 Mar 2022 20:00:31'")
self.match("0\n")
self.ok("remove --count /newton/prep " +
"--start '23 Mar 2012 10:00:30' " +
"--end '23 Mar 2012 10:00:30'")
"--start '23 Mar 2022 20:00:30' " +
"--end '23 Mar 2022 20:00:31'")
self.match("0\n")

# Make sure we have the data we expect
@@ -765,7 +767,7 @@ class TestCmdline(object):
"tests/data/prep-20120323T1000")

# Should take up about 2.8 MB here (including directory entries)
du_before = nilmdb.utils.diskusage.du_bytes(testdb)
du_before = nilmdb.utils.diskusage.du(testdb)

# Make sure we have the data we expect
self.ok("list --detail")
@@ -815,7 +817,7 @@ class TestCmdline(object):

# We have 1/8 of the data that we had before, so the file size
# should have dropped below 1/4 of what it used to be
du_after = nilmdb.utils.diskusage.du_bytes(testdb)
du_after = nilmdb.utils.diskusage.du(testdb)
lt_(du_after, (du_before / 4))

# Remove anything that came from the 10:02 data file

@@ -55,7 +55,7 @@ class TestInterval:
for x in [ "03/24/2012", "03/25/2012", "03/26/2012" ] ]

# basic construction
i = Interval(d1, d1)
i = Interval(d1, d2)
i = Interval(d1, d3)
eq_(i.start, d1)
eq_(i.end, d3)
@@ -77,8 +77,8 @@ class TestInterval:
assert(Interval(d1, d3) > Interval(d1, d2))
assert(Interval(d1, d2) < Interval(d2, d3))
assert(Interval(d1, d3) < Interval(d2, d3))
assert(Interval(d2, d2) > Interval(d1, d3))
assert(Interval(d3, d3) == Interval(d3, d3))
assert(Interval(d2, d2+0.01) > Interval(d1, d3))
assert(Interval(d3, d3+0.01) == Interval(d3, d3+0.01))
#with assert_raises(TypeError): # was AttributeError, that's wrong
# x = (i == 123)

@@ -293,7 +293,7 @@ class TestIntervalDB:
# actual start, end can be a subset
a = DBInterval(150, 200, 100, 200, 10000, 20000)
b = DBInterval(100, 150, 100, 200, 10000, 20000)
c = DBInterval(150, 150, 100, 200, 10000, 20000)
c = DBInterval(150, 160, 100, 200, 10000, 20000)

# Make a set of DBIntervals
iseta = IntervalSet([a, b])

@@ -93,6 +93,13 @@ class Test00Nilmdb(object): # named 00 so it runs first
eq_(db.stream_get_metadata("/newton/prep"), meta1)
eq_(db.stream_get_metadata("/newton/raw"), meta1)

# fill in some test coverage for start >= end
with assert_raises(nilmdb.server.NilmDBError):
db.stream_remove("/newton/prep", 0, 0)
with assert_raises(nilmdb.server.NilmDBError):
db.stream_remove("/newton/prep", 1, 0)
db.stream_remove("/newton/prep", 0, 1)

db.close()

class TestBlockingServer(object):
@@ -151,8 +158,8 @@ class TestServer(object):
eq_(e.exception.code, 404)

# Check version
eq_(distutils.version.StrictVersion(getjson("/version")),
distutils.version.StrictVersion(self.server.version))
eq_(distutils.version.LooseVersion(getjson("/version")),
distutils.version.LooseVersion(nilmdb.__version__))

def test_stream_list(self):
# Known streams that got populated by an earlier test (test_nilmdb)

versioneer.py (new file)
@@ -0,0 +1,656 @@
#! /usr/bin/python

"""versioneer.py

(like a rocketeer, but for versions)

* https://github.com/warner/python-versioneer
* Brian Warner
* License: Public Domain
* Version: 0.7+

This file helps distutils-based projects manage their version number by just
creating version-control tags.

For developers who work from a VCS-generated tree (e.g. 'git clone' etc),
each 'setup.py version', 'setup.py build', 'setup.py sdist' will compute a
version number by asking your version-control tool about the current
checkout. The version number will be written into a generated _version.py
file of your choosing, where it can be included by your __init__.py

For users who work from a VCS-generated tarball (e.g. 'git archive'), it will
compute a version number by looking at the name of the directory created when
the tarball is unpacked. This conventionally includes both the name of the
project and a version number.

For users who work from a tarball built by 'setup.py sdist', it will get a
version number from a previously-generated _version.py file.

As a result, loading code directly from the source tree will not result in a
real version. If you want real versions from VCS trees (where you frequently
update from the upstream repository, or do new development), you will need to
do a 'setup.py version' after each update, and load code from the build/
directory.

You need to provide this code with a few configuration values:

versionfile_source:
A project-relative pathname into which the generated version strings
should be written. This is usually a _version.py next to your project's
main __init__.py file. If your project uses src/myproject/__init__.py,
this should be 'src/myproject/_version.py'. This file should be checked
in to your VCS as usual: the copy created below by 'setup.py
update_files' will include code that parses expanded VCS keywords in
generated tarballs. The 'build' and 'sdist' commands will replace it with
a copy that has just the calculated version string.

versionfile_build:
Like versionfile_source, but relative to the build directory instead of
the source directory. These will differ when your setup.py uses
'package_dir='. If you have package_dir={'myproject': 'src/myproject'},
then you will probably have versionfile_build='myproject/_version.py' and
versionfile_source='src/myproject/_version.py'.

tag_prefix: a string, like 'PROJECTNAME-', which appears at the start of all
VCS tags. If your tags look like 'myproject-1.2.0', then you
should use tag_prefix='myproject-'. If you use unprefixed tags
like '1.2.0', this should be an empty string.

parentdir_prefix: a string, frequently the same as tag_prefix, which
appears at the start of all unpacked tarball filenames. If
your tarball unpacks into 'myproject-1.2.0', this should
be 'myproject-'.

To use it:

1: include this file in the top level of your project
2: make the following changes to the top of your setup.py:
import versioneer
versioneer.versionfile_source = 'src/myproject/_version.py'
versioneer.versionfile_build = 'myproject/_version.py'
versioneer.tag_prefix = '' # tags are like 1.2.0
versioneer.parentdir_prefix = 'myproject-' # dirname like 'myproject-1.2.0'
3: add the following arguments to the setup() call in your setup.py:
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
4: run 'setup.py update_files', which will create _version.py, and will
append the following to your __init__.py:
from _version import __version__
5: modify your MANIFEST.in to include versioneer.py
6: add both versioneer.py and the generated _version.py to your VCS
"""

import os, sys, re
from distutils.core import Command
from distutils.command.sdist import sdist as _sdist
from distutils.command.build import build as _build

versionfile_source = None
versionfile_build = None
tag_prefix = None
parentdir_prefix = None

VCS = "git"
IN_LONG_VERSION_PY = False


LONG_VERSION_PY = '''
IN_LONG_VERSION_PY = True
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by github's download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.

# This file is released into the public domain. Generated by
# versioneer-0.7+ (https://github.com/warner/python-versioneer)

# these strings will be replaced by git during git-archive
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"


import subprocess
import sys

def run_command(args, cwd=None, verbose=False):
try:
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen(args, stdout=subprocess.PIPE, cwd=cwd)
except EnvironmentError:
e = sys.exc_info()[1]
if verbose:
print("unable to run %%s" %% args[0])
print(e)
return None
stdout = p.communicate()[0].strip()
if sys.version >= '3':
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %%s (error)" %% args[0])
return None
return stdout


import sys
import re
import os.path

def get_expanded_variables(versionfile_source):
# the code embedded in _version.py can just fetch the value of these
# variables. When used from setup.py, we don't want to import
# _version.py, so we do it with a regexp instead. This function is not
# used from _version.py.
variables = {}
try:
for line in open(versionfile_source,"r").readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
variables["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
variables["full"] = mo.group(1)
except EnvironmentError:
pass
return variables

def versions_from_expanded_variables(variables, tag_prefix, verbose=False):
refnames = variables["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("variables are unexpanded, not using")
return {} # unexpanded, so not in an unpacked git-archive tarball
refs = set([r.strip() for r in refnames.strip("()").split(",")])
for ref in list(refs):
if not re.search(r'\d', ref):
if verbose:
print("discarding '%%s', no digits" %% ref)
refs.discard(ref)
# Assume all version tags have a digit. git's %%d expansion
# behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us
# distinguish between branches and tags. By ignoring refnames
# without digits, we filter out many common branch names like
# "release" and "stabilization", as well as "HEAD" and "master".
if verbose:
print("remaining refs: %%s" %% ",".join(sorted(refs)))
for ref in sorted(refs):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %%s" %% r)
return { "version": r,
"full": variables["full"].strip() }
# no suitable tags, so we use the full revision id
if verbose:
print("no suitable tags, using full revision id")
return { "version": variables["full"].strip(),
"full": variables["full"].strip() }

def versions_from_vcs(tag_prefix, versionfile_source, verbose=False):
# this runs 'git' from the root of the source tree. That either means
# someone ran a setup.py command (and this code is in versioneer.py, so
# IN_LONG_VERSION_PY=False, thus the containing directory is the root of
# the source tree), or someone ran a project-specific entry point (and
# this code is in _version.py, so IN_LONG_VERSION_PY=True, thus the
# containing directory is somewhere deeper in the source tree). This only
# gets called if the git-archive 'subst' variables were *not* expanded,
# and _version.py hasn't already been rewritten with a short version
# string, meaning we're inside a checked out source tree.

try:
here = os.path.abspath(__file__)
except NameError:
# some py2exe/bbfreeze/non-CPython implementations don't do __file__
return {} # not always correct

# versionfile_source is the relative path from the top of the source tree
# (where the .git directory might live) to this file. Invert this to find
# the root from __file__.
root = here
if IN_LONG_VERSION_PY:
for i in range(len(versionfile_source.split("/"))):
|
||||
root = os.path.dirname(root)
|
||||
else:
|
||||
root = os.path.dirname(here)
|
||||
if not os.path.exists(os.path.join(root, ".git")):
|
||||
if verbose:
|
||||
print("no .git in %%s" %% root)
|
||||
return {}
|
||||
|
||||
GIT = "git"
|
||||
if sys.platform == "win32":
|
||||
GIT = "git.cmd"
|
||||
stdout = run_command([GIT, "describe", "--tags", "--dirty", "--always"],
|
||||
cwd=root)
|
||||
if stdout is None:
|
||||
return {}
|
||||
if not stdout.startswith(tag_prefix):
|
||||
if verbose:
|
||||
print("tag '%%s' doesn't start with prefix '%%s'" %% (stdout, tag_prefix))
|
||||
return {}
|
||||
tag = stdout[len(tag_prefix):]
|
||||
stdout = run_command([GIT, "rev-parse", "HEAD"], cwd=root)
|
||||
if stdout is None:
|
||||
return {}
|
||||
full = stdout.strip()
|
||||
if tag.endswith("-dirty"):
|
||||
full += "-dirty"
|
||||
return {"version": tag, "full": full}
|
||||
|
||||
|
||||
def versions_from_parentdir(parentdir_prefix, versionfile_source, verbose=False):
|
||||
if IN_LONG_VERSION_PY:
|
||||
# We're running from _version.py. If it's from a source tree
|
||||
# (execute-in-place), we can work upwards to find the root of the
|
||||
# tree, and then check the parent directory for a version string. If
|
||||
# it's in an installed application, there's no hope.
|
||||
try:
|
||||
here = os.path.abspath(__file__)
|
||||
except NameError:
|
||||
# py2exe/bbfreeze/non-CPython don't have __file__
|
||||
return {} # without __file__, we have no hope
|
||||
# versionfile_source is the relative path from the top of the source
|
||||
# tree to _version.py. Invert this to find the root from __file__.
|
||||
root = here
|
||||
for i in range(len(versionfile_source.split("/"))):
|
||||
root = os.path.dirname(root)
|
||||
else:
|
||||
# we're running from versioneer.py, which means we're running from
|
||||
# the setup.py in a source tree. sys.argv[0] is setup.py in the root.
|
||||
here = os.path.abspath(sys.argv[0])
|
||||
root = os.path.dirname(here)
|
||||
|
||||
# Source tarballs conventionally unpack into a directory that includes
|
||||
# both the project name and a version string.
|
||||
dirname = os.path.basename(root)
|
||||
if not dirname.startswith(parentdir_prefix):
|
||||
if verbose:
|
||||
print("guessing rootdir is '%%s', but '%%s' doesn't start with prefix '%%s'" %%
|
||||
(root, dirname, parentdir_prefix))
|
||||
return None
|
||||
return {"version": dirname[len(parentdir_prefix):], "full": ""}
|
||||
|
||||
tag_prefix = "%(TAG_PREFIX)s"
|
||||
parentdir_prefix = "%(PARENTDIR_PREFIX)s"
|
||||
versionfile_source = "%(VERSIONFILE_SOURCE)s"
|
||||
|
||||
def get_versions(default={"version": "unknown", "full": ""}, verbose=False):
|
||||
variables = { "refnames": git_refnames, "full": git_full }
|
||||
ver = versions_from_expanded_variables(variables, tag_prefix, verbose)
|
||||
if not ver:
|
||||
ver = versions_from_vcs(tag_prefix, versionfile_source, verbose)
|
||||
if not ver:
|
||||
ver = versions_from_parentdir(parentdir_prefix, versionfile_source,
|
||||
verbose)
|
||||
if not ver:
|
||||
ver = default
|
||||
return ver
|
||||
|
||||
'''
|
||||
|
||||
|
||||
import subprocess
|
||||
import sys
|
||||
|
||||
def run_command(args, cwd=None, verbose=False):
|
||||
try:
|
||||
# remember shell=False, so use git.cmd on windows, not just git
|
||||
p = subprocess.Popen(args, stdout=subprocess.PIPE, cwd=cwd)
|
||||
except EnvironmentError:
|
||||
e = sys.exc_info()[1]
|
||||
if verbose:
|
||||
print("unable to run %s" % args[0])
|
||||
print(e)
|
||||
return None
|
||||
stdout = p.communicate()[0].strip()
|
||||
if sys.version >= '3':
|
||||
stdout = stdout.decode()
|
||||
if p.returncode != 0:
|
||||
if verbose:
|
||||
print("unable to run %s (error)" % args[0])
|
||||
return None
|
||||
return stdout
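
# A usage sketch (not called anywhere at import time; the command shown is
# illustrative):
#
#     out = run_command(["git", "rev-parse", "HEAD"], cwd=".", verbose=True)
#     # 'out' is the command's stripped stdout as a string, or None if the
#     # program is missing or exits nonzero.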


import sys
import re
import os.path

def get_expanded_variables(versionfile_source):
    # the code embedded in _version.py can just fetch the value of these
    # variables. When used from setup.py, we don't want to import
    # _version.py, so we do it with a regexp instead. This function is not
    # used from _version.py.
    variables = {}
    try:
        for line in open(versionfile_source,"r").readlines():
            if line.strip().startswith("git_refnames ="):
                mo = re.search(r'=\s*"(.*)"', line)
                if mo:
                    variables["refnames"] = mo.group(1)
            if line.strip().startswith("git_full ="):
                mo = re.search(r'=\s*"(.*)"', line)
                if mo:
                    variables["full"] = mo.group(1)
    except EnvironmentError:
        pass
    return variables
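
# On a _version.py whose $Format$ strings git-archive has already expanded,
# this returns something like (values are illustrative, not real refs):
#
#     {"refnames": " (HEAD, master, myproject-1.2.0)",
#      "full": "0123456789abcdef0123456789abcdef01234567"}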

def versions_from_expanded_variables(variables, tag_prefix, verbose=False):
    refnames = variables["refnames"].strip()
    if refnames.startswith("$Format"):
        if verbose:
            print("variables are unexpanded, not using")
        return {} # unexpanded, so not in an unpacked git-archive tarball
    refs = set([r.strip() for r in refnames.strip("()").split(",")])
    for ref in list(refs):
        if not re.search(r'\d', ref):
            if verbose:
                print("discarding '%s', no digits" % ref)
            refs.discard(ref)
    # Assume all version tags have a digit. git's %d expansion
    # behaves like git log --decorate=short and strips out the
    # refs/heads/ and refs/tags/ prefixes that would let us
    # distinguish between branches and tags. By ignoring refnames
    # without digits, we filter out many common branch names like
    # "release" and "stabilization", as well as "HEAD" and "master".
    if verbose:
        print("remaining refs: %s" % ",".join(sorted(refs)))
    for ref in sorted(refs):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if ref.startswith(tag_prefix):
            r = ref[len(tag_prefix):]
            if verbose:
                print("picking %s" % r)
            return { "version": r,
                     "full": variables["full"].strip() }
    # no suitable tags, so we use the full revision id
    if verbose:
        print("no suitable tags, using full revision id")
    return { "version": variables["full"].strip(),
             "full": variables["full"].strip() }

def versions_from_vcs(tag_prefix, versionfile_source, verbose=False):
    # this runs 'git' from the root of the source tree. That either means
    # someone ran a setup.py command (and this code is in versioneer.py, so
    # IN_LONG_VERSION_PY=False, thus the containing directory is the root of
    # the source tree), or someone ran a project-specific entry point (and
    # this code is in _version.py, so IN_LONG_VERSION_PY=True, thus the
    # containing directory is somewhere deeper in the source tree). This only
    # gets called if the git-archive 'subst' variables were *not* expanded,
    # and _version.py hasn't already been rewritten with a short version
    # string, meaning we're inside a checked out source tree.

    try:
        here = os.path.abspath(__file__)
    except NameError:
        # some py2exe/bbfreeze/non-CPython implementations don't do __file__
        return {} # not always correct

    # versionfile_source is the relative path from the top of the source tree
    # (where the .git directory might live) to this file. Invert this to find
    # the root from __file__.
    root = here
    if IN_LONG_VERSION_PY:
        for i in range(len(versionfile_source.split("/"))):
            root = os.path.dirname(root)
    else:
        root = os.path.dirname(here)
    if not os.path.exists(os.path.join(root, ".git")):
        if verbose:
            print("no .git in %s" % root)
        return {}

    GIT = "git"
    if sys.platform == "win32":
        GIT = "git.cmd"
    stdout = run_command([GIT, "describe", "--tags", "--dirty", "--always"],
                         cwd=root)
    if stdout is None:
        return {}
    if not stdout.startswith(tag_prefix):
        if verbose:
            print("tag '%s' doesn't start with prefix '%s'" % (stdout, tag_prefix))
        return {}
    tag = stdout[len(tag_prefix):]
    stdout = run_command([GIT, "rev-parse", "HEAD"], cwd=root)
    if stdout is None:
        return {}
    full = stdout.strip()
    if tag.endswith("-dirty"):
        full += "-dirty"
    return {"version": tag, "full": full}


def versions_from_parentdir(parentdir_prefix, versionfile_source, verbose=False):
    if IN_LONG_VERSION_PY:
        # We're running from _version.py. If it's from a source tree
        # (execute-in-place), we can work upwards to find the root of the
        # tree, and then check the parent directory for a version string. If
        # it's in an installed application, there's no hope.
        try:
            here = os.path.abspath(__file__)
        except NameError:
            # py2exe/bbfreeze/non-CPython don't have __file__
            return {} # without __file__, we have no hope
        # versionfile_source is the relative path from the top of the source
        # tree to _version.py. Invert this to find the root from __file__.
        root = here
        for i in range(len(versionfile_source.split("/"))):
            root = os.path.dirname(root)
    else:
        # we're running from versioneer.py, which means we're running from
        # the setup.py in a source tree. sys.argv[0] is setup.py in the root.
        here = os.path.abspath(sys.argv[0])
        root = os.path.dirname(here)

    # Source tarballs conventionally unpack into a directory that includes
    # both the project name and a version string.
    dirname = os.path.basename(root)
    if not dirname.startswith(parentdir_prefix):
        if verbose:
            print("guessing rootdir is '%s', but '%s' doesn't start with prefix '%s'" %
                  (root, dirname, parentdir_prefix))
        return None
    return {"version": dirname[len(parentdir_prefix):], "full": ""}

import sys

def do_vcs_install(versionfile_source, ipy):
    GIT = "git"
    if sys.platform == "win32":
        GIT = "git.cmd"
    run_command([GIT, "add", "versioneer.py"])
    run_command([GIT, "add", versionfile_source])
    run_command([GIT, "add", ipy])
    present = False
    try:
        f = open(".gitattributes", "r")
        for line in f.readlines():
            if line.strip().startswith(versionfile_source):
                if "export-subst" in line.strip().split()[1:]:
                    present = True
        f.close()
    except EnvironmentError:
        pass
    if not present:
        f = open(".gitattributes", "a+")
        f.write("%s export-subst\n" % versionfile_source)
        f.close()
        run_command([GIT, "add", ".gitattributes"])


SHORT_VERSION_PY = """
# This file was generated by 'versioneer.py' (0.7+) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.

version_version = '%(version)s'
version_full = '%(full)s'
def get_versions(default={}, verbose=False):
    return {'version': version_version, 'full': version_full}

"""

DEFAULT = {"version": "unknown", "full": "unknown"}

def versions_from_file(filename):
    versions = {}
    try:
        f = open(filename)
    except EnvironmentError:
        return versions
    for line in f.readlines():
        mo = re.match("version_version = '([^']+)'", line)
        if mo:
            versions["version"] = mo.group(1)
        mo = re.match("version_full = '([^']+)'", line)
        if mo:
            versions["full"] = mo.group(1)
    return versions

def write_to_version_file(filename, versions):
    f = open(filename, "w")
    f.write(SHORT_VERSION_PY % versions)
    f.close()
    print("set %s to '%s'" % (filename, versions["version"]))


def get_best_versions(versionfile, tag_prefix, parentdir_prefix,
                      default=DEFAULT, verbose=False):
    # returns dict with two keys: 'version' and 'full'
    #
    # extract version from first of _version.py, 'git describe', parentdir.
    # This is meant to work for developers using a source checkout, for users
    # of a tarball created by 'setup.py sdist', and for users of a
    # tarball/zipball created by 'git archive' or github's download-from-tag
    # feature.

    variables = get_expanded_variables(versionfile_source)
    if variables:
        ver = versions_from_expanded_variables(variables, tag_prefix)
        if ver:
            if verbose: print("got version from expanded variable %s" % ver)
            return ver

    ver = versions_from_file(versionfile)
    if ver:
        if verbose: print("got version from file %s %s" % (versionfile, ver))
        return ver

    ver = versions_from_vcs(tag_prefix, versionfile_source, verbose)
    if ver:
        if verbose: print("got version from git %s" % ver)
        return ver

    ver = versions_from_parentdir(parentdir_prefix, versionfile_source, verbose)
    if ver:
        if verbose: print("got version from parentdir %s" % ver)
        return ver

    if verbose: print("got version from default %s" % default)
    return default

def get_versions(default=DEFAULT, verbose=False):
    assert versionfile_source is not None, "please set versioneer.versionfile_source"
    assert tag_prefix is not None, "please set versioneer.tag_prefix"
    assert parentdir_prefix is not None, "please set versioneer.parentdir_prefix"
    return get_best_versions(versionfile_source, tag_prefix, parentdir_prefix,
                             default=default, verbose=verbose)

def get_version(verbose=False):
    return get_versions(verbose=verbose)["version"]

class cmd_version(Command):
    description = "report generated version string"
    user_options = []
    boolean_options = []
    def initialize_options(self):
        pass
    def finalize_options(self):
        pass
    def run(self):
        ver = get_version(verbose=True)
        print("Version is currently: %s" % ver)


class cmd_build(_build):
    def run(self):
        versions = get_versions(verbose=True)
        _build.run(self)
        # now locate _version.py in the new build/ directory and replace it
        # with an updated value
        target_versionfile = os.path.join(self.build_lib, versionfile_build)
        print("UPDATING %s" % target_versionfile)
        os.unlink(target_versionfile)
        f = open(target_versionfile, "w")
        f.write(SHORT_VERSION_PY % versions)
        f.close()

class cmd_sdist(_sdist):
    def run(self):
        versions = get_versions(verbose=True)
        self._versioneer_generated_versions = versions
        # unless we update this, the command will keep using the old version
        self.distribution.metadata.version = versions["version"]
        return _sdist.run(self)

    def make_release_tree(self, base_dir, files):
        _sdist.make_release_tree(self, base_dir, files)
        # now locate _version.py in the new base_dir directory (remembering
        # that it may be a hardlink) and replace it with an updated value
        target_versionfile = os.path.join(base_dir, versionfile_source)
        print("UPDATING %s" % target_versionfile)
        os.unlink(target_versionfile)
        f = open(target_versionfile, "w")
        f.write(SHORT_VERSION_PY % self._versioneer_generated_versions)
        f.close()

INIT_PY_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
"""

class cmd_update_files(Command):
    description = "modify __init__.py and create _version.py"
    user_options = []
    boolean_options = []
    def initialize_options(self):
        pass
    def finalize_options(self):
        pass
    def run(self):
        ipy = os.path.join(os.path.dirname(versionfile_source), "__init__.py")
        print(" creating %s" % versionfile_source)
        f = open(versionfile_source, "w")
        f.write(LONG_VERSION_PY % {"DOLLAR": "$",
                                   "TAG_PREFIX": tag_prefix,
                                   "PARENTDIR_PREFIX": parentdir_prefix,
                                   "VERSIONFILE_SOURCE": versionfile_source,
                                   })
        f.close()
        try:
            old = open(ipy, "r").read()
        except EnvironmentError:
            old = ""
        if INIT_PY_SNIPPET not in old:
            print(" appending to %s" % ipy)
            f = open(ipy, "a")
            f.write(INIT_PY_SNIPPET)
            f.close()
        else:
            print(" %s unmodified" % ipy)
        do_vcs_install(versionfile_source, ipy)

def get_cmdclass():
    return {'version': cmd_version,
            'update_files': cmd_update_files,
            'build': cmd_build,
            'sdist': cmd_sdist,
            }
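
# So a setup.py that does (sketch):
#
#     setup(version=versioneer.get_version(),
#           cmdclass=versioneer.get_cmdclass(), ...)
#
# gains 'setup.py version' and 'setup.py update_files' commands, and its
# 'build' and 'sdist' commands rewrite _version.py with a frozen version
# string.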