Compare commits
No commits in common. "master" and "python2" have entirely different histories.
.coveragerc
@@ -1,11 +1,10 @@
 # -*- conf -*-
 
 [run]
-branch = True
+# branch = True
 
 [report]
 exclude_lines =
     pragma: no cover
    if 0:
-omit = nilmdb/scripts,nilmdb/_version.py,nilmdb/fsck
-show_missing = True
+omit = nilmdb/utils/datetime_tz*,nilmdb/scripts,nilmdb/_version.py,nilmdb/fsck
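Note: the `exclude_lines` patterns above tell coverage.py to skip matching
source lines when reporting missed coverage. A minimal sketch of code each
pattern would exclude (hypothetical module, not part of this repository):

    # example.py -- lines matching the .coveragerc exclude patterns above
    # are never counted as missed coverage

    def debug_dump(data):
        if 0:  # matched by the "if 0:" pattern; never executes
            print(data)

    def platform_specific():  # pragma: no cover
        # matched by the "pragma: no cover" pattern
        raise NotImplementedError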
.gitignore (vendored, 7 changes)
@@ -4,7 +4,6 @@ tests/*testdb/
 db/
 
 # Compiled / cythonized files
-README.html
 docs/*.html
 build/
 *.pyc
@@ -16,8 +15,10 @@ nilmdb/server/rbtree.c
 # Setup junk
 dist/
 nilmdb.egg-info/
-venv/
-.eggs/
+# This gets generated as needed by setup.py
+MANIFEST.in
+MANIFEST
 
 # Misc
 timeit*out
.pylintrc (new file, 250 lines)
@@ -0,0 +1,250 @@
+# -*- conf -*-
+[MASTER]
+
+# Specify a configuration file.
+#rcfile=
+
+# Python code to execute, usually for sys.path manipulation such as
+# pygtk.require().
+#init-hook=
+
+# Profiled execution.
+profile=no
+
+# Add files or directories to the blacklist. They should be base names, not
+# paths.
+ignore=datetime_tz
+
+# Pickle collected data for later comparisons.
+persistent=no
+
+# List of plugins (as comma separated values of python modules names) to load,
+# usually to register additional checkers.
+load-plugins=
+
+
+[MESSAGES CONTROL]
+
+# Enable the message, report, category or checker with the given id(s). You can
+# either give multiple identifier separated by comma (,) or put this option
+# multiple time.
+#enable=
+
+# Disable the message, report, category or checker with the given id(s). You
+# can either give multiple identifier separated by comma (,) or put this option
+# multiple time (only on the command line, not in the configuration file where
+# it should appear only once).
+disable=C0111,R0903,R0201,R0914,R0912,W0142,W0703,W0702
+
+
+[REPORTS]
+
+# Set the output format. Available formats are text, parseable, colorized, msvs
+# (visual studio) and html
+output-format=parseable
+
+# Include message's id in output
+include-ids=yes
+
+# Put messages in a separate file for each module / package specified on the
+# command line instead of printing them on stdout. Reports (if any) will be
+# written in a file name "pylint_global.[txt|html]".
+files-output=no
+
+# Tells whether to display a full report or only the messages
+reports=yes
+
+# Python expression which should return a note less than 10 (10 is the highest
+# note). You have access to the variables errors warning, statement which
+# respectively contain the number of errors / warnings messages and the total
+# number of statements analyzed. This is used by the global evaluation report
+# (RP0004).
+evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
+
+# Add a comment according to your evaluation note. This is used by the global
+# evaluation report (RP0004).
+comment=no
+
+
+[SIMILARITIES]
+
+# Minimum lines number of a similarity.
+min-similarity-lines=4
+
+# Ignore comments when computing similarities.
+ignore-comments=yes
+
+# Ignore docstrings when computing similarities.
+ignore-docstrings=yes
+
+
+[TYPECHECK]
+
+# Tells whether missing members accessed in mixin class should be ignored. A
+# mixin class is detected if its name ends with "mixin" (case insensitive).
+ignore-mixin-members=yes
+
+# List of classes names for which member attributes should not be checked
+# (useful for classes with attributes dynamically set).
+ignored-classes=SQLObject
+
+# When zope mode is activated, add a predefined set of Zope acquired attributes
+# to generated-members.
+zope=no
+
+# List of members which are set dynamically and missed by pylint inference
+# system, and so shouldn't trigger E0201 when accessed. Python regular
+# expressions are accepted.
+generated-members=REQUEST,acl_users,aq_parent
+
+
+[FORMAT]
+
+# Maximum number of characters on a single line.
+max-line-length=80
+
+# Maximum number of lines in a module
+max-module-lines=1000
+
+# String used as indentation unit. This is usually "    " (4 spaces) or "\t" (1
+# tab).
+indent-string='    '
+
+
+[MISCELLANEOUS]
+
+# List of note tags to take in consideration, separated by a comma.
+notes=FIXME,XXX,TODO
+
+
+[VARIABLES]
+
+# Tells whether we should check for unused import in __init__ files.
+init-import=no
+
+# A regular expression matching the beginning of the name of dummy variables
+# (i.e. not used).
+dummy-variables-rgx=_|dummy
+
+# List of additional names supposed to be defined in builtins. Remember that
+# you should avoid to define new builtins when possible.
+additional-builtins=
+
+
+[BASIC]
+
+# Required attributes for module, separated by a comma
+required-attributes=
+
+# List of builtins function names that should not be used, separated by a comma
+bad-functions=apply,input
+
+# Regular expression which should only match correct module names
+module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
+
+# Regular expression which should only match correct module level names
+const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__)|version)$
+
+# Regular expression which should only match correct class names
+class-rgx=[A-Z_][a-zA-Z0-9]+$
+
+# Regular expression which should only match correct function names
+function-rgx=[a-z_][a-z0-9_]{0,30}$
+
+# Regular expression which should only match correct method names
+method-rgx=[a-z_][a-z0-9_]{0,30}$
+
+# Regular expression which should only match correct instance attribute names
+attr-rgx=[a-z_][a-z0-9_]{0,30}$
+
+# Regular expression which should only match correct argument names
+argument-rgx=[a-z_][a-z0-9_]{0,30}$
+
+# Regular expression which should only match correct variable names
+variable-rgx=[a-z_][a-z0-9_]{0,30}$
+
+# Regular expression which should only match correct list comprehension /
+# generator expression variable names
+inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$
+
+# Good variable names which should always be accepted, separated by a comma
+good-names=i,j,k,ex,Run,_
+
+# Bad variable names which should always be refused, separated by a comma
+bad-names=foo,bar,baz,toto,tutu,tata
+
+# Regular expression which should only match functions or classes name which do
+# not require a docstring
+no-docstring-rgx=__.*__
+
+
+[CLASSES]
+
+# List of interface methods to ignore, separated by a comma. This is used for
+# instance to not check methods defines in Zope's Interface base class.
+ignore-iface-methods=isImplementedBy,deferred,extends,names,namesAndDescriptions,queryDescriptionFor,getBases,getDescriptionFor,getDoc,getName,getTaggedValue,getTaggedValueTags,isEqualOrExtendedBy,setTaggedValue,isImplementedByInstancesOf,adaptWith,is_implemented_by
+
+# List of method names used to declare (i.e. assign) instance attributes.
+defining-attr-methods=__init__,__new__,setUp
+
+# List of valid names for the first argument in a class method.
+valid-classmethod-first-arg=cls
+
+
+[DESIGN]
+
+# Maximum number of arguments for function / method
+max-args=5
+
+# Argument names that match this expression will be ignored. Default to name
+# with leading underscore
+ignored-argument-names=_.*
+
+# Maximum number of locals for function / method body
+max-locals=15
+
+# Maximum number of return / yield for function / method body
+max-returns=6
+
+# Maximum number of branch for function / method body
+max-branchs=12
+
+# Maximum number of statements in function / method body
+max-statements=50
+
+# Maximum number of parents for a class (see R0901).
+max-parents=7
+
+# Maximum number of attributes for a class (see R0902).
+max-attributes=7
+
+# Minimum number of public methods for a class (see R0903).
+min-public-methods=2
+
+# Maximum number of public methods for a class (see R0904).
+max-public-methods=20
+
+
+[IMPORTS]
+
+# Deprecated modules which should not be used, separated by a comma
+deprecated-modules=regsub,string,TERMIOS,Bastion,rexec
+
+# Create a graph of every (i.e. internal and external) dependencies in the
+# given file (report RP0402 must not be disabled)
+import-graph=
+
+# Create a graph of external dependencies in the given file (report RP0402 must
+# not be disabled)
+ext-import-graph=
+
+# Create a graph of internal dependencies in the given file (report RP0402 must
+# not be disabled)
+int-import-graph=
+
+
+[EXCEPTIONS]
+
+# Exceptions that will emit a warning when being caught. Defaults to
+# "Exception"
+overgeneral-exceptions=Exception
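Note: the `disable=` line in this new file suppresses specific checks
project-wide; in pylint versions of this era, C0111 is missing-docstring and
W0702 is bare-except, for example. The same codes can instead be silenced per
line, as in this hypothetical sketch:

    import time

    def keep_polling(fn):
        while True:
            try:
                return fn()
            except:  # pylint: disable=W0702
                # a bare except would normally be flagged; the rcfile above
                # disables W0702 everywhere instead of per line
                time.sleep(1)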
MANIFEST.in (deleted, 29 lines)
@@ -1,29 +0,0 @@
-# Root
-include README.txt
-include setup.cfg
-include setup.py
-include versioneer.py
-include Makefile
-include .coveragerc
-include .pylintrc
-include requirements.txt
-
-# Cython files -- include .pyx source, but not the generated .c files
-# (Downstream systems must have cython installed in order to build)
-recursive-include nilmdb/server *.pyx *.pyxdep *.pxd
-exclude nilmdb/server/interval.c
-exclude nilmdb/server/rbtree.c
-
-# Version
-include nilmdb/_version.py
-
-# Tests
-recursive-include tests *.py
-recursive-include tests/data *
-include tests/test.order
-
-# Docs
-recursive-include docs Makefile *.md
-
-# Extras
-recursive-include extras *
Makefile (30 changes)
@@ -2,49 +2,45 @@
 all: test
 
 version:
-	python3 setup.py version
+	python setup.py version
 
 build:
-	python3 setup.py build_ext --inplace
+	python setup.py build_ext --inplace
 
 dist: sdist
 sdist:
-	python3 setup.py sdist
+	python setup.py sdist
 
 install:
-	python3 setup.py install
+	python setup.py install
 
 develop:
-	python3 setup.py develop
+	python setup.py develop
 
 docs:
 	make -C docs
 
-ctrl: flake
-flake:
-	flake8 nilmdb
 lint:
-	pylint3 --rcfile=setup.cfg nilmdb
+	pylint --rcfile=.pylintrc nilmdb
 
 test:
-ifneq ($(INSIDE_EMACS),)
+ifeq ($(INSIDE_EMACS), t)
 	# Use the slightly more flexible script
-	python3 setup.py build_ext --inplace
-	python3 tests/runtests.py
+	python setup.py build_ext --inplace
+	python tests/runtests.py
 else
 	# Let setup.py check dependencies, build stuff, and run the test
-	python3 setup.py nosetests
+	python setup.py nosetests
 endif
 
 clean::
-	find . -name '*.pyc' -o -name '__pycache__' -print0 | xargs -0 rm -rf
+	find . -name '*pyc' | xargs rm -f
 	rm -f .coverage
 	rm -rf tests/*testdb*
-	rm -rf nilmdb.egg-info/ build/ nilmdb/server/*.so
+	rm -rf nilmdb.egg-info/ build/ nilmdb/server/*.so MANIFEST.in
 	make -C docs clean
 
 gitclean::
 	git clean -dXf
 
-.PHONY: all version build dist sdist install docs test
-.PHONY: ctrl lint flake clean gitclean
+.PHONY: all version build dist sdist install docs lint test clean gitclean
README.md (deleted, 40 lines)
@@ -1,40 +0,0 @@
-# nilmdb: Non-Intrusive Load Monitor Database
-by Jim Paris <jim@jtan.com>
-
-NilmDB requires Python 3.8 or newer.
-
-## Prerequisites:
-
-    # Runtime and build environments
-    sudo apt install python3 python3-dev python3-venv python3-pip
-
-    # Create a new Python virtual environment to isolate deps.
-    python3 -m venv ../venv
-    source ../venv/bin/activate # run "deactivate" to leave
-
-    # Install all Python dependencies
-    pip3 install -r requirements.txt
-
-## Test:
-
-    python3 setup.py nosetests
-
-## Install:
-
-Install it into the virtual environment
-
-    python3 setup.py install
-
-If you want to instead install it system-wide, you will also need to
-install the requirements system-wide:
-
-    sudo pip3 install -r requirements.txt
-    sudo python3 setup.py install
-
-## Usage:
-
-    nilmdb-server --help
-    nilmdb-fsck --help
-    nilmtool --help
-
-See docs/wsgi.md for info on setting up a WSGI application in Apache.
README.txt (new file, 33 lines)
@@ -0,0 +1,33 @@
+nilmdb: Non-Intrusive Load Monitor Database
+by Jim Paris <jim@jtan.com>
+
+Prerequisites:
+
+  # Runtime and build environments
+  sudo apt-get install python2.7 python2.7-dev python-setuptools cython
+
+  # Base NilmDB dependencies
+  sudo apt-get install python-cherrypy3 python-decorator python-simplejson
+  sudo apt-get install python-requests python-dateutil python-tz
+  sudo apt-get install python-progressbar python-psutil
+
+  # Other dependencies (required by some modules)
+  sudo apt-get install python-numpy
+
+  # Tools for running tests
+  sudo apt-get install python-nose python-coverage
+
+Test:
+  python setup.py nosetests
+
+Install:
+
+  python setup.py install
+
+Usage:
+
+  nilmdb-server --help
+  nilmdb-fsck --help
+  nilmtool --help
+
+See docs/wsgi.md for info on setting up a WSGI application in Apache.
docs/design.md
@@ -430,7 +430,7 @@ mod_wsgi requires "WSGIChunkedRequest On" to handle
 "Transfer-encoding: Chunked" requests. However, `/stream/insert`
 doesn't handle this correctly right now, because:
 
-- The `cherrypy.request.body.read()` call needs to be fixed for chunked requests
+- The `cherrpy.request.body.read()` call needs to be fixed for chunked requests
 
 - We don't want to just buffer endlessly in the server, and it will
   require some thought on how to handle data in chunks (what to do about
@@ -438,32 +438,3 @@ doesn't handle this correctly right now, because:
 
 It is probably better to just keep the endpoint management on the client
 side, so leave "WSGIChunkedRequest off" for now.
-
-Unicode & character encoding
-----------------------------
-
-Stream data is passed back and forth as raw `bytes` objects in most
-places, including the `nilmdb.client` and command-line interfaces.
-This is done partially for performance reasons, and partially to
-support the binary insert/extract options, where character-set encoding
-would not apply.
-
-For the HTTP server, the raw bytes transferred over HTTP are interpreted
-as follows:
-- For `/stream/insert`, the client-provided `Content-Type` is ignored,
-  and the data is read as if it were `application/octet-stream`.
-- For `/stream/extract`, the returned data is `application/octet-stream`.
-- All other endpoints communicate via JSON, which is specified to always
-  be encoded as UTF-8. This includes:
-  - `/version`
-  - `/dbinfo`
-  - `/stream/list`
-  - `/stream/create`
-  - `/stream/destroy`
-  - `/stream/rename`
-  - `/stream/get_metadata`
-  - `/stream/set_metadata`
-  - `/stream/update_metadata`
-  - `/stream/remove`
-  - `/stream/intervals`
-
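Note: the deleted "Unicode & character encoding" section above still
summarizes the master branch's behavior: stream data moves as raw bytes,
everything else as UTF-8 JSON. A sketch of what that implies for a generic
HTTP client (the server URL and stream path here are hypothetical):

    import json
    import requests

    base = "http://localhost:12380"  # hypothetical server address

    # /stream/extract returns application/octet-stream; keep it as bytes
    raw = requests.get(base + "/stream/extract",
                       params={"path": "/demo/stream"}).content

    # all other endpoints, e.g. /version, return UTF-8 encoded JSON
    version = json.loads(
        requests.get(base + "/version").content.decode("utf-8"))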
@@ -1,8 +1,8 @@
-#!/usr/bin/env python3
+#!/usr/bin/python
 
 import os
 import sys
-import pickle
+import cPickle as pickle
 import argparse
 import fcntl
 import re
@@ -44,7 +44,7 @@ with open(lock, "w") as f:
         maxsize = fix[fixpath]
         if size > maxsize:
             diff = size - maxsize
-            print(diff, "too big:", fn)
+            print diff, "too big:", fn
             if args.yes:
                 with open(fn, "a+") as dbfile:
                     dbfile.truncate(maxsize)
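Note: the two hunks above show the usual Python 2/3 split for this
maintenance script: the `print()` function versus the `print` statement, and
`pickle` versus Python 2's faster `cPickle`. The compatibility difference as
a standalone sketch (values are hypothetical):

    # Python 2 (python2 branch):
    #     import cPickle as pickle
    #     print diff, "too big:", fn
    # Python 3 (master):
    import pickle  # cPickle was folded into pickle in Python 3

    diff, fn = 4096, "example.db"  # hypothetical values
    print(diff, "too big:", fn)    # print is a function in Python 3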
nilmdb/__init__.py
@@ -1,5 +1,10 @@
 """Main NilmDB import"""
 
-from ._version import get_versions
+# These aren't imported automatically, because loading the server
+# stuff isn't always necessary.
+#from nilmdb.server import NilmDB, Server
+#from nilmdb.client import Client
+
+from nilmdb._version import get_versions
 __version__ = get_versions()['version']
 del get_versions
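Note: because the python2 branch's `__init__.py` deliberately leaves the
server and client subpackages unimported (so that `import nilmdb` stays cheap
and does not drag in server-side dependencies such as CherryPy), callers
import them explicitly, e.g.:

    import nilmdb
    from nilmdb.client import Client          # only if client code is needed
    from nilmdb.server import NilmDB, Server  # only if running a server

    print(nilmdb.__version__)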
nilmdb/_version.py
@@ -1,520 +1,197 @@
 
+IN_LONG_VERSION_PY = True
 # This file helps to compute a version number in source trees obtained from
 # git-archive tarball (such as those provided by githubs download-from-tag
-# feature). Distribution tarballs (built by setup.py sdist) and build
+# feature). Distribution tarballs (build by setup.py sdist) and build
 # directories (produced by setup.py build) will contain a much shorter file
 # that just contains the computed version number.
 
 # This file is released into the public domain. Generated by
-# versioneer-0.18 (https://github.com/warner/python-versioneer)
+# versioneer-0.7+ (https://github.com/warner/python-versioneer)
 
-"""Git implementation of _version.py."""
-
-import errno
-import os
-import re
+# these strings will be replaced by git during git-archive
+git_refnames = "$Format:%d$"
+git_full = "$Format:%H$"
+
 import subprocess
 import sys
 
-def get_keywords():
-    """Get the keywords needed to look up the version information."""
-    # these strings will be replaced by git during git-archive.
-    # setup.py/versioneer.py will grep for the variable names, so they must
-    # each be defined on a line of their own. _version.py will just call
-    # get_keywords().
-    git_refnames = "$Format:%d$"
-    git_full = "$Format:%H$"
-    git_date = "$Format:%ci$"
-    keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
-    return keywords
-
-
-class VersioneerConfig:
-    """Container for Versioneer configuration parameters."""
-
-
-def get_config():
-    """Create, populate and return the VersioneerConfig() object."""
-    # these strings are filled in when 'setup.py versioneer' creates
-    # _version.py
-    cfg = VersioneerConfig()
-    cfg.VCS = "git"
-    cfg.style = "pep440"
-    cfg.tag_prefix = "nilmdb-"
-    cfg.parentdir_prefix = "nilmdb-"
-    cfg.versionfile_source = "nilmdb/_version.py"
-    cfg.verbose = False
-    return cfg
-
-
-class NotThisMethod(Exception):
-    """Exception raised if a method is not valid for the current scenario."""
-
-
-LONG_VERSION_PY = {}
-HANDLERS = {}
-
-
-def register_vcs_handler(vcs, method):  # decorator
-    """Decorator to mark a method as the handler for a particular VCS."""
-    def decorate(f):
-        """Store f in HANDLERS[vcs][method]."""
-        if vcs not in HANDLERS:
-            HANDLERS[vcs] = {}
-        HANDLERS[vcs][method] = f
-        return f
-    return decorate
-
-
-def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
-                env=None):
-    """Call the given command(s)."""
-    assert isinstance(commands, list)
-    p = None
-    for c in commands:
-        try:
-            dispcmd = str([c] + args)
-            # remember shell=False, so use git.cmd on windows, not just git
-            p = subprocess.Popen([c] + args, cwd=cwd, env=env,
-                                 stdout=subprocess.PIPE,
-                                 stderr=(subprocess.PIPE if hide_stderr
-                                         else None))
-            break
-        except EnvironmentError:
-            e = sys.exc_info()[1]
-            if e.errno == errno.ENOENT:
-                continue
-            if verbose:
-                print("unable to run %s" % dispcmd)
-                print(e)
-            return None, None
-    else:
-        if verbose:
-            print("unable to find command, tried %s" % (commands,))
-        return None, None
-    stdout = p.communicate()[0].strip()
-    if sys.version_info[0] >= 3:
-        stdout = stdout.decode()
-    if p.returncode != 0:
-        if verbose:
-            print("unable to run %s (error)" % dispcmd)
-            print("stdout was %s" % stdout)
-        return None, p.returncode
-    return stdout, p.returncode
-
-
-def versions_from_parentdir(parentdir_prefix, root, verbose):
-    """Try to determine the version from the parent directory name.
-
-    Source tarballs conventionally unpack into a directory that includes both
-    the project name and a version string. We will also support searching up
-    two directory levels for an appropriately named parent directory
-    """
-    rootdirs = []
-    for i in range(3):
-        dirname = os.path.basename(root)
-        if dirname.startswith(parentdir_prefix):
-            return {"version": dirname[len(parentdir_prefix):],
-                    "full-revisionid": None,
-                    "dirty": False, "error": None, "date": None}
-        else:
-            rootdirs.append(root)
-            root = os.path.dirname(root)  # up a level
-
-    if verbose:
-        print("Tried directories %s but none started with prefix %s" %
-              (str(rootdirs), parentdir_prefix))
-    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
-
-
-@register_vcs_handler("git", "get_keywords")
-def git_get_keywords(versionfile_abs):
-    """Extract version information from the given file."""
+def run_command(args, cwd=None, verbose=False):
+    try:
+        # remember shell=False, so use git.cmd on windows, not just git
+        p = subprocess.Popen(args, stdout=subprocess.PIPE, cwd=cwd)
+    except EnvironmentError:
+        e = sys.exc_info()[1]
+        if verbose:
+            print("unable to run %s" % args[0])
+            print(e)
+        return None
+    stdout = p.communicate()[0].strip()
+    if sys.version >= '3':
+        stdout = stdout.decode()
+    if p.returncode != 0:
+        if verbose:
+            print("unable to run %s (error)" % args[0])
+        return None
+    return stdout
+
+import sys
+import re
+import os.path
+
+def get_expanded_variables(versionfile_source):
     # the code embedded in _version.py can just fetch the value of these
-    # keywords. When used from setup.py, we don't want to import _version.py,
-    # so we do it with a regexp instead. This function is not used from
-    # _version.py.
-    keywords = {}
+    # variables. When used from setup.py, we don't want to import
+    # _version.py, so we do it with a regexp instead. This function is not
+    # used from _version.py.
+    variables = {}
     try:
-        f = open(versionfile_abs, "r")
-        for line in f.readlines():
+        for line in open(versionfile_source,"r").readlines():
             if line.strip().startswith("git_refnames ="):
                 mo = re.search(r'=\s*"(.*)"', line)
                 if mo:
-                    keywords["refnames"] = mo.group(1)
+                    variables["refnames"] = mo.group(1)
             if line.strip().startswith("git_full ="):
                 mo = re.search(r'=\s*"(.*)"', line)
                 if mo:
-                    keywords["full"] = mo.group(1)
-            if line.strip().startswith("git_date ="):
-                mo = re.search(r'=\s*"(.*)"', line)
-                if mo:
-                    keywords["date"] = mo.group(1)
-        f.close()
+                    variables["full"] = mo.group(1)
     except EnvironmentError:
         pass
-    return keywords
+    return variables
 
-
-@register_vcs_handler("git", "keywords")
-def git_versions_from_keywords(keywords, tag_prefix, verbose):
-    """Get version information from git keywords."""
-    if not keywords:
-        raise NotThisMethod("no keywords at all, weird")
-    date = keywords.get("date")
-    if date is not None:
-        # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
-        # datestamp. However we prefer "%ci" (which expands to an "ISO-8601
-        # -like" string, which we must then edit to make compliant), because
-        # it's been around since git-1.5.3, and it's too difficult to
-        # discover which version we're using, or to work around using an
-        # older one.
-        date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
-    refnames = keywords["refnames"].strip()
+def versions_from_expanded_variables(variables, tag_prefix, verbose=False):
+    refnames = variables["refnames"].strip()
     if refnames.startswith("$Format"):
         if verbose:
-            print("keywords are unexpanded, not using")
-        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
+            print("variables are unexpanded, not using")
+        return {} # unexpanded, so not in an unpacked git-archive tarball
     refs = set([r.strip() for r in refnames.strip("()").split(",")])
-    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
-    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
-    TAG = "tag: "
-    tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
-    if not tags:
-        # Either we're using git < 1.8.3, or there really are no tags. We use
-        # a heuristic: assume all version tags have a digit. The old git %d
-        # expansion behaves like git log --decorate=short and strips out the
-        # refs/heads/ and refs/tags/ prefixes that would let us distinguish
-        # between branches and tags. By ignoring refnames without digits, we
-        # filter out many common branch names like "release" and
-        # "stabilization", as well as "HEAD" and "master".
-        tags = set([r for r in refs if re.search(r'\d', r)])
-        if verbose:
-            print("discarding '%s', no digits" % ",".join(refs - tags))
+    for ref in list(refs):
+        if not re.search(r'\d', ref):
+            if verbose:
+                print("discarding '%s', no digits" % ref)
+            refs.discard(ref)
+    # Assume all version tags have a digit. git's %d expansion
+    # behaves like git log --decorate=short and strips out the
+    # refs/heads/ and refs/tags/ prefixes that would let us
+    # distinguish between branches and tags. By ignoring refnames
+    # without digits, we filter out many common branch names like
+    # "release" and "stabilization", as well as "HEAD" and "master".
     if verbose:
-        print("likely tags: %s" % ",".join(sorted(tags)))
-    for ref in sorted(tags):
+        print("remaining refs: %s" % ",".join(sorted(refs)))
+    for ref in sorted(refs):
         # sorting will prefer e.g. "2.0" over "2.0rc1"
         if ref.startswith(tag_prefix):
             r = ref[len(tag_prefix):]
             if verbose:
                 print("picking %s" % r)
-            return {"version": r,
-                    "full-revisionid": keywords["full"].strip(),
-                    "dirty": False, "error": None,
-                    "date": date}
-    # no suitable tags, so version is "0+unknown", but full hex is still there
+            return { "version": r,
+                     "full": variables["full"].strip() }
+    # no suitable tags, so we use the full revision id
     if verbose:
-        print("no suitable tags, using unknown + full revision id")
-    return {"version": "0+unknown",
-            "full-revisionid": keywords["full"].strip(),
-            "dirty": False, "error": "no suitable tags", "date": None}
+        print("no suitable tags, using full revision id")
+    return { "version": variables["full"].strip(),
+             "full": variables["full"].strip() }
 
-
-@register_vcs_handler("git", "pieces_from_vcs")
-def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
-    """Get version from 'git describe' in the root of the source tree.
-
-    This only gets called if the git-archive 'subst' keywords were *not*
-    expanded, and _version.py hasn't already been rewritten with a short
-    version string, meaning we're inside a checked out source tree.
-    """
-    GITS = ["git"]
-    if sys.platform == "win32":
-        GITS = ["git.cmd", "git.exe"]
-
-    out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
-                          hide_stderr=True)
-    if rc != 0:
-        if verbose:
-            print("Directory %s not under git control" % root)
-        raise NotThisMethod("'git rev-parse --git-dir' returned error")
-
-    # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
-    # if there isn't one, this yields HEX[-dirty] (no NUM)
-    describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
-                                          "--always", "--long",
-                                          "--match", "%s*" % tag_prefix],
-                                   cwd=root)
-    # --long was added in git-1.5.5
-    if describe_out is None:
-        raise NotThisMethod("'git describe' failed")
-    describe_out = describe_out.strip()
-    full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
-    if full_out is None:
-        raise NotThisMethod("'git rev-parse' failed")
-    full_out = full_out.strip()
-
-    pieces = {}
-    pieces["long"] = full_out
-    pieces["short"] = full_out[:7]  # maybe improved later
-    pieces["error"] = None
-
-    # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
-    # TAG might have hyphens.
-    git_describe = describe_out
-
-    # look for -dirty suffix
-    dirty = git_describe.endswith("-dirty")
-    pieces["dirty"] = dirty
-    if dirty:
-        git_describe = git_describe[:git_describe.rindex("-dirty")]
-
-    # now we have TAG-NUM-gHEX or HEX
-
-    if "-" in git_describe:
-        # TAG-NUM-gHEX
-        mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
-        if not mo:
-            # unparseable. Maybe git-describe is misbehaving?
-            pieces["error"] = ("unable to parse git-describe output: '%s'"
-                               % describe_out)
-            return pieces
-
-        # tag
-        full_tag = mo.group(1)
-        if not full_tag.startswith(tag_prefix):
-            if verbose:
-                fmt = "tag '%s' doesn't start with prefix '%s'"
-                print(fmt % (full_tag, tag_prefix))
-            pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
-                               % (full_tag, tag_prefix))
-            return pieces
-        pieces["closest-tag"] = full_tag[len(tag_prefix):]
-
-        # distance: number of commits since tag
-        pieces["distance"] = int(mo.group(2))
-
-        # commit: short hex revision ID
-        pieces["short"] = mo.group(3)
-
-    else:
-        # HEX: no tags
-        pieces["closest-tag"] = None
-        count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
-                                    cwd=root)
-        pieces["distance"] = int(count_out)  # total number of commits
-
-    # commit date: see ISO-8601 comment in git_versions_from_keywords()
-    date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
-                       cwd=root)[0].strip()
-    pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
-
-    return pieces
-
-
-def plus_or_dot(pieces):
-    """Return a + if we don't already have one, else return a ."""
-    if "+" in pieces.get("closest-tag", ""):
-        return "."
-    return "+"
-
-
-def render_pep440(pieces):
-    """Build up version string, with post-release "local version identifier".
-
-    Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
-    get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
-
-    Exceptions:
-    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
-    """
-    if pieces["closest-tag"]:
-        rendered = pieces["closest-tag"]
-        if pieces["distance"] or pieces["dirty"]:
-            rendered += plus_or_dot(pieces)
-            rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
-            if pieces["dirty"]:
-                rendered += ".dirty"
-    else:
-        # exception #1
-        rendered = "0+untagged.%d.g%s" % (pieces["distance"],
-                                          pieces["short"])
-        if pieces["dirty"]:
-            rendered += ".dirty"
-    return rendered
-
-
-def render_pep440_pre(pieces):
-    """TAG[.post.devDISTANCE] -- No -dirty.
-
-    Exceptions:
-    1: no tags. 0.post.devDISTANCE
-    """
-    if pieces["closest-tag"]:
-        rendered = pieces["closest-tag"]
-        if pieces["distance"]:
-            rendered += ".post.dev%d" % pieces["distance"]
-    else:
-        # exception #1
-        rendered = "0.post.dev%d" % pieces["distance"]
-    return rendered
-
-
-def render_pep440_post(pieces):
-    """TAG[.postDISTANCE[.dev0]+gHEX] .
-
-    The ".dev0" means dirty. Note that .dev0 sorts backwards
-    (a dirty tree will appear "older" than the corresponding clean one),
-    but you shouldn't be releasing software with -dirty anyways.
-
-    Exceptions:
-    1: no tags. 0.postDISTANCE[.dev0]
-    """
-    if pieces["closest-tag"]:
-        rendered = pieces["closest-tag"]
-        if pieces["distance"] or pieces["dirty"]:
-            rendered += ".post%d" % pieces["distance"]
-            if pieces["dirty"]:
-                rendered += ".dev0"
-            rendered += plus_or_dot(pieces)
-            rendered += "g%s" % pieces["short"]
-    else:
-        # exception #1
-        rendered = "0.post%d" % pieces["distance"]
-        if pieces["dirty"]:
-            rendered += ".dev0"
-        rendered += "+g%s" % pieces["short"]
-    return rendered
-
-
-def render_pep440_old(pieces):
-    """TAG[.postDISTANCE[.dev0]] .
-
-    The ".dev0" means dirty.
-
-    Eexceptions:
-    1: no tags. 0.postDISTANCE[.dev0]
-    """
-    if pieces["closest-tag"]:
-        rendered = pieces["closest-tag"]
-        if pieces["distance"] or pieces["dirty"]:
-            rendered += ".post%d" % pieces["distance"]
-            if pieces["dirty"]:
-                rendered += ".dev0"
-    else:
-        # exception #1
-        rendered = "0.post%d" % pieces["distance"]
-        if pieces["dirty"]:
-            rendered += ".dev0"
-    return rendered
-
-
-def render_git_describe(pieces):
-    """TAG[-DISTANCE-gHEX][-dirty].
-
-    Like 'git describe --tags --dirty --always'.
-
-    Exceptions:
-    1: no tags. HEX[-dirty] (note: no 'g' prefix)
-    """
-    if pieces["closest-tag"]:
-        rendered = pieces["closest-tag"]
-        if pieces["distance"]:
-            rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
-    else:
-        # exception #1
-        rendered = pieces["short"]
-    if pieces["dirty"]:
-        rendered += "-dirty"
-    return rendered
-
-
-def render_git_describe_long(pieces):
-    """TAG-DISTANCE-gHEX[-dirty].
-
-    Like 'git describe --tags --dirty --always -long'.
-    The distance/hash is unconditional.
-
-    Exceptions:
-    1: no tags. HEX[-dirty] (note: no 'g' prefix)
-    """
-    if pieces["closest-tag"]:
-        rendered = pieces["closest-tag"]
-        rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
-    else:
-        # exception #1
-        rendered = pieces["short"]
-    if pieces["dirty"]:
-        rendered += "-dirty"
-    return rendered
-
-
-def render(pieces, style):
-    """Render the given version pieces into the requested style."""
-    if pieces["error"]:
-        return {"version": "unknown",
-                "full-revisionid": pieces.get("long"),
-                "dirty": None,
-                "error": pieces["error"],
-                "date": None}
-
-    if not style or style == "default":
-        style = "pep440"  # the default
-
-    if style == "pep440":
-        rendered = render_pep440(pieces)
-    elif style == "pep440-pre":
-        rendered = render_pep440_pre(pieces)
-    elif style == "pep440-post":
-        rendered = render_pep440_post(pieces)
-    elif style == "pep440-old":
-        rendered = render_pep440_old(pieces)
-    elif style == "git-describe":
-        rendered = render_git_describe(pieces)
-    elif style == "git-describe-long":
-        rendered = render_git_describe_long(pieces)
-    else:
-        raise ValueError("unknown style '%s'" % style)
-
-    return {"version": rendered, "full-revisionid": pieces["long"],
-            "dirty": pieces["dirty"], "error": None,
-            "date": pieces.get("date")}
-
-
-def get_versions():
-    """Get version information or return default if unable to do so."""
-    # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
-    # __file__, we can work backwards from there to the root. Some
-    # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
-    # case we can only use expanded keywords.
-
-    cfg = get_config()
-    verbose = cfg.verbose
-
-    try:
-        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
-                                          verbose)
-    except NotThisMethod:
-        pass
-
-    try:
-        root = os.path.realpath(__file__)
-        # versionfile_source is the relative path from the top of the source
-        # tree (where the .git directory might live) to this file. Invert
-        # this to find the root from __file__.
-        for i in cfg.versionfile_source.split('/'):
-            root = os.path.dirname(root)
-    except NameError:
-        return {"version": "0+unknown", "full-revisionid": None,
-                "dirty": None,
-                "error": "unable to find root of source tree",
-                "date": None}
-
-    try:
-        pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
-        return render(pieces, cfg.style)
-    except NotThisMethod:
-        pass
-
-    try:
-        if cfg.parentdir_prefix:
-            return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
-    except NotThisMethod:
-        pass
-
-    return {"version": "0+unknown", "full-revisionid": None,
-            "dirty": None,
-            "error": "unable to compute version", "date": None}
+def versions_from_vcs(tag_prefix, versionfile_source, verbose=False):
+    # this runs 'git' from the root of the source tree. That either means
+    # someone ran a setup.py command (and this code is in versioneer.py, so
+    # IN_LONG_VERSION_PY=False, thus the containing directory is the root of
+    # the source tree), or someone ran a project-specific entry point (and
+    # this code is in _version.py, so IN_LONG_VERSION_PY=True, thus the
+    # containing directory is somewhere deeper in the source tree). This only
+    # gets called if the git-archive 'subst' variables were *not* expanded,
+    # and _version.py hasn't already been rewritten with a short version
+    # string, meaning we're inside a checked out source tree.
+
+    try:
+        here = os.path.abspath(__file__)
+    except NameError:
+        # some py2exe/bbfreeze/non-CPython implementations don't do __file__
+        return {} # not always correct
+
+    # versionfile_source is the relative path from the top of the source tree
+    # (where the .git directory might live) to this file. Invert this to find
+    # the root from __file__.
+    root = here
+    if IN_LONG_VERSION_PY:
+        for i in range(len(versionfile_source.split("/"))):
+            root = os.path.dirname(root)
+    else:
+        root = os.path.dirname(here)
+    if not os.path.exists(os.path.join(root, ".git")):
+        if verbose:
+            print("no .git in %s" % root)
+        return {}
+
+    GIT = "git"
+    if sys.platform == "win32":
+        GIT = "git.cmd"
+    stdout = run_command([GIT, "describe", "--tags", "--dirty", "--always"],
+                         cwd=root)
+    if stdout is None:
+        return {}
+    if not stdout.startswith(tag_prefix):
+        if verbose:
+            print("tag '%s' doesn't start with prefix '%s'" % (stdout, tag_prefix))
+        return {}
+    tag = stdout[len(tag_prefix):]
+    stdout = run_command([GIT, "rev-parse", "HEAD"], cwd=root)
+    if stdout is None:
+        return {}
+    full = stdout.strip()
+    if tag.endswith("-dirty"):
+        full += "-dirty"
+    return {"version": tag, "full": full}
+
+def versions_from_parentdir(parentdir_prefix, versionfile_source, verbose=False):
+    if IN_LONG_VERSION_PY:
+        # We're running from _version.py. If it's from a source tree
+        # (execute-in-place), we can work upwards to find the root of the
+        # tree, and then check the parent directory for a version string. If
+        # it's in an installed application, there's no hope.
+        try:
+            here = os.path.abspath(__file__)
+        except NameError:
+            # py2exe/bbfreeze/non-CPython don't have __file__
+            return {} # without __file__, we have no hope
+        # versionfile_source is the relative path from the top of the source
+        # tree to _version.py. Invert this to find the root from __file__.
+        root = here
+        for i in range(len(versionfile_source.split("/"))):
+            root = os.path.dirname(root)
+    else:
+        # we're running from versioneer.py, which means we're running from
+        # the setup.py in a source tree. sys.argv[0] is setup.py in the root.
+        here = os.path.abspath(sys.argv[0])
+        root = os.path.dirname(here)
+
+    # Source tarballs conventionally unpack into a directory that includes
+    # both the project name and a version string.
+    dirname = os.path.basename(root)
+    if not dirname.startswith(parentdir_prefix):
+        if verbose:
+            print("guessing rootdir is '%s', but '%s' doesn't start with prefix '%s'" %
+                  (root, dirname, parentdir_prefix))
+        return None
+    return {"version": dirname[len(parentdir_prefix):], "full": ""}
+
+tag_prefix = "nilmdb-"
+parentdir_prefix = "nilmdb-"
+versionfile_source = "nilmdb/_version.py"
+
+def get_versions(default={"version": "unknown", "full": ""}, verbose=False):
+    variables = { "refnames": git_refnames, "full": git_full }
+    ver = versions_from_expanded_variables(variables, tag_prefix, verbose)
+    if not ver:
+        ver = versions_from_vcs(tag_prefix, versionfile_source, verbose)
+    if not ver:
+        ver = versions_from_parentdir(parentdir_prefix, versionfile_source,
+                                      verbose)
+    if not ver:
+        ver = default
+    return ver
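Note: both versioneer generations above rely on the same git-archive trick:
`git_refnames = "$Format:%d$"` is expanded by `git archive` into a decoration
string such as `(HEAD -> master, tag: nilmdb-2.0.3)`, and the code then picks
the ref matching `tag_prefix`. A standalone sketch of that selection step
(the sample refnames string is made up):

    import re

    tag_prefix = "nilmdb-"
    git_refnames = " (HEAD -> master, tag: nilmdb-2.0.3)"  # hypothetical

    refs = set(r.strip() for r in git_refnames.strip().strip("()").split(","))
    # as in both implementations above: ignore refs without digits
    # ("HEAD", "master", ...) and strip a "tag: " prefix when present
    tags = sorted(r[len("tag: "):] if r.startswith("tag: ") else r
                  for r in refs if re.search(r"\d", r))
    version = next(t[len(tag_prefix):] for t in tags if t.startswith(tag_prefix))
    print(version)  # -> 2.0.3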
nilmdb/client/client.py
@@ -2,24 +2,24 @@
 
 """Class for performing HTTP client requests via libcurl"""
 
-import json
-import contextlib
-
 import nilmdb.utils
 import nilmdb.client.httpclient
 from nilmdb.client.errors import ClientError
-from nilmdb.utils.time import timestamp_to_string, string_to_timestamp
 
+import time
+import simplejson as json
+import contextlib
+
+from nilmdb.utils.time import timestamp_to_string, string_to_timestamp
 
 def extract_timestamp(line):
     """Extract just the timestamp from a line of data text"""
     return string_to_timestamp(line.split()[0])
 
-class Client():
+class Client(object):
     """Main client interface to the Nilm database."""
 
-    def __init__(self, url, post_json=False):
+    def __init__(self, url, post_json = False):
         """Initialize client with given URL. If post_json is true,
         POST requests are sent with Content-Type 'application/json'
         instead of the default 'x-www-form-urlencoded'."""
@@ -38,7 +38,7 @@ class Client():
         if self.post_json:
             # If we're posting as JSON, we don't need to encode it further here
             return data
-        return json.dumps(data, separators=(',', ':'))
+        return json.dumps(data, separators=(',',':'))
 
     def close(self):
         """Close the connection; safe to call multiple times"""
@@ -57,7 +57,7 @@ class Client():
         as a dictionary."""
         return self.http.get("dbinfo")
 
-    def stream_list(self, path=None, layout=None, extended=False):
+    def stream_list(self, path = None, layout = None, extended = False):
         """Return a sorted list of [path, layout] lists. If 'path' or
         'layout' are specified, only return streams that match those
         exact values. If 'extended' is True, the returned lists have
@@ -71,11 +71,11 @@ class Client():
         if extended:
             params["extended"] = 1
         streams = self.http.get("stream/list", params)
-        return nilmdb.utils.sort.sort_human(streams, key=lambda s: s[0])
+        return nilmdb.utils.sort.sort_human(streams, key = lambda s: s[0])
 
-    def stream_get_metadata(self, path, keys=None):
+    def stream_get_metadata(self, path, keys = None):
         """Get stream metadata"""
-        params = {"path": path}
+        params = { "path": path }
         if keys is not None:
             params["key"] = keys
         return self.http.get("stream/get_metadata", params)
@@ -99,28 +99,22 @@ class Client():
 
     def stream_create(self, path, layout):
         """Create a new stream"""
-        params = {
-            "path": path,
-            "layout": layout
-        }
+        params = { "path": path,
+                   "layout" : layout }
         return self.http.post("stream/create", params)
 
     def stream_destroy(self, path):
         """Delete stream. Fails if any data is still present."""
-        params = {
-            "path": path
-        }
+        params = { "path": path }
         return self.http.post("stream/destroy", params)
 
     def stream_rename(self, oldpath, newpath):
         """Rename a stream."""
-        params = {
-            "oldpath": oldpath,
-            "newpath": newpath
-        }
+        params = { "oldpath": oldpath,
+                   "newpath": newpath }
        return self.http.post("stream/rename", params)
 
-    def stream_remove(self, path, start=None, end=None):
+    def stream_remove(self, path, start = None, end = None):
         """Remove data from the specified time range"""
         params = {
             "path": path
@@ -135,7 +129,7 @@ class Client():
         return total
 
     @contextlib.contextmanager
-    def stream_insert_context(self, path, start=None, end=None):
+    def stream_insert_context(self, path, start = None, end = None):
         """Return a context manager that allows data to be efficiently
         inserted into a stream in a piecewise manner. Data is
         provided as ASCII lines, and is aggregated and sent to the
@@ -145,8 +139,8 @@ class Client():
 
         Example:
            with client.stream_insert_context('/path', start, end) as ctx:
-               ctx.insert('1234567890000000 1 2 3 4\\n')
-               ctx.insert('1234567891000000 1 2 3 4\\n')
+               ctx.insert('1234567890.0 1 2 3 4\\n')
+               ctx.insert('1234567891.0 1 2 3 4\\n')
 
         For more details, see help for nilmdb.client.client.StreamInserter
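Note: the docstring example in the hunk above is the intended API usage;
timestamps are integer microseconds on master but floating-point seconds on
the python2 branch. A slightly fuller sketch against a master-style server
(the URL and stream path here are hypothetical):

    import nilmdb.client

    client = nilmdb.client.Client("http://localhost:12380/")
    start, end = 1234567890000000, 1234567892000000

    with client.stream_insert_context("/demo/stream", start, end) as ctx:
        # rows are ASCII lines: a timestamp followed by the data columns
        ctx.insert("1234567890000000 1 2 3 4\n")
        ctx.insert("1234567891000000 1 2 3 4\n")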
@@ -158,21 +152,21 @@ class Client():
             ctx.finalize()
             ctx.destroy()
 
-    def stream_insert(self, path, data, start=None, end=None):
+    def stream_insert(self, path, data, start = None, end = None):
         """Insert rows of data into a stream. data should be a string
         or iterable that provides ASCII data that matches the database
         layout for path. Data is passed through stream_insert_context,
         so it will be broken into reasonably-sized chunks and
         start/end will be deduced if missing."""
         with self.stream_insert_context(path, start, end) as ctx:
-            if isinstance(data, bytes):
+            if isinstance(data, basestring):
                 ctx.insert(data)
             else:
                 for chunk in data:
                     ctx.insert(chunk)
         return ctx.last_response
 
-    def stream_insert_block(self, path, data, start, end, binary=False):
+    def stream_insert_block(self, path, data, start, end, binary = False):
         """Insert a single fixed block of data into the stream. It is
         sent directly to the server in one block with no further
         processing.
@@ -187,9 +181,9 @@ class Client():
         }
         if binary:
             params["binary"] = 1
-        return self.http.put("stream/insert", data, params)
+        return self.http.put("stream/insert", data, params, binary = binary)
 
-    def stream_intervals(self, path, start=None, end=None, diffpath=None):
+    def stream_intervals(self, path, start = None, end = None, diffpath = None):
         """
         Return a generator that yields each stream interval.
 
@@ -207,8 +201,8 @@ class Client():
             params["end"] = timestamp_to_string(end)
         return self.http.get_gen("stream/intervals", params)
 
-    def stream_extract(self, path, start=None, end=None,
-                       count=False, markup=False, binary=False):
+    def stream_extract(self, path, start = None, end = None,
+                       count = False, markup = False, binary = False):
         """
         Extract data from a stream. Returns a generator that yields
         lines of ASCII-formatted data that matches the database
@@ -238,18 +232,17 @@ class Client():
             params["markup"] = 1
         if binary:
             params["binary"] = 1
-        return self.http.get_gen("stream/extract", params, binary=binary)
+        return self.http.get_gen("stream/extract", params, binary = binary)
 
-    def stream_count(self, path, start=None, end=None):
+    def stream_count(self, path, start = None, end = None):
         """
         Return the number of rows of data in the stream that satisfy
         the given timestamps.
         """
-        counts = list(self.stream_extract(path, start, end, count=True))
+        counts = list(self.stream_extract(path, start, end, count = True))
         return int(counts[0])
 
-class StreamInserter():
+class StreamInserter(object):
     """Object returned by stream_insert_context() that manages
     the insertion of rows of data into a particular path.
 
@@ -337,8 +330,8 @@ class StreamInserter():
 
         # Send the block once we have enough data
|
||||||
if self._block_len >= maxdata:
|
if self._block_len >= maxdata:
|
||||||
self._send_block(final=False)
|
self._send_block(final = False)
|
||||||
if self._block_len >= self._max_data_after_send:
|
if self._block_len >= self._max_data_after_send: # pragma: no cover
|
||||||
raise ValueError("too much data left over after trying"
|
raise ValueError("too much data left over after trying"
|
||||||
" to send intermediate block; is it"
|
" to send intermediate block; is it"
|
||||||
" missing newlines or malformed?")
|
" missing newlines or malformed?")
|
||||||
|
@ -364,12 +357,12 @@ class StreamInserter():
|
||||||
|
|
||||||
If more data is inserted after a finalize(), it will become
|
If more data is inserted after a finalize(), it will become
|
||||||
part of a new interval and there may be a gap left in-between."""
|
part of a new interval and there may be a gap left in-between."""
|
||||||
self._send_block(final=True)
|
self._send_block(final = True)
|
||||||
|
|
||||||
def send(self):
|
def send(self):
|
||||||
"""Send any data that we might have buffered up. Does not affect
|
"""Send any data that we might have buffered up. Does not affect
|
||||||
any other treatment of timestamps or endpoints."""
|
any other treatment of timestamps or endpoints."""
|
||||||
self._send_block(final=False)
|
self._send_block(final = False)
|
||||||
|
|
||||||
def _get_first_noncomment(self, block):
|
def _get_first_noncomment(self, block):
|
||||||
"""Return the (start, end) indices of the first full line in
|
"""Return the (start, end) indices of the first full line in
|
||||||
|
@ -377,10 +370,10 @@ class StreamInserter():
|
||||||
there isn't one."""
|
there isn't one."""
|
||||||
start = 0
|
start = 0
|
||||||
while True:
|
while True:
|
||||||
end = block.find(b'\n', start)
|
end = block.find('\n', start)
|
||||||
if end < 0:
|
if end < 0:
|
||||||
raise IndexError
|
raise IndexError
|
||||||
if block[start] != b'#'[0]:
|
if block[start] != '#':
|
||||||
return (start, (end + 1))
|
return (start, (end + 1))
|
||||||
start = end + 1
|
start = end + 1
|
||||||
|
|
||||||
|
@ -388,22 +381,22 @@ class StreamInserter():
|
||||||
"""Return the (start, end) indices of the last full line in
|
"""Return the (start, end) indices of the last full line in
|
||||||
block[:length] that isn't a comment, or raise IndexError if
|
block[:length] that isn't a comment, or raise IndexError if
|
||||||
there isn't one."""
|
there isn't one."""
|
||||||
end = block.rfind(b'\n')
|
end = block.rfind('\n')
|
||||||
if end <= 0:
|
if end <= 0:
|
||||||
raise IndexError
|
raise IndexError
|
||||||
while True:
|
while True:
|
||||||
start = block.rfind(b'\n', 0, end)
|
start = block.rfind('\n', 0, end)
|
||||||
if block[start + 1] != b'#'[0]:
|
if block[start + 1] != '#':
|
||||||
return ((start + 1), end)
|
return ((start + 1), end)
|
||||||
if start == -1:
|
if start == -1:
|
||||||
raise IndexError
|
raise IndexError
|
||||||
end = start
|
end = start
|
||||||
|
|
||||||
def _send_block(self, final=False):
|
def _send_block(self, final = False):
|
||||||
"""Send data currently in the block. The data sent will
|
"""Send data currently in the block. The data sent will
|
||||||
consist of full lines only, so some might be left over."""
|
consist of full lines only, so some might be left over."""
|
||||||
# Build the full string to send
|
# Build the full string to send
|
||||||
block = b"".join(self._block_data)
|
block = "".join(self._block_data)
|
||||||
|
|
||||||
start_ts = self._interval_start
|
start_ts = self._interval_start
|
||||||
if start_ts is None:
|
if start_ts is None:
|
||||||
|
@ -420,7 +413,7 @@ class StreamInserter():
|
||||||
# or the timestamp of the last line plus epsilon.
|
# or the timestamp of the last line plus epsilon.
|
||||||
end_ts = self._interval_end
|
end_ts = self._interval_end
|
||||||
try:
|
try:
|
||||||
if block[-1] != b'\n'[0]:
|
if block[-1] != '\n':
|
||||||
raise ValueError("final block didn't end with a newline")
|
raise ValueError("final block didn't end with a newline")
|
||||||
if end_ts is None:
|
if end_ts is None:
|
||||||
(spos, epos) = self._get_last_noncomment(block)
|
(spos, epos) = self._get_last_noncomment(block)
|
||||||
|
@ -454,7 +447,7 @@ class StreamInserter():
|
||||||
# the server complain so that the error is the same
|
# the server complain so that the error is the same
|
||||||
# as if we hadn't done this chunking.
|
# as if we hadn't done this chunking.
|
||||||
end_ts = self._interval_end
|
end_ts = self._interval_end
|
||||||
self._block_data = [block[spos:]]
|
self._block_data = [ block[spos:] ]
|
||||||
self._block_len = (epos - spos)
|
self._block_len = (epos - spos)
|
||||||
block = block[:spos]
|
block = block[:spos]
|
||||||
|
|
||||||
|
@ -472,6 +465,6 @@ class StreamInserter():
|
||||||
|
|
||||||
# Send it
|
# Send it
|
||||||
self.last_response = self._client.stream_insert_block(
|
self.last_response = self._client.stream_insert_block(
|
||||||
self._path, block, start_ts, end_ts, binary=False)
|
self._path, block, start_ts, end_ts, binary = False)
|
||||||
|
|
||||||
return
|
return
|
||||||
|
|
|
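The insertion API above is easiest to see end to end. A minimal sketch of piecewise insertion against the master branch, assuming a server at the default URL and an existing stream at the hypothetical path /test/raw:

    import nilmdb.client

    client = nilmdb.client.Client("http://localhost/nilmdb/")
    # Each line is "<timestamp> <col1> <col2> ...".  On master, timestamps
    # are integer microseconds; the python2 branch used float seconds.
    with client.stream_insert_context('/test/raw') as ctx:
        ctx.insert('1234567890000000 1 2 3 4\n')
        ctx.insert('1234567891000000 1 2 3 4\n')
    # Leaving the block sends any buffered data and finalizes the interval;
    # start/end are deduced from the inserted lines when not given.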
@@ -1,41 +1,33 @@
 """HTTP client errors"""

-from nilmdb.utils.printf import sprintf
+from nilmdb.utils.printf import *


 class Error(Exception):
     """Base exception for both ClientError and ServerError responses"""
     def __init__(self,
-                 status="Unspecified error",
-                 message=None,
-                 url=None,
-                 traceback=None):
-        super().__init__(status)
+                 status = "Unspecified error",
+                 message = None,
+                 url = None,
+                 traceback = None):
+        Exception.__init__(self, status)
         self.status = status         # e.g. "400 Bad Request"
         self.message = message       # textual message from the server
         self.url = url               # URL we were requesting
         self.traceback = traceback   # server traceback, if available

     def _format_error(self, show_url):
         s = sprintf("[%s]", self.status)
         if self.message:
             s += sprintf(" %s", self.message)
-        if show_url and self.url:
+        if show_url and self.url: # pragma: no cover
             s += sprintf(" (%s)", self.url)
-        if self.traceback:
+        if self.traceback: # pragma: no cover
             s += sprintf("\nServer traceback:\n%s", self.traceback)
         return s

     def __str__(self):
-        return self._format_error(show_url=False)
+        return self._format_error(show_url = False)

-    def __repr__(self):
-        return self._format_error(show_url=True)
+    def __repr__(self): # pragma: no cover
+        return self._format_error(show_url = True)


 class ClientError(Error):
     pass


 class ServerError(Error):
     pass
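The exception hierarchy is identical on both branches apart from the super().__init__ spelling. A sketch of the intended catch pattern, assuming `client` is an existing nilmdb.client.Client instance:

    from nilmdb.client.errors import ClientError, ServerError

    try:
        client.stream_create('/test/raw', 'float32_8')
    except ClientError as e:
        # 4xx responses: the request itself was bad.
        print('rejected:', e.status, e.message)
    except ServerError as e:
        # 5xx responses: e.traceback may carry a server-side traceback.
        print('server error:', e.status, e.url)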
@@ -1,21 +1,21 @@
 """HTTP client library"""

-import json
-import urllib.parse
-import requests
+import nilmdb.utils

 from nilmdb.client.errors import ClientError, ServerError, Error

+import simplejson as json
+import urlparse
+import requests

-class HTTPClient():
+class HTTPClient(object):
     """Class to manage and perform HTTP requests from the client"""
-    def __init__(self, baseurl="", post_json=False, verify_ssl=True):
+    def __init__(self, baseurl = "", post_json = False, verify_ssl = True):
         """If baseurl is supplied, all other functions that take
         a URL can be given a relative URL instead."""
         # Verify / clean up URL
-        reparsed = urllib.parse.urlparse(baseurl).geturl()
+        reparsed = urlparse.urlparse(baseurl).geturl()
         if '://' not in reparsed:
-            reparsed = urllib.parse.urlparse("http://" + baseurl).geturl()
+            reparsed = urlparse.urlparse("http://" + baseurl).geturl()
         self.baseurl = reparsed.rstrip('/') + '/'

         # Note whether we want SSL verification
@@ -32,27 +32,25 @@ class HTTPClient():
         # Default variables for exception.  We use the entire body as
         # the default message, in case we can't extract it from a JSON
         # response.
-        args = {
-            "url": url,
-            "status": str(code),
-            "message": body,
-            "traceback": None
-        }
+        args = { "url" : url,
+                 "status" : str(code),
+                 "message" : body,
+                 "traceback" : None }
         try:
             # Fill with server-provided data if we can
             jsonerror = json.loads(body)
             args["status"] = jsonerror["status"]
             args["message"] = jsonerror["message"]
             args["traceback"] = jsonerror["traceback"]
-        except Exception:
+        except Exception: # pragma: no cover
             pass
-        if 400 <= code <= 499:
+        if code >= 400 and code <= 499:
             raise ClientError(**args)
-        else:
-            if 500 <= code <= 599:
+        else: # pragma: no cover
+            if code >= 500 and code <= 599:
                 if args["message"] is None:
-                    args["message"] = ("(no message; try disabling "
-                                       "response.stream option in "
+                    args["message"] = ("(no message; try disabling " +
+                                       "response.stream option in " +
                                        "nilmdb.server for better debugging)")
                 raise ServerError(**args)
             else:
@@ -62,7 +60,7 @@ class HTTPClient():
             pass

     def _do_req(self, method, url, query_data, body_data, stream, headers):
-        url = urllib.parse.urljoin(self.baseurl, url)
+        url = urlparse.urljoin(self.baseurl, url)
         try:
             # Create a new session, ensure we send "Connection: close",
             # and explicitly close connection after the transfer.
@@ -76,11 +74,11 @@ class HTTPClient():
                 headers = {}
             headers["Connection"] = "close"
             response = session.request(method, url,
-                                       params=query_data,
-                                       data=body_data,
-                                       stream=stream,
-                                       headers=headers,
-                                       verify=self.verify_ssl)
+                                       params = query_data,
+                                       data = body_data,
+                                       stream = stream,
+                                       headers = headers,
+                                       verify = self.verify_ssl)

             # Close the connection.  If it's a generator (stream =
             # True), the requests library shouldn't actually close the
@@ -88,8 +86,8 @@ class HTTPClient():
             # response.
             session.close()
         except requests.RequestException as e:
-            raise ServerError(status="502 Error", url=url,
-                              message=str(e))
+            raise ServerError(status = "502 Error", url = url,
+                              message = str(e.message))
         if response.status_code != 200:
             self._handle_error(url, response.status_code, response.content)
         self._last_response = response
@@ -100,46 +98,48 @@ class HTTPClient():
             return (response, False)

     # Normal versions that return data directly
-    def _req(self, method, url, query=None, body=None, headers=None):
+    def _req(self, method, url, query = None, body = None, headers = None):
         """
         Make a request and return the body data as a string or parsed
         JSON object, or raise an error if it contained an error.
         """
         (response, isjson) = self._do_req(method, url, query, body,
-                                          stream=False, headers=headers)
+                                          stream = False, headers = headers)
         if isjson:
             return json.loads(response.content)
-        return response.text
+        return response.content

-    def get(self, url, params=None):
+    def get(self, url, params = None):
         """Simple GET (parameters in URL)"""
         return self._req("GET", url, params, None)

-    def post(self, url, params=None):
+    def post(self, url, params = None):
         """Simple POST (parameters in body)"""
         if self.post_json:
             return self._req("POST", url, None,
                              json.dumps(params),
-                             {'Content-type': 'application/json'})
+                             { 'Content-type': 'application/json' })
         else:
             return self._req("POST", url, None, params)

-    def put(self, url, data, params=None,
-            content_type="application/octet-stream"):
+    def put(self, url, data, params = None, binary = False):
         """Simple PUT (parameters in URL, data in body)"""
-        h = {'Content-type': content_type}
-        return self._req("PUT", url, query=params, body=data, headers=h)
+        if binary:
+            h = { 'Content-type': 'application/octet-stream' }
+        else:
+            h = { 'Content-type': 'text/plain; charset=utf-8' }
+        return self._req("PUT", url, query = params, body = data, headers = h)

     # Generator versions that return data one line at a time.
-    def _req_gen(self, method, url, query=None, body=None,
-                 headers=None, binary=False):
+    def _req_gen(self, method, url, query = None, body = None,
+                 headers = None, binary = False):
         """
         Make a request and return a generator that gives back strings
         or JSON decoded lines of the body data, or raise an error if
         it contained an error.
         """
         (response, isjson) = self._do_req(method, url, query, body,
-                                          stream=True, headers=headers)
+                                          stream = True, headers = headers)

         # Like the iter_lines function in Requests, but only splits on
         # the specified line ending.
@@ -156,32 +156,32 @@ class HTTPClient():
                 pending = tmp[-1]
                 for line in lines:
                     yield line
-            if pending is not None:
+            if pending is not None: # pragma: no cover (missing newline)
                 yield pending

         # Yield the chunks or lines as requested
         if binary:
-            for chunk in response.iter_content(chunk_size=65536):
+            for chunk in response.iter_content(chunk_size = 65536):
                 yield chunk
         elif isjson:
-            for line in lines(response.iter_content(chunk_size=1),
-                              ending=b'\r\n'):
+            for line in lines(response.iter_content(chunk_size = 1),
+                              ending = '\r\n'):
                 yield json.loads(line)
         else:
-            for line in lines(response.iter_content(chunk_size=65536),
-                              ending=b'\n'):
+            for line in lines(response.iter_content(chunk_size = 65536),
+                              ending = '\n'):
                 yield line

-    def get_gen(self, url, params=None, binary=False):
+    def get_gen(self, url, params = None, binary = False):
         """Simple GET (parameters in URL) returning a generator"""
-        return self._req_gen("GET", url, params, binary=binary)
+        return self._req_gen("GET", url, params, binary = binary)

-    def post_gen(self, url, params=None):
+    def post_gen(self, url, params = None):
         """Simple POST (parameters in body) returning a generator"""
         if self.post_json:
             return self._req_gen("POST", url, None,
                                  json.dumps(params),
-                                 {'Content-type': 'application/json'})
+                                 { 'Content-type': 'application/json' })
         else:
             return self._req_gen("POST", url, None, params)
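The helper generator in _req_gen re-chunks the streamed body on one exact line ending, unlike requests' iter_lines which splits on any newline. A standalone sketch of that idea, written bytes-oriented as on master (the visible fragment `pending = tmp[-1]` and the trailing yield match this shape):

    def lines(source, ending=b'\n'):
        # Accumulate arbitrary chunks and yield complete lines, splitting
        # only on the exact 'ending' delimiter.
        pending = None
        for chunk in source:
            if pending is not None:
                chunk = pending + chunk
            tmp = chunk.split(ending)
            # The last element is an incomplete line (or b'' if the chunk
            # ended exactly on the delimiter); hold it for the next chunk.
            pending = tmp[-1]
            for line in tmp[:-1]:
                yield line
        if pending:  # leftover data with no trailing delimiter
            yield pending

    assert list(lines(iter([b'a\nb', b'c\nd']))) == [b'a', b'bc', b'd']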
@@ -3,33 +3,29 @@
 """Provide a NumpyClient class that is based on normal Client, but has
 additional methods for extracting and inserting data via Numpy arrays."""

-import contextlib
-
-import numpy
-
 import nilmdb.utils
 import nilmdb.client.client
 import nilmdb.client.httpclient
 from nilmdb.client.errors import ClientError

+import contextlib
+from nilmdb.utils.time import timestamp_to_string, string_to_timestamp

+import numpy
+import cStringIO

 def layout_to_dtype(layout):
     ltype = layout.split('_')[0]
     lcount = int(layout.split('_')[1])
     if ltype.startswith('int'):
-        atype = '<i' + str(int(ltype[3:]) // 8)
+        atype = '<i' + str(int(ltype[3:]) / 8)
     elif ltype.startswith('uint'):
-        atype = '<u' + str(int(ltype[4:]) // 8)
+        atype = '<u' + str(int(ltype[4:]) / 8)
     elif ltype.startswith('float'):
-        atype = '<f' + str(int(ltype[5:]) // 8)
+        atype = '<f' + str(int(ltype[5:]) / 8)
     else:
         raise ValueError("bad layout")
-    if lcount == 1:
-        dtype = [('timestamp', '<i8'), ('data', atype)]
-    else:
-        dtype = [('timestamp', '<i8'), ('data', atype, lcount)]
-    return numpy.dtype(dtype)
+    return numpy.dtype([('timestamp', '<i8'), ('data', atype, lcount)])


 class NumpyClient(nilmdb.client.client.Client):
     """Subclass of nilmdb.client.Client that adds additional methods for
@@ -43,9 +39,9 @@ class NumpyClient(nilmdb.client.client.Client):
         layout = streams[0][1]
         return layout_to_dtype(layout)

-    def stream_extract_numpy(self, path, start=None, end=None,
-                             layout=None, maxrows=100000,
-                             structured=False):
+    def stream_extract_numpy(self, path, start = None, end = None,
+                             layout = None, maxrows = 100000,
+                             structured = False):
         """
         Extract data from a stream.  Returns a generator that yields
         Numpy arrays of up to 'maxrows' of data each.
@@ -59,7 +55,7 @@ class NumpyClient(nilmdb.client.client.Client):
         dtype = self._get_dtype(path, layout)

         def to_numpy(data):
-            a = numpy.frombuffer(data, dtype)
+            a = numpy.fromstring(data, dtype)
             if structured:
                 return a
             return numpy.c_[a['timestamp'], a['data']]
@@ -67,25 +63,25 @@ class NumpyClient(nilmdb.client.client.Client):
         chunks = []
         total_len = 0
         maxsize = dtype.itemsize * maxrows
-        for data in self.stream_extract(path, start, end, binary=True):
+        for data in self.stream_extract(path, start, end, binary = True):
             # Add this block of binary data
             chunks.append(data)
             total_len += len(data)

             # See if we have enough to make the requested Numpy array
             while total_len >= maxsize:
-                assembled = b"".join(chunks)
+                assembled = "".join(chunks)
                 total_len -= maxsize
-                chunks = [assembled[maxsize:]]
+                chunks = [ assembled[maxsize:] ]
                 block = assembled[:maxsize]
                 yield to_numpy(block)

         if total_len:
-            yield to_numpy(b"".join(chunks))
+            yield to_numpy("".join(chunks))

     @contextlib.contextmanager
-    def stream_insert_numpy_context(self, path, start=None, end=None,
-                                    layout=None):
+    def stream_insert_numpy_context(self, path, start = None, end = None,
+                                    layout = None):
         """Return a context manager that allows data to be efficiently
         inserted into a stream in a piecewise manner.  Data is
         provided as Numpy arrays, and is aggregated and sent to the
@@ -104,8 +100,8 @@ class NumpyClient(nilmdb.client.client.Client):
             ctx.finalize()
             ctx.destroy()

-    def stream_insert_numpy(self, path, data, start=None, end=None,
-                            layout=None):
+    def stream_insert_numpy(self, path, data, start = None, end = None,
+                            layout = None):
         """Insert data into a stream.  data should be a Numpy array
         which will be passed through stream_insert_numpy_context to
         break it into chunks etc.  See the help for that function
@@ -118,7 +114,6 @@ class NumpyClient(nilmdb.client.client.Client):
             ctx.insert(chunk)
         return ctx.last_response


 class StreamInserterNumpy(nilmdb.client.client.StreamInserter):
     """Object returned by stream_insert_numpy_context() that manages
     the insertion of rows of data into a particular path.
@@ -151,7 +146,7 @@ class StreamInserterNumpy(nilmdb.client.client.StreamInserter):

     def insert(self, array):
         """Insert Numpy data, which must match the layout type."""
-        if not isinstance(array, numpy.ndarray):
+        if type(array) != numpy.ndarray:
             array = numpy.array(array)
         if array.ndim == 1:
             # Already a structured array; just verify the type
@@ -161,9 +156,9 @@ class StreamInserterNumpy(nilmdb.client.client.StreamInserter):
             # Convert to structured array
             sarray = numpy.zeros(array.shape[0], dtype=self._dtype)
             try:
-                sarray['timestamp'] = array[:, 0]
+                sarray['timestamp'] = array[:,0]
                 # Need the squeeze in case sarray['data'] is 1 dimensional
-                sarray['data'] = numpy.squeeze(array[:, 1:])
+                sarray['data'] = numpy.squeeze(array[:,1:])
             except (IndexError, ValueError):
                 raise ValueError("wrong number of fields for this data type")
             array = sarray
@@ -189,15 +184,15 @@ class StreamInserterNumpy(nilmdb.client.client.StreamInserter):

         # Send if it's too long
         if self._block_rows >= maxrows:
-            self._send_block(final=False)
+            self._send_block(final = False)

-    def _send_block(self, final=False):
+    def _send_block(self, final = False):
         """Send the data currently stored up.  One row might be left
         over if we need its timestamp saved."""

         # Build the full array to send
         if self._block_rows == 0:
-            array = numpy.zeros(0, dtype=self._dtype)
+            array = numpy.zeros(0, dtype = self._dtype)
         else:
             array = numpy.hstack(self._block_arrays)

@@ -241,7 +236,7 @@ class StreamInserterNumpy(nilmdb.client.client.StreamInserter):
             # the server complain so that the error is the same
             # as if we hadn't done this chunking.
             end_ts = self._interval_end
-            self._block_arrays = [array[-1:]]
+            self._block_arrays = [ array[-1:] ]
             self._block_rows = 1
             array = array[:-1]

@@ -251,13 +246,13 @@ class StreamInserterNumpy(nilmdb.client.client.StreamInserter):
         # If we have no endpoints, or equal endpoints, it's OK as long
         # as there's no data to send
         if (start_ts is None or end_ts is None) or (start_ts == end_ts):
-            if not array:
+            if len(array) == 0:
                 return
             raise ClientError("have data to send, but invalid start/end times")

         # Send it
         data = array.tostring()
         self.last_response = self._client.stream_insert_block(
-            self._path, data, start_ts, end_ts, binary=True)
+            self._path, data, start_ts, end_ts, binary = True)

         return
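layout_to_dtype is small enough to check by hand. On master, a 'float32_8' layout maps to a little-endian record of one int64 timestamp plus eight float32 values:

    import numpy

    dtype = numpy.dtype([('timestamp', '<i8'), ('data', '<f4', 8)])
    # Matches layout_to_dtype("float32_8"): '<f' + str(32 // 8) == '<f4'
    assert dtype.itemsize == 8 + 4 * 8  # 40 bytes per binary row

The python2 branch could keep plain '/' because integer division was the default there; on Python 3, 32 / 8 would give the float 4.0 and break the dtype string, hence master's '//'.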
@@ -1,28 +1,31 @@
 """Command line client functionality"""

-import os
-import sys
-import signal
-import argparse
-from argparse import ArgumentDefaultsHelpFormatter as def_form
-
 import nilmdb.client
-from nilmdb.utils.printf import fprintf, sprintf
+from nilmdb.utils.printf import *
+from nilmdb.utils import datetime_tz
 import nilmdb.utils.time

-import argcomplete
-import datetime_tz
+import sys
+import os
+import argparse
+from argparse import ArgumentDefaultsHelpFormatter as def_form
+import signal
+
+try: # pragma: no cover
+    import argcomplete
+except ImportError: # pragma: no cover
+    argcomplete = None

 # Valid subcommands.  Defined in separate files just to break
 # things up -- they're still called with Cmdline as self.
-subcommands = ["help", "info", "create", "rename", "list", "intervals",
-               "metadata", "insert", "extract", "remove", "destroy"]
+subcommands = [ "help", "info", "create", "rename", "list", "intervals",
+                "metadata", "insert", "extract", "remove", "destroy" ]

 # Import the subcommand modules
 subcmd_mods = {}
 for cmd in subcommands:
-    subcmd_mods[cmd] = __import__("nilmdb.cmdline." + cmd, fromlist=[cmd])
+    subcmd_mods[cmd] = __import__("nilmdb.cmdline." + cmd, fromlist = [ cmd ])


 class JimArgumentParser(argparse.ArgumentParser):
     def parse_args(self, args=None, namespace=None):
@@ -30,19 +33,18 @@ class JimArgumentParser(argparse.ArgumentParser):
         # --version".  This makes "nilmtool cmd --version" work, which
         # is needed by help2man.
         if "--version" in (args or sys.argv[1:]):
-            args = ["--version"]
+            args = [ "--version" ]
         return argparse.ArgumentParser.parse_args(self, args, namespace)

     def error(self, message):
         self.print_usage(sys.stderr)
         self.exit(2, sprintf("error: %s\n", message))

-class Complete():
+class Complete(object): # pragma: no cover
     # Completion helpers, for using argcomplete (see
     # extras/nilmtool-bash-completion.sh)
     def escape(self, s):
-        quote_chars = ["\\", "\"", "'", " "]
+        quote_chars = [ "\\", "\"", "'", " " ]
         for char in quote_chars:
             s = s.replace(char, "\\" + char)
         return s
@@ -55,18 +57,18 @@ class Complete():

     def path(self, prefix, parsed_args, **kwargs):
         client = nilmdb.client.Client(parsed_args.url)
-        return (self.escape(s[0])
+        return ( self.escape(s[0])
                  for s in client.stream_list()
-                if s[0].startswith(prefix))
+                 if s[0].startswith(prefix) )

     def layout(self, prefix, parsed_args, **kwargs):
-        types = ["int8", "int16", "int32", "int64",
+        types = [ "int8", "int16", "int32", "int64",
                   "uint8", "uint16", "uint32", "uint64",
-                 "float32", "float64"]
+                  "float32", "float64" ]
         layouts = []
-        for i in range(1, 10):
+        for i in range(1,10):
             layouts.extend([(t + "_" + str(i)) for t in types])
-        return (lay for lay in layouts if lay.startswith(prefix))
+        return ( l for l in layouts if l.startswith(prefix) )

     def meta_key(self, prefix, parsed_args, **kwargs):
         return (kv.split('=')[0] for kv
@@ -78,22 +80,30 @@ class Complete():
         if not path:
             return []
         results = []
-        for (k, v) in client.stream_get_metadata(path).items():
+        # prefix comes in as UTF-8, but results need to be Unicode,
+        # weird.  Still doesn't work in all cases, but that's bugs in
+        # argcomplete.
+        prefix = nilmdb.utils.unicode.decode(prefix)
+        for (k,v) in client.stream_get_metadata(path).iteritems():
             kv = self.escape(k + '=' + v)
             if kv.startswith(prefix):
                 results.append(kv)
         return results

-class Cmdline():
+class Cmdline(object):

-    def __init__(self, argv=None):
+    def __init__(self, argv = None):
         self.argv = argv or sys.argv[1:]
+        try:
+            # Assume command line arguments are encoded with stdin's encoding,
+            # and reverse it.  Won't be needed in Python 3, but for now..
+            self.argv = [ x.decode(sys.stdin.encoding) for x in self.argv ]
+        except Exception: # pragma: no cover
+            pass
         self.client = None
         self.def_url = os.environ.get("NILMDB_URL", "http://localhost/nilmdb/")
         self.subcmd = {}
         self.complete = Complete()
-        self.complete_output_stream = None  # overridden by test suite

     def arg_time(self, toparse):
         """Parse a time string argument"""
@@ -105,14 +115,14 @@ class Cmdline():

     # Set up the parser
     def parser_setup(self):
-        self.parser = JimArgumentParser(add_help=False,
-                                        formatter_class=def_form)
+        self.parser = JimArgumentParser(add_help = False,
+                                        formatter_class = def_form)

         group = self.parser.add_argument_group("General options")
         group.add_argument("-h", "--help", action='help',
                            help='show this help message and exit')
         group.add_argument("-v", "--version", action="version",
-                           version=nilmdb.__version__)
+                           version = nilmdb.__version__)

         group = self.parser.add_argument_group("Server")
         group.add_argument("-u", "--url", action="store",
@@ -121,7 +131,7 @@ class Cmdline():
                            ).completer = self.complete.url

         sub = self.parser.add_subparsers(
-            title="Commands", dest="command", required=True,
+            title="Commands", dest="command",
             description="Use 'help command' or 'command --help' for more "
             "details on a particular command.")

@@ -138,7 +148,10 @@ class Cmdline():
     def run(self):
         # Set SIGPIPE to its default handler -- we don't need Python
         # to catch it for us.
-        signal.signal(signal.SIGPIPE, signal.SIG_DFL)
+        try:
+            signal.signal(signal.SIGPIPE, signal.SIG_DFL)
+        except ValueError: # pragma: no cover
+            pass

         # Clear cached timezone, so that we can pick up timezone changes
         # while running this from the test suite.
@@ -146,8 +159,8 @@ class Cmdline():

         # Run parser
         self.parser_setup()
-        argcomplete.autocomplete(self.parser, exit_method=sys.exit,
-                                 output_stream=self.complete_output_stream)
+        if argcomplete: # pragma: no cover
+            argcomplete.autocomplete(self.parser)
         self.args = self.parser.parse_args(self.argv)

         # Run arg verify handler if there is one
@@ -160,7 +173,7 @@ class Cmdline():
         # unless the particular command requests that we don't.
         if "no_test_connect" not in self.args:
             try:
-                self.client.version()
+                server_version = self.client.version()
             except nilmdb.client.Error as e:
                 self.die("error connecting to server: %s", str(e))
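The escape helper backslash-protects the characters that would break bash completion, and both branches share the logic. A quick standalone check of the behavior:

    def escape(s):
        # Same logic as Complete.escape: the backslash is handled first so
        # the escapes added for the other characters aren't re-escaped.
        for char in ["\\", "\"", "'", " "]:
            s = s.replace(char, "\\" + char)
        return s

    assert escape('/stream/with space') == '/stream/with\\ space'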
@@ -1,11 +1,11 @@
-from argparse import RawDescriptionHelpFormatter as raw_form
+from nilmdb.utils.printf import *

 import nilmdb.client

+from argparse import RawDescriptionHelpFormatter as raw_form

 def setup(self, sub):
     cmd = sub.add_parser("create", help="Create a new stream",
-                         formatter_class=raw_form,
+                         formatter_class = raw_form,
                          description="""
 Create a new empty stream at the specified path and with the specified
 layout type.
@@ -19,7 +19,7 @@ Layout types are of the format: type_count
 For example, 'float32_8' means the data for this stream has 8 columns of
 32-bit floating point values.
 """)
-    cmd.set_defaults(handler=cmd_create)
+    cmd.set_defaults(handler = cmd_create)
     group = cmd.add_argument_group("Required arguments")
     group.add_argument("path",
                        help="Path (in database) of new stream, e.g. /foo/bar",
@@ -29,7 +29,6 @@ Layout types are of the format: type_count
                        ).completer = self.complete.layout
     return cmd

-
 def cmd_create(self):
     """Create new stream"""
     try:
@@ -1,14 +1,12 @@
+from nilmdb.utils.printf import *
+import nilmdb.client
 import fnmatch

 from argparse import ArgumentDefaultsHelpFormatter as def_form

-from nilmdb.utils.printf import printf
-import nilmdb.client
-

 def setup(self, sub):
     cmd = sub.add_parser("destroy", help="Delete a stream and all data",
-                         formatter_class=def_form,
+                         formatter_class = def_form,
                          description="""
 Destroy the stream at the specified path.
 The stream must be empty.  All metadata
@@ -16,7 +14,7 @@ def setup(self, sub):

 Wildcards and multiple paths are supported.
 """)
-    cmd.set_defaults(handler=cmd_destroy)
+    cmd.set_defaults(handler = cmd_destroy)
     group = cmd.add_argument_group("Options")
     group.add_argument("-R", "--remove", action="store_true",
                        help="Remove all data before destroying stream")
@@ -29,10 +27,9 @@ def setup(self, sub):
                        ).completer = self.complete.path
     return cmd

-
 def cmd_destroy(self):
     """Destroy stream"""
-    streams = [s[0] for s in self.client.stream_list()]
+    streams = [ s[0] for s in self.client.stream_list() ]
     paths = []
     for path in self.args.path:
         new = fnmatch.filter(streams, path)
@@ -46,7 +43,7 @@ def cmd_destroy(self):

     try:
         if self.args.remove:
-            self.client.stream_remove(path)
+            count = self.client.stream_remove(path)
         self.client.stream_destroy(path)
     except nilmdb.client.ClientError as e:
         self.die("error destroying stream: %s", str(e))
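Wildcard support in cmd_destroy comes straight from fnmatch.filter, which globs over the list of stream paths. The stream names here are made up for illustration:

    import fnmatch

    streams = ["/test/raw", "/test/prep", "/other/data"]
    # A 'nilmtool destroy /test/*' argument expands like this:
    assert fnmatch.filter(streams, "/test/*") == ["/test/raw", "/test/prep"]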
@@ -1,16 +1,15 @@
-import sys
-
-from nilmdb.utils.printf import printf
+from __future__ import print_function
+from nilmdb.utils.printf import *
 import nilmdb.client
+import sys

 def setup(self, sub):
     cmd = sub.add_parser("extract", help="Extract data",
                          description="""
 Extract data from a stream.
 """)
-    cmd.set_defaults(verify=cmd_extract_verify,
-                     handler=cmd_extract)
+    cmd.set_defaults(verify = cmd_extract_verify,
+                     handler = cmd_extract)

     group = cmd.add_argument_group("Data selection")
     group.add_argument("path",
@@ -41,8 +40,8 @@ def setup(self, sub):
                        help="Just output a count of matched data points")
     return cmd


 def cmd_extract_verify(self):
+    if self.args.start is not None and self.args.end is not None:
         if self.args.start > self.args.end:
             self.parser.error("start is after end")

@@ -51,7 +50,6 @@ def cmd_extract_verify(self):
         self.args.timestamp_raw or self.args.count):
         self.parser.error("--binary cannot be combined with other options")

-
 def cmd_extract(self):
     streams = self.client.stream_list(self.args.path)
     if len(streams) != 1:
@@ -71,9 +69,9 @@ def cmd_extract(self):

     printed = False
     if self.args.binary:
-        printer = sys.stdout.buffer.write
+        printer = sys.stdout.write
     else:
-        printer = lambda x: print(x.decode('utf-8'))
+        printer = print
     bare = self.args.bare
     count = self.args.count
     for dataline in self.client.stream_extract(self.args.path,
@@ -85,7 +83,7 @@ def cmd_extract(self):
         if bare and not count:
             # Strip timestamp (first element).  Doesn't make sense
             # if we are only returning a count.
-            dataline = b' '.join(dataline.split(b' ')[1:])
+            dataline = ' '.join(dataline.split(' ')[1:])
         printer(dataline)
         printed = True
     if not printed:
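The printer split is the core Python 2 to 3 change in cmd_extract: on master, extracted lines arrive as bytes, so text output must be decoded and binary output must bypass the text layer entirely. A sketch of the same dispatch in isolation:

    import sys

    def make_printer(binary):
        if binary:
            # Raw bytes go to the buffer underneath the text-mode stdout.
            return sys.stdout.buffer.write
        # ASCII data lines are bytes too; decode them before print().
        return lambda line: print(line.decode('utf-8'))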
@@ -1,5 +1,7 @@
-import argparse
+from nilmdb.utils.printf import *

+import argparse
+import sys

 def setup(self, sub):
     cmd = sub.add_parser("help", help="Show detailed help for a command",
@@ -7,15 +9,14 @@ def setup(self, sub):
 Show help for a command.  'help command' is
 the same as 'command --help'.
 """)
-    cmd.set_defaults(handler=cmd_help)
-    cmd.set_defaults(no_test_connect=True)
+    cmd.set_defaults(handler = cmd_help)
+    cmd.set_defaults(no_test_connect = True)
     cmd.add_argument("command", nargs="?",
                      help="Command to get help about")
     cmd.add_argument("rest", nargs=argparse.REMAINDER,
                      help=argparse.SUPPRESS)
     return cmd


 def cmd_help(self):
     if self.args.command in self.subcmd:
         self.subcmd[self.args.command].print_help()
@@ -1,21 +1,19 @@
-from argparse import ArgumentDefaultsHelpFormatter as def_form
-
 import nilmdb.client
-from nilmdb.utils.printf import printf
+from nilmdb.utils.printf import *
 from nilmdb.utils import human_size

+from argparse import ArgumentDefaultsHelpFormatter as def_form

 def setup(self, sub):
     cmd = sub.add_parser("info", help="Server information",
-                         formatter_class=def_form,
+                         formatter_class = def_form,
                          description="""
 List information about the server, like
 version.
 """)
-    cmd.set_defaults(handler=cmd_info)
+    cmd.set_defaults(handler = cmd_info)
     return cmd


 def cmd_info(self):
     """Print info about the server"""
     printf("Client version: %s\n", nilmdb.__version__)
@ -1,18 +1,17 @@
|
||||||
import sys
|
from nilmdb.utils.printf import *
|
||||||
|
|
||||||
from nilmdb.utils.printf import printf
|
|
||||||
import nilmdb.client
|
import nilmdb.client
|
||||||
import nilmdb.utils.timestamper as timestamper
|
import nilmdb.utils.timestamper as timestamper
|
||||||
import nilmdb.utils.time
|
import nilmdb.utils.time
|
||||||
|
|
||||||
|
import sys
|
||||||
|
|
||||||
def setup(self, sub):
|
def setup(self, sub):
|
||||||
cmd = sub.add_parser("insert", help="Insert data",
|
cmd = sub.add_parser("insert", help="Insert data",
|
||||||
description="""
|
description="""
|
||||||
Insert data into a stream.
|
Insert data into a stream.
|
||||||
""")
|
""")
|
||||||
cmd.set_defaults(verify=cmd_insert_verify,
|
cmd.set_defaults(verify = cmd_insert_verify,
|
||||||
handler=cmd_insert)
|
handler = cmd_insert)
|
||||||
cmd.add_argument("-q", "--quiet", action='store_true',
|
cmd.add_argument("-q", "--quiet", action='store_true',
|
||||||
help='suppress unnecessary messages')
|
help='suppress unnecessary messages')
|
||||||
|
|
||||||
|
@ -62,24 +61,21 @@ def setup(self, sub):
|
||||||
group.add_argument("path",
|
group.add_argument("path",
|
||||||
help="Path of stream, e.g. /foo/bar",
|
help="Path of stream, e.g. /foo/bar",
|
||||||
).completer = self.complete.path
|
).completer = self.complete.path
|
||||||
group.add_argument("file", nargs='?', default='-',
|
group.add_argument("file", nargs = '?', default='-',
|
||||||
help="File to insert (default: - (stdin))")
|
help="File to insert (default: - (stdin))")
|
||||||
return cmd
|
return cmd
|
||||||
|
|
||||||
|
|
||||||
def cmd_insert_verify(self):
|
def cmd_insert_verify(self):
|
||||||
if self.args.timestamp:
|
if self.args.timestamp:
|
||||||
if not self.args.rate:
|
if not self.args.rate:
|
||||||
self.die("error: --rate is needed, but was not specified")
|
self.die("error: --rate is needed, but was not specified")
|
||||||
if not self.args.filename and self.args.start is None:
|
if not self.args.filename and self.args.start is None:
|
||||||
self.die("error: need --start or --filename "
|
self.die("error: need --start or --filename when adding timestamps")
|
||||||
"when adding timestamps")
|
|
||||||
else:
|
else:
|
||||||
if self.args.start is None or self.args.end is None:
|
if self.args.start is None or self.args.end is None:
|
||||||
self.die("error: when not adding timestamps, --start and "
|
self.die("error: when not adding timestamps, --start and "
|
||||||
"--end are required")
|
"--end are required")
|
||||||
|
|
||||||
|
|
||||||
def cmd_insert(self):
|
def cmd_insert(self):
|
||||||
# Find requested stream
|
# Find requested stream
|
||||||
streams = self.client.stream_list(self.args.path)
|
streams = self.client.stream_list(self.args.path)
|
||||||
|
@ -91,7 +87,7 @@ def cmd_insert(self):
|
||||||
try:
|
try:
|
||||||
filename = arg.file
|
filename = arg.file
|
||||||
if filename == '-':
|
if filename == '-':
|
||||||
infile = sys.stdin.buffer
|
infile = sys.stdin
|
||||||
else:
|
else:
|
||||||
try:
|
try:
|
||||||
infile = open(filename, "rb")
|
infile = open(filename, "rb")
|
||||||
|
@ -108,7 +104,7 @@ def cmd_insert(self):
|
||||||
if arg.timestamp:
|
if arg.timestamp:
|
||||||
data = timestamper.TimestamperRate(infile, arg.start, arg.rate)
|
data = timestamper.TimestamperRate(infile, arg.start, arg.rate)
|
||||||
else:
|
else:
|
||||||
data = iter(lambda: infile.read(1048576), b'')
|
data = iter(lambda: infile.read(1048576), '')
|
||||||
|
|
||||||
# Print info
|
# Print info
|
||||||
if not arg.quiet:
|
if not arg.quiet:
|
||||||
|
|
|
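
The hunk above shows how cmd_insert chooses its data source: with --timestamp it wraps the input stream in TimestamperRate, otherwise it reads the file through in 1 MiB chunks until EOF. A minimal sketch of that dispatch, assuming a binary file-like infile and the TimestamperRate signature seen in the diff (the wrapper function name is illustrative):

    import nilmdb.utils.timestamper as timestamper

    def insert_data_source(infile, add_timestamps, start, rate):
        # Sketch of the dispatch in cmd_insert; not part of nilmtool itself.
        if add_timestamps:
            # Prepend timestamps starting at `start`, spaced by `rate`.
            return timestamper.TimestamperRate(infile, start, rate)
        # Pass raw bytes through in 1 MiB chunks; iter() stops at b''.
        return iter(lambda: infile.read(1048576), b'')
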
@ -1,13 +1,14 @@
|
||||||
from argparse import ArgumentDefaultsHelpFormatter as def_form
|
from nilmdb.utils.printf import *
|
||||||
|
|
||||||
from nilmdb.utils.printf import printf
|
|
||||||
import nilmdb.utils.time
|
import nilmdb.utils.time
|
||||||
from nilmdb.utils.interval import Interval
|
from nilmdb.utils.interval import Interval
|
||||||
|
|
||||||
|
import fnmatch
|
||||||
|
import argparse
|
||||||
|
from argparse import ArgumentDefaultsHelpFormatter as def_form
|
||||||
|
|
||||||
def setup(self, sub):
|
def setup(self, sub):
|
||||||
cmd = sub.add_parser("intervals", help="List intervals",
|
cmd = sub.add_parser("intervals", help="List intervals",
|
||||||
formatter_class=def_form,
|
formatter_class = def_form,
|
||||||
description="""
|
description="""
|
||||||
List intervals in a stream, similar to
|
List intervals in a stream, similar to
|
||||||
'list --detail path'.
|
'list --detail path'.
|
||||||
|
@ -16,8 +17,8 @@ def setup(self, sub):
|
||||||
interval ranges that are present in 'path'
|
interval ranges that are present in 'path'
|
||||||
and not present in 'diffpath' are printed.
|
and not present in 'diffpath' are printed.
|
||||||
""")
|
""")
|
||||||
cmd.set_defaults(verify=cmd_intervals_verify,
|
cmd.set_defaults(verify = cmd_intervals_verify,
|
||||||
handler=cmd_intervals)
|
handler = cmd_intervals)
|
||||||
|
|
||||||
group = cmd.add_argument_group("Stream selection")
|
group = cmd.add_argument_group("Stream selection")
|
||||||
group.add_argument("path", metavar="PATH",
|
group.add_argument("path", metavar="PATH",
|
||||||
|
@ -47,13 +48,11 @@ def setup(self, sub):
|
||||||
|
|
||||||
return cmd
|
return cmd
|
||||||
|
|
||||||
|
|
||||||
def cmd_intervals_verify(self):
|
def cmd_intervals_verify(self):
|
||||||
if self.args.start is not None and self.args.end is not None:
|
if self.args.start is not None and self.args.end is not None:
|
||||||
if self.args.start >= self.args.end:
|
if self.args.start >= self.args.end:
|
||||||
self.parser.error("start must precede end")
|
self.parser.error("start must precede end")
|
||||||
|
|
||||||
|
|
||||||
def cmd_intervals(self):
|
def cmd_intervals(self):
|
||||||
"""List intervals in a stream"""
|
"""List intervals in a stream"""
|
||||||
if self.args.timestamp_raw:
|
if self.args.timestamp_raw:
|
||||||
|
@ -62,11 +61,11 @@ def cmd_intervals(self):
|
||||||
time_string = nilmdb.utils.time.timestamp_to_human
|
time_string = nilmdb.utils.time.timestamp_to_human
|
||||||
|
|
||||||
try:
|
try:
|
||||||
intervals = (Interval(start, end) for (start, end) in
|
intervals = ( Interval(start, end) for (start, end) in
|
||||||
self.client.stream_intervals(self.args.path,
|
self.client.stream_intervals(self.args.path,
|
||||||
self.args.start,
|
self.args.start,
|
||||||
self.args.end,
|
self.args.end,
|
||||||
self.args.diff))
|
self.args.diff) )
|
||||||
if self.args.optimize:
|
if self.args.optimize:
|
||||||
intervals = nilmdb.utils.interval.optimize(intervals)
|
intervals = nilmdb.utils.interval.optimize(intervals)
|
||||||
for i in intervals:
|
for i in intervals:
|
||||||
|
@ -74,3 +73,4 @@ def cmd_intervals(self):
|
||||||
|
|
||||||
except nilmdb.client.ClientError as e:
|
except nilmdb.client.ClientError as e:
|
||||||
self.die("error listing intervals: %s", str(e))
|
self.die("error listing intervals: %s", str(e))
|
||||||
|
|
||||||
|
|
|
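
cmd_intervals above builds a lazy generator of Interval objects over the (start, end) pairs streamed from the server, optionally piping it through nilmdb.utils.interval.optimize to merge adjacent ranges before printing. A sketch of that pipeline (the wrapper function name is illustrative):

    import nilmdb.utils.interval
    from nilmdb.utils.interval import Interval

    def iter_intervals(client, path, start, end, diff=None, optimize=False):
        # stream_intervals yields (start, end) pairs; wrap them lazily.
        intervals = (Interval(s, e) for (s, e) in
                     client.stream_intervals(path, start, end, diff))
        if optimize:
            # Merge adjacent/overlapping intervals into maximal ranges.
            intervals = nilmdb.utils.interval.optimize(intervals)
        return intervals
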
@ -1,21 +1,21 @@
|
||||||
import fnmatch
|
from nilmdb.utils.printf import *
|
||||||
from argparse import ArgumentDefaultsHelpFormatter as def_form
|
|
||||||
|
|
||||||
from nilmdb.utils.printf import printf
|
|
||||||
import nilmdb.utils.time
|
import nilmdb.utils.time
|
||||||
|
|
||||||
|
import fnmatch
|
||||||
|
import argparse
|
||||||
|
from argparse import ArgumentDefaultsHelpFormatter as def_form
|
||||||
|
|
||||||
def setup(self, sub):
|
def setup(self, sub):
|
||||||
cmd = sub.add_parser("list", help="List streams",
|
cmd = sub.add_parser("list", help="List streams",
|
||||||
formatter_class=def_form,
|
formatter_class = def_form,
|
||||||
description="""
|
description="""
|
||||||
List streams available in the database,
|
List streams available in the database,
|
||||||
optionally filtering by path. Wildcards
|
optionally filtering by path. Wildcards
|
||||||
are accepted; non-matching paths or wildcards
|
are accepted; non-matching paths or wildcards
|
||||||
are ignored.
|
are ignored.
|
||||||
""")
|
""")
|
||||||
cmd.set_defaults(verify=cmd_list_verify,
|
cmd.set_defaults(verify = cmd_list_verify,
|
||||||
handler=cmd_list)
|
handler = cmd_list)
|
||||||
|
|
||||||
group = cmd.add_argument_group("Stream filtering")
|
group = cmd.add_argument_group("Stream filtering")
|
||||||
group.add_argument("path", metavar="PATH", default=["*"], nargs='*',
|
group.add_argument("path", metavar="PATH", default=["*"], nargs='*',
|
||||||
|
@ -50,7 +50,6 @@ def setup(self, sub):
|
||||||
|
|
||||||
return cmd
|
return cmd
|
||||||
|
|
||||||
|
|
||||||
def cmd_list_verify(self):
|
def cmd_list_verify(self):
|
||||||
if self.args.start is not None and self.args.end is not None:
|
if self.args.start is not None and self.args.end is not None:
|
||||||
if self.args.start >= self.args.end:
|
if self.args.start >= self.args.end:
|
||||||
|
@ -58,13 +57,11 @@ def cmd_list_verify(self):
|
||||||
|
|
||||||
if self.args.start is not None or self.args.end is not None:
|
if self.args.start is not None or self.args.end is not None:
|
||||||
if not self.args.detail:
|
if not self.args.detail:
|
||||||
self.parser.error("--start and --end only make sense "
|
self.parser.error("--start and --end only make sense with --detail")
|
||||||
"with --detail")
|
|
||||||
|
|
||||||
|
|
||||||
def cmd_list(self):
|
def cmd_list(self):
|
||||||
"""List available streams"""
|
"""List available streams"""
|
||||||
streams = self.client.stream_list(extended=True)
|
streams = self.client.stream_list(extended = True)
|
||||||
|
|
||||||
if self.args.timestamp_raw:
|
if self.args.timestamp_raw:
|
||||||
time_string = nilmdb.utils.time.timestamp_to_string
|
time_string = nilmdb.utils.time.timestamp_to_string
|
||||||
|
|
|
@ -1,8 +1,7 @@
|
||||||
from nilmdb.utils.printf import printf
|
from nilmdb.utils.printf import *
|
||||||
import nilmdb
|
import nilmdb
|
||||||
import nilmdb.client
|
import nilmdb.client
|
||||||
|
|
||||||
|
|
||||||
def setup(self, sub):
|
def setup(self, sub):
|
||||||
cmd = sub.add_parser("metadata", help="Get or set stream metadata",
|
cmd = sub.add_parser("metadata", help="Get or set stream metadata",
|
||||||
description="""
|
description="""
|
||||||
|
@ -12,7 +11,7 @@ def setup(self, sub):
|
||||||
usage="%(prog)s path [-g [key ...] | "
|
usage="%(prog)s path [-g [key ...] | "
|
||||||
"-s key=value [...] | -u key=value [...]] | "
|
"-s key=value [...] | -u key=value [...]] | "
|
||||||
"-d [key ...]")
|
"-d [key ...]")
|
||||||
cmd.set_defaults(handler=cmd_metadata)
|
cmd.set_defaults(handler = cmd_metadata)
|
||||||
|
|
||||||
group = cmd.add_argument_group("Required arguments")
|
group = cmd.add_argument_group("Required arguments")
|
||||||
group.add_argument("path",
|
group.add_argument("path",
|
||||||
|
@ -37,16 +36,15 @@ def setup(self, sub):
|
||||||
).completer = self.complete.meta_key
|
).completer = self.complete.meta_key
|
||||||
return cmd
|
return cmd
|
||||||
|
|
||||||
|
|
||||||
def cmd_metadata(self):
|
def cmd_metadata(self):
|
||||||
"""Manipulate metadata"""
|
"""Manipulate metadata"""
|
||||||
if self.args.set is not None or self.args.update is not None:
|
if self.args.set is not None or self.args.update is not None:
|
||||||
# Either set, or update
|
# Either set, or update
|
||||||
if self.args.set is not None:
|
if self.args.set is not None:
|
||||||
keyvals = self.args.set
|
keyvals = map(nilmdb.utils.unicode.decode, self.args.set)
|
||||||
handler = self.client.stream_set_metadata
|
handler = self.client.stream_set_metadata
|
||||||
else:
|
else:
|
||||||
keyvals = self.args.update
|
keyvals = map(nilmdb.utils.unicode.decode, self.args.update)
|
||||||
handler = self.client.stream_update_metadata
|
handler = self.client.stream_update_metadata
|
||||||
|
|
||||||
# Extract key=value pairs
|
# Extract key=value pairs
|
||||||
|
@ -66,7 +64,7 @@ def cmd_metadata(self):
|
||||||
# Delete (by setting values to empty strings)
|
# Delete (by setting values to empty strings)
|
||||||
keys = None
|
keys = None
|
||||||
if self.args.delete:
|
if self.args.delete:
|
||||||
keys = list(self.args.delete)
|
keys = map(nilmdb.utils.unicode.decode, self.args.delete)
|
||||||
try:
|
try:
|
||||||
data = self.client.stream_get_metadata(self.args.path, keys)
|
data = self.client.stream_get_metadata(self.args.path, keys)
|
||||||
for key in data:
|
for key in data:
|
||||||
|
@ -78,7 +76,7 @@ def cmd_metadata(self):
|
||||||
# Get (or unspecified)
|
# Get (or unspecified)
|
||||||
keys = None
|
keys = None
|
||||||
if self.args.get:
|
if self.args.get:
|
||||||
keys = list(self.args.get)
|
keys = map(nilmdb.utils.unicode.decode, self.args.get)
|
||||||
try:
|
try:
|
||||||
data = self.client.stream_get_metadata(self.args.path, keys)
|
data = self.client.stream_get_metadata(self.args.path, keys)
|
||||||
except nilmdb.client.ClientError as e:
|
except nilmdb.client.ClientError as e:
|
||||||
|
@ -87,4 +85,6 @@ def cmd_metadata(self):
|
||||||
# Print nonexistent keys as having empty value
|
# Print nonexistent keys as having empty value
|
||||||
if value is None:
|
if value is None:
|
||||||
value = ""
|
value = ""
|
||||||
printf("%s=%s\n", key, value)
|
printf("%s=%s\n",
|
||||||
|
nilmdb.utils.unicode.encode(key),
|
||||||
|
nilmdb.utils.unicode.encode(value))
|
||||||
|
|
|
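
cmd_metadata above routes one key=value payload to one of two client calls: stream_set_metadata replaces all metadata, while stream_update_metadata merges into it, and --delete is just an update that sets each named key to the empty string. A hedged sketch of that dispatch (the helper name is invented for illustration):

    def metadata_handler(client, args):
        # Pick the client call and key=value payload as cmd_metadata does:
        # --set replaces all metadata, --update merges into it.
        if args.set is not None:
            return client.stream_set_metadata, args.set
        if args.update is not None:
            return client.stream_update_metadata, args.update
        # --delete is handled separately, by updating each named key
        # to the empty string.
        return None, None
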
@ -1,18 +1,15 @@
|
||||||
import fnmatch
|
from nilmdb.utils.printf import *
|
||||||
|
|
||||||
from nilmdb.utils.printf import printf
|
|
||||||
import nilmdb.client
|
import nilmdb.client
|
||||||
|
import fnmatch
|
||||||
|
|
||||||
def setup(self, sub):
|
def setup(self, sub):
|
||||||
cmd = sub.add_parser("remove", help="Remove data",
|
cmd = sub.add_parser("remove", help="Remove data",
|
||||||
description="""
|
description="""
|
||||||
Remove all data from a specified time range within a
|
Remove all data from a specified time range within a
|
||||||
stream. If multiple streams or wildcards are
|
stream. If multiple streams or wildcards are provided,
|
||||||
provided, the same time range is removed from all
|
the same time range is removed from all streams.
|
||||||
streams.
|
|
||||||
""")
|
""")
|
||||||
cmd.set_defaults(handler=cmd_remove)
|
cmd.set_defaults(handler = cmd_remove)
|
||||||
|
|
||||||
group = cmd.add_argument_group("Data selection")
|
group = cmd.add_argument_group("Data selection")
|
||||||
group.add_argument("path", nargs='+',
|
group.add_argument("path", nargs='+',
|
||||||
|
@ -35,9 +32,8 @@ def setup(self, sub):
|
||||||
help="Output number of data points removed")
|
help="Output number of data points removed")
|
||||||
return cmd
|
return cmd
|
||||||
|
|
||||||
|
|
||||||
def cmd_remove(self):
|
def cmd_remove(self):
|
||||||
streams = [s[0] for s in self.client.stream_list()]
|
streams = [ s[0] for s in self.client.stream_list() ]
|
||||||
paths = []
|
paths = []
|
||||||
for path in self.args.path:
|
for path in self.args.path:
|
||||||
new = fnmatch.filter(streams, path)
|
new = fnmatch.filter(streams, path)
|
||||||
|
@ -52,7 +48,7 @@ def cmd_remove(self):
|
||||||
count = self.client.stream_remove(path,
|
count = self.client.stream_remove(path,
|
||||||
self.args.start, self.args.end)
|
self.args.start, self.args.end)
|
||||||
if self.args.count:
|
if self.args.count:
|
||||||
printf("%d\n", count)
|
printf("%d\n", count);
|
||||||
except nilmdb.client.ClientError as e:
|
except nilmdb.client.ClientError as e:
|
||||||
self.die("error removing data: %s", str(e))
|
self.die("error removing data: %s", str(e))
|
||||||
|
|
||||||
|
|
|
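
cmd_remove above expands each PATH argument against the full stream list with fnmatch, so shell-style wildcards can select several streams at once. A short standalone illustration of that expansion (the stream names are invented):

    import fnmatch

    streams = ["/test/a", "/test/b", "/prod/a"]
    paths = []
    for pattern in ["/test/*", "/prod/a"]:
        # Non-matching patterns simply contribute nothing.
        paths.extend(fnmatch.filter(streams, pattern))
    assert paths == ["/test/a", "/test/b", "/prod/a"]
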
@ -1,18 +1,18 @@
|
||||||
from argparse import ArgumentDefaultsHelpFormatter as def_form
|
from nilmdb.utils.printf import *
|
||||||
|
|
||||||
import nilmdb.client
|
import nilmdb.client
|
||||||
|
|
||||||
|
from argparse import ArgumentDefaultsHelpFormatter as def_form
|
||||||
|
|
||||||
def setup(self, sub):
|
def setup(self, sub):
|
||||||
cmd = sub.add_parser("rename", help="Rename a stream",
|
cmd = sub.add_parser("rename", help="Rename a stream",
|
||||||
formatter_class=def_form,
|
formatter_class = def_form,
|
||||||
description="""
|
description="""
|
||||||
Rename a stream.
|
Rename a stream.
|
||||||
|
|
||||||
Only the stream's path is renamed; no
|
Only the stream's path is renamed; no
|
||||||
metadata is changed.
|
metadata is changed.
|
||||||
""")
|
""")
|
||||||
cmd.set_defaults(handler=cmd_rename)
|
cmd.set_defaults(handler = cmd_rename)
|
||||||
group = cmd.add_argument_group("Required arguments")
|
group = cmd.add_argument_group("Required arguments")
|
||||||
group.add_argument("oldpath",
|
group.add_argument("oldpath",
|
||||||
help="Old path, e.g. /foo/old",
|
help="Old path, e.g. /foo/old",
|
||||||
|
@ -23,7 +23,6 @@ def setup(self, sub):
|
||||||
|
|
||||||
return cmd
|
return cmd
|
||||||
|
|
||||||
|
|
||||||
def cmd_rename(self):
|
def cmd_rename(self):
|
||||||
"""Rename a stream"""
|
"""Rename a stream"""
|
||||||
try:
|
try:
|
||||||
|
|
|
@ -1,3 +1,5 @@
|
||||||
"""nilmdb.fsck"""
|
"""nilmdb.fsck"""
|
||||||
|
|
||||||
|
from __future__ import absolute_import
|
||||||
|
|
||||||
from nilmdb.fsck.fsck import Fsck
|
from nilmdb.fsck.fsck import Fsck
|
||||||
|
|
|
@ -10,7 +10,8 @@ import nilmdb.server
|
||||||
import nilmdb.client.numpyclient
|
import nilmdb.client.numpyclient
|
||||||
from nilmdb.utils.interval import IntervalError
|
from nilmdb.utils.interval import IntervalError
|
||||||
from nilmdb.server.interval import Interval, IntervalSet
|
from nilmdb.server.interval import Interval, IntervalSet
|
||||||
from nilmdb.utils.printf import printf, fprintf, sprintf
|
from nilmdb.utils.printf import *
|
||||||
|
from nilmdb.utils.time import timestamp_to_string
|
||||||
|
|
||||||
from collections import defaultdict
|
from collections import defaultdict
|
||||||
import sqlite3
|
import sqlite3
|
||||||
|
@ -18,83 +19,70 @@ import os
|
||||||
import sys
|
import sys
|
||||||
import progressbar
|
import progressbar
|
||||||
import re
|
import re
|
||||||
|
import time
|
||||||
import shutil
|
import shutil
|
||||||
import pickle
|
import cPickle as pickle
|
||||||
import numpy
|
import numpy
|
||||||
|
|
||||||
|
|
||||||
class FsckError(Exception):
|
class FsckError(Exception):
|
||||||
def __init__(self, msg="", *args):
|
def __init__(self, msg = "", *args):
|
||||||
if args:
|
if args:
|
||||||
msg = sprintf(msg, *args)
|
msg = sprintf(msg, *args)
|
||||||
Exception.__init__(self, msg)
|
Exception.__init__(self, msg)
|
||||||
|
|
||||||
|
|
||||||
class FixableFsckError(FsckError):
|
class FixableFsckError(FsckError):
|
||||||
def __init__(self, msg=""):
|
def __init__(self, msg = "", *args):
|
||||||
FsckError.__init__(self, f'{msg}\nThis may be fixable with "--fix".')
|
if args:
|
||||||
|
msg = sprintf(msg, *args)
|
||||||
|
FsckError.__init__(self, "%s\nThis may be fixable with \"--fix\".", msg)
|
||||||
class RetryFsck(FsckError):
|
class RetryFsck(FsckError):
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
|
||||||
class FsckFormatError(FsckError):
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
def log(format, *args):
|
def log(format, *args):
|
||||||
printf(format, *args)
|
printf(format, *args)
|
||||||
|
|
||||||
|
|
||||||
def err(format, *args):
|
def err(format, *args):
|
||||||
fprintf(sys.stderr, format, *args)
|
fprintf(sys.stderr, format, *args)
|
||||||
|
|
||||||
|
|
||||||
# Decorator that retries a function if it raises a specific exception
|
# Decorator that retries a function if it raises a specific exception
|
||||||
def retry_if_raised(exc, message=None, max_retries=1000):
|
def retry_if_raised(exc, message = None, max_retries = 100):
|
||||||
def f1(func):
|
def f1(func):
|
||||||
def f2(*args, **kwargs):
|
def f2(*args, **kwargs):
|
||||||
for n in range(max_retries):
|
for n in range(max_retries):
|
||||||
try:
|
try:
|
||||||
return func(*args, **kwargs)
|
return func(*args, **kwargs)
|
||||||
except exc:
|
except exc as e:
|
||||||
if message:
|
if message:
|
||||||
log(f"{message} ({n+1})\n\n")
|
log("%s\n\n", message)
|
||||||
raise Exception("Max number of retries (%d) exceeded; giving up" %
|
raise Exception("Max number of retries (%d) exceeded; giving up")
|
||||||
max_retries)
|
|
||||||
return f2
|
return f2
|
||||||
return f1
|
return f1
|
||||||
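
retry_if_raised above is a retry decorator: a given exception type makes the wrapped function run again, up to a retry limit, which lets a fsck fix raise RetryFsck and restart the whole check. A self-contained sketch of the same pattern:

    def retry_if_raised(exc, message=None, max_retries=1000):
        # Retry the wrapped function whenever it raises `exc`,
        # up to max_retries attempts, then give up loudly.
        def decorator(func):
            def wrapper(*args, **kwargs):
                for n in range(max_retries):
                    try:
                        return func(*args, **kwargs)
                    except exc:
                        if message:
                            print("%s (%d)" % (message, n + 1))
                raise Exception("Max number of retries (%d) exceeded; "
                                "giving up" % max_retries)
            return wrapper
        return decorator
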
|
|
||||||
|
|
||||||
class Progress(object):
|
class Progress(object):
|
||||||
def __init__(self, maxval):
|
def __init__(self, maxval):
|
||||||
if maxval == 0:
|
if maxval == 0:
|
||||||
maxval = 1
|
maxval = 1
|
||||||
self.bar = progressbar.ProgressBar(
|
self.bar = progressbar.ProgressBar(
|
||||||
maxval=maxval,
|
maxval = maxval,
|
||||||
widgets=[progressbar.Percentage(), ' ',
|
widgets = [ progressbar.Percentage(), ' ',
|
||||||
progressbar.Bar(), ' ',
|
progressbar.Bar(), ' ',
|
||||||
progressbar.ETA()])
|
progressbar.ETA() ])
|
||||||
self.bar.term_width = self.bar.term_width or 75
|
if self.bar.term_width == 0:
|
||||||
|
self.bar.term_width = 75
|
||||||
def __enter__(self):
|
def __enter__(self):
|
||||||
self.bar.start()
|
self.bar.start()
|
||||||
self.last_update = 0
|
self.last_update = 0
|
||||||
return self
|
return self
|
||||||
|
|
||||||
def __exit__(self, exc_type, exc_value, traceback):
|
def __exit__(self, exc_type, exc_value, traceback):
|
||||||
if exc_type is None:
|
if exc_type is None:
|
||||||
self.bar.finish()
|
self.bar.finish()
|
||||||
else:
|
else:
|
||||||
printf("\n")
|
printf("\n")
|
||||||
|
|
||||||
def update(self, val):
|
def update(self, val):
|
||||||
self.bar.update(val)
|
self.bar.update(val)
|
||||||
|
|
||||||
|
|
||||||
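
Progress above turns progressbar.ProgressBar into a context manager: the bar starts on entry, finishes on a clean exit, and a bare newline is printed instead if an exception escapes. Typical usage, matching how the checks below drive it:

    with Progress(len(ids)) as pbar:
        for i, sid in enumerate(ids):
            pbar.update(i)
            # ... per-stream checks go here ...
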
class Fsck(object):
|
class Fsck(object):
|
||||||
def __init__(self, path, fix=False):
|
|
||||||
|
def __init__(self, path, fix = False):
|
||||||
self.basepath = path
|
self.basepath = path
|
||||||
self.sqlpath = os.path.join(path, "data.sql")
|
self.sqlpath = os.path.join(path, "data.sql")
|
||||||
self.bulkpath = os.path.join(path, "data")
|
self.bulkpath = os.path.join(path, "data")
|
||||||
|
@ -104,7 +92,7 @@ class Fsck(object):
|
||||||
### Main checks
|
### Main checks
|
||||||
|
|
||||||
@retry_if_raised(RetryFsck, "Something was fixed: restarting fsck")
|
@retry_if_raised(RetryFsck, "Something was fixed: restarting fsck")
|
||||||
def check(self, skip_data=False):
|
def check(self, skip_data = False):
|
||||||
self.bulk = None
|
self.bulk = None
|
||||||
self.sql = None
|
self.sql = None
|
||||||
try:
|
try:
|
||||||
|
@ -119,9 +107,7 @@ class Fsck(object):
|
||||||
finally:
|
finally:
|
||||||
if self.bulk:
|
if self.bulk:
|
||||||
self.bulk.close()
|
self.bulk.close()
|
||||||
if self.sql: # pragma: no cover
|
if self.sql:
|
||||||
# (coverage doesn't handle finally clauses correctly;
|
|
||||||
# both branches here are tested)
|
|
||||||
self.sql.commit()
|
self.sql.commit()
|
||||||
self.sql.close()
|
self.sql.close()
|
||||||
log("ok\n")
|
log("ok\n")
|
||||||
|
@ -176,7 +162,7 @@ class Fsck(object):
|
||||||
"ORDER BY start_time")
|
"ORDER BY start_time")
|
||||||
for r in result:
|
for r in result:
|
||||||
if r[0] not in self.stream_path:
|
if r[0] not in self.stream_path:
|
||||||
raise FsckError("interval ID %d not in streams", r[0])
|
raise FsckError("interval ID %d not in streams", k)
|
||||||
self.stream_interval[r[0]].append((r[1], r[2], r[3], r[4]))
|
self.stream_interval[r[0]].append((r[1], r[2], r[3], r[4]))
|
||||||
|
|
||||||
log(" loading metadata\n")
|
log(" loading metadata\n")
|
||||||
|
@ -184,17 +170,16 @@ class Fsck(object):
|
||||||
result = cur.execute("SELECT stream_id, key, value FROM metadata")
|
result = cur.execute("SELECT stream_id, key, value FROM metadata")
|
||||||
for r in result:
|
for r in result:
|
||||||
if r[0] not in self.stream_path:
|
if r[0] not in self.stream_path:
|
||||||
raise FsckError("metadata ID %d not in streams", r[0])
|
raise FsckError("metadata ID %d not in streams", k)
|
||||||
if r[1] in self.stream_meta[r[0]]:
|
if r[1] in self.stream_meta[r[0]]:
|
||||||
raise FsckError(
|
raise FsckError("duplicate metadata key '%s' for stream %d",
|
||||||
"duplicate metadata key '%s' for stream %d",
|
|
||||||
r[1], r[0])
|
r[1], r[0])
|
||||||
self.stream_meta[r[0]][r[1]] = r[2]
|
self.stream_meta[r[0]][r[1]] = r[2]
|
||||||
|
|
||||||
### Check streams and basic interval overlap
|
### Check streams and basic interval overlap
|
||||||
|
|
||||||
def check_streams(self):
|
def check_streams(self):
|
||||||
ids = list(self.stream_path.keys())
|
ids = self.stream_path.keys()
|
||||||
log("checking %s streams\n", "{:,d}".format(len(ids)))
|
log("checking %s streams\n", "{:,d}".format(len(ids)))
|
||||||
with Progress(len(ids)) as pbar:
|
with Progress(len(ids)) as pbar:
|
||||||
for i, sid in enumerate(ids):
|
for i, sid in enumerate(ids):
|
||||||
|
@ -202,7 +187,7 @@ class Fsck(object):
|
||||||
path = self.stream_path[sid]
|
path = self.stream_path[sid]
|
||||||
|
|
||||||
# unique path, valid layout
|
# unique path, valid layout
|
||||||
if list(self.stream_path.values()).count(path) != 1:
|
if self.stream_path.values().count(path) != 1:
|
||||||
raise FsckError("duplicated path %s", path)
|
raise FsckError("duplicated path %s", path)
|
||||||
layout = self.stream_layout[sid].split('_')[0]
|
layout = self.stream_layout[sid].split('_')[0]
|
||||||
if layout not in ('int8', 'int16', 'int32', 'int64',
|
if layout not in ('int8', 'int16', 'int32', 'int64',
|
||||||
|
@ -215,7 +200,6 @@ class Fsck(object):
|
||||||
|
|
||||||
# must exist in bulkdata
|
# must exist in bulkdata
|
||||||
bulk = self.bulkpath + path
|
bulk = self.bulkpath + path
|
||||||
bulk = bulk.encode('utf-8')
|
|
||||||
if not os.path.isdir(bulk):
|
if not os.path.isdir(bulk):
|
||||||
raise FsckError("%s: missing bulkdata dir", path)
|
raise FsckError("%s: missing bulkdata dir", path)
|
||||||
if not nilmdb.server.bulkdata.Table.exists(bulk):
|
if not nilmdb.server.bulkdata.Table.exists(bulk):
|
||||||
|
@ -238,97 +222,39 @@ class Fsck(object):
|
||||||
try:
|
try:
|
||||||
posiset += new
|
posiset += new
|
||||||
except IntervalError:
|
except IntervalError:
|
||||||
self.fix_row_overlap(sid, path, posiset, new)
|
raise FsckError("%s: overlap in file offsets:\n"
|
||||||
|
"set: %s\nnew: %s",
|
||||||
|
path, str(posiset), str(new))
|
||||||
|
|
||||||
try:
|
# check bulkdata
|
||||||
# Check bulkdata
|
|
||||||
self.check_bulkdata(sid, path, bulk)
|
self.check_bulkdata(sid, path, bulk)
|
||||||
|
|
||||||
# Check that we can open bulkdata
|
# Check that we can open bulkdata
|
||||||
|
try:
|
||||||
|
tab = None
|
||||||
|
try:
|
||||||
tab = nilmdb.server.bulkdata.Table(bulk)
|
tab = nilmdb.server.bulkdata.Table(bulk)
|
||||||
except FsckFormatError:
|
except Exception as e:
|
||||||
# If there are no files except _format, try deleting
|
|
||||||
# the entire stream; this may remove metadata, but
|
|
||||||
# it's probably unimportant.
|
|
||||||
files = list(os.listdir(bulk))
|
|
||||||
if len(files) > 1:
|
|
||||||
raise FsckFormatError(f"{path}: can't load _format, "
|
|
||||||
f"but data is also present")
|
|
||||||
|
|
||||||
# Since the stream was empty, just remove it
|
|
||||||
self.fix_remove_stream(sid, path, bulk,
|
|
||||||
"empty, with corrupted format file")
|
|
||||||
except FsckError as e:
|
|
||||||
raise e
|
|
||||||
except Exception as e: # pragma: no cover
|
|
||||||
# No coverage because this is an unknown/unexpected error
|
|
||||||
raise FsckError("%s: can't open bulkdata: %s",
|
raise FsckError("%s: can't open bulkdata: %s",
|
||||||
path, str(e))
|
path, str(e))
|
||||||
|
finally:
|
||||||
|
if tab:
|
||||||
tab.close()
|
tab.close()
|
||||||
|
|
||||||
def fix_row_overlap(self, sid, path, existing, new):
|
|
||||||
# If the file rows (spos, epos) overlap in the interval table,
|
|
||||||
# and the overlapping ranges look like this:
|
|
||||||
# A --------- C
|
|
||||||
# B -------- D
|
|
||||||
# Then we can try changing the first interval to go from
|
|
||||||
# A to B instead.
|
|
||||||
msg = (f"{path}: overlap in file offsets:\n"
|
|
||||||
f"existing ranges: {existing}\n"
|
|
||||||
f"overlapping interval: {new}")
|
|
||||||
if not self.fix:
|
|
||||||
raise FixableFsckError(msg)
|
|
||||||
err(f"\n{msg}\nSeeing if we can truncate one of them...\n")
|
|
||||||
|
|
||||||
# See if there'e exactly one interval that overlaps the
|
|
||||||
# conflicting one in the right way
|
|
||||||
match = None
|
|
||||||
for intv in self.stream_interval[sid]:
|
|
||||||
(stime, etime, spos, epos) = intv
|
|
||||||
if spos < new.start and epos > new.start:
|
|
||||||
if match:
|
|
||||||
err(f"no, more than one interval matched:\n"
|
|
||||||
f"{intv}\n{match}\n")
|
|
||||||
raise FsckError(f"{path}: unfixable overlap")
|
|
||||||
match = intv
|
|
||||||
if match is None:
|
|
||||||
err("no intervals overlapped in the right way\n")
|
|
||||||
raise FsckError(f"{path}: unfixable overlap")
|
|
||||||
|
|
||||||
# Truncate the file position
|
|
||||||
err(f"truncating {match}\n")
|
|
||||||
with self.sql:
|
|
||||||
cur = self.sql.cursor()
|
|
||||||
cur.execute("UPDATE ranges SET end_pos=? "
|
|
||||||
"WHERE stream_id=? AND start_time=? AND "
|
|
||||||
"end_time=? AND start_pos=? AND end_pos=?",
|
|
||||||
(new.start, sid, *match))
|
|
||||||
if cur.rowcount != 1: # pragma: no cover (shouldn't fail)
|
|
||||||
raise FsckError("failed to fix SQL database")
|
|
||||||
raise RetryFsck
|
|
||||||
|
|
||||||
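
The truncation in fix_row_overlap rests on a simple fact about half-open row ranges: if [A, C) and [B, D) overlap with A < B < C, shrinking the first range to [A, B) removes the overlap while every row stays covered by one of the two ranges. A tiny worked check of that invariant (standalone, for illustration):

    def truncate_first(first, second):
        # first = (A, C), second = (B, D): half-open row ranges.
        a, c = first
        b, d = second
        assert a < b < c, "only this overlap shape is fixable"
        return (a, b)

    # [0, 100) overlaps [60, 150); truncating leaves disjoint ranges.
    assert truncate_first((0, 100), (60, 150)) == (0, 60)
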
### Check that bulkdata is good enough to be opened
|
### Check that bulkdata is good enough to be opened
|
||||||
|
|
||||||
@retry_if_raised(RetryFsck)
|
@retry_if_raised(RetryFsck)
|
||||||
def check_bulkdata(self, sid, path, bulk):
|
def check_bulkdata(self, sid, path, bulk):
|
||||||
try:
|
with open(os.path.join(bulk, "_format"), "rb") as f:
|
||||||
with open(os.path.join(bulk, b"_format"), "rb") as f:
|
|
||||||
fmt = pickle.load(f)
|
fmt = pickle.load(f)
|
||||||
except Exception as e:
|
|
||||||
raise FsckFormatError(f"{path}: can't load _format file ({e})")
|
|
||||||
|
|
||||||
if fmt["version"] != 3:
|
if fmt["version"] != 3:
|
||||||
raise FsckFormatError("%s: bad or unsupported bulkdata version %d",
|
raise FsckError("%s: bad or unsupported bulkdata version %d",
|
||||||
path, fmt["version"])
|
path, fmt["version"])
|
||||||
rows_per_file = int(fmt["rows_per_file"])
|
row_per_file = int(fmt["rows_per_file"])
|
||||||
if rows_per_file < 1:
|
|
||||||
raise FsckFormatError(f"{path}: bad rows_per_file {rows_per_file}")
|
|
||||||
files_per_dir = int(fmt["files_per_dir"])
|
files_per_dir = int(fmt["files_per_dir"])
|
||||||
if files_per_dir < 1:
|
|
||||||
raise FsckFormatError(f"{path}: bad files_per_dir {files_per_dir}")
|
|
||||||
layout = fmt["layout"]
|
layout = fmt["layout"]
|
||||||
if layout != self.stream_layout[sid]:
|
if layout != self.stream_layout[sid]:
|
||||||
raise FsckFormatError("%s: layout mismatch %s != %s", path,
|
raise FsckError("%s: layout mismatch %s != %s", path,
|
||||||
layout, self.stream_layout[sid])
|
layout, self.stream_layout[sid])
|
||||||
|
|
||||||
# Every file should have a size that's a multiple of the row size
|
# Every file should have a size that's a multiple of the row size
|
||||||
|
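
check_bulkdata above validates the pickled _format descriptor before touching any data: the version must be 3, rows_per_file and files_per_dir must be positive, and the layout must match what SQL recorded. A hedged sketch of that validation, assuming a dict shaped like the one the diff reads:

    import os
    import pickle

    def load_format(bulkdir):
        # Read and sanity-check a bulkdata _format descriptor.
        with open(os.path.join(bulkdir, "_format"), "rb") as f:
            fmt = pickle.load(f)
        if fmt["version"] != 3:
            raise ValueError("bad or unsupported bulkdata version %r"
                             % fmt["version"])
        if int(fmt["rows_per_file"]) < 1:
            raise ValueError("bad rows_per_file %r" % fmt["rows_per_file"])
        if int(fmt["files_per_dir"]) < 1:
            raise ValueError("bad files_per_dir %r" % fmt["files_per_dir"])
        return fmt
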
@ -337,16 +263,16 @@ class Fsck(object):
|
||||||
rkt.close()
|
rkt.close()
|
||||||
|
|
||||||
# Find all directories
|
# Find all directories
|
||||||
regex = re.compile(b"^[0-9a-f]{4,}$")
|
regex = re.compile("^[0-9a-f]{4,}$")
|
||||||
subdirs = sorted(filter(regex.search, os.listdir(bulk)),
|
subdirs = sorted(filter(regex.search, os.listdir(bulk)),
|
||||||
key=lambda x: int(x, 16), reverse=True)
|
key = lambda x: int(x, 16), reverse = True)
|
||||||
for subdir in subdirs:
|
for subdir in subdirs:
|
||||||
# Find all files in that dir
|
# Find all files in that dir
|
||||||
subpath = os.path.join(bulk, subdir)
|
subpath = os.path.join(bulk, subdir)
|
||||||
files = list(filter(regex.search, os.listdir(subpath)))
|
files = filter(regex.search, os.listdir(subpath))
|
||||||
if not files:
|
if not files:
|
||||||
self.fix_empty_subdir(subpath)
|
self.fix_empty_subdir(subpath)
|
||||||
|
raise RetryFsck
|
||||||
# Verify that their size is a multiple of the row size
|
# Verify that their size is a multiple of the row size
|
||||||
for filename in files:
|
for filename in files:
|
||||||
filepath = os.path.join(subpath, filename)
|
filepath = os.path.join(subpath, filename)
|
||||||
|
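
Bulkdata directory and file names are lowercase hex, so the walk above filters os.listdir output with ^[0-9a-f]{4,}$ and sorts by the numeric value of the name, largest first. A small standalone illustration of that filter-and-sort:

    import re

    regex = re.compile(r"^[0-9a-f]{4,}$")
    names = ["000f", "0002", "junk.tmp", "0010"]
    # Keep hex-named entries only, highest value first.
    subdirs = sorted(filter(regex.search, names),
                     key=lambda x: int(x, 16), reverse=True)
    assert subdirs == ["0010", "000f", "0002"]
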
@ -362,11 +288,10 @@ class Fsck(object):
|
||||||
# as long as it's only ".removed" files.
|
# as long as it's only ".removed" files.
|
||||||
err("\n%s\n", msg)
|
err("\n%s\n", msg)
|
||||||
for fn in os.listdir(subpath):
|
for fn in os.listdir(subpath):
|
||||||
if not fn.endswith(b".removed"):
|
if not fn.endswith(".removed"):
|
||||||
raise FsckError("can't fix automatically: please manually "
|
raise FsckError("can't fix automatically: please manually "
|
||||||
"remove the file '%s' and try again",
|
"remove the file %s and try again",
|
||||||
os.path.join(subpath, fn).decode(
|
os.path.join(subpath, fn))
|
||||||
'utf-8', errors='backslashreplace'))
|
|
||||||
# Remove the whole thing
|
# Remove the whole thing
|
||||||
err("Removing empty subpath\n")
|
err("Removing empty subpath\n")
|
||||||
shutil.rmtree(subpath)
|
shutil.rmtree(subpath)
|
||||||
|
@ -387,40 +312,19 @@ class Fsck(object):
|
||||||
f.truncate(newsize)
|
f.truncate(newsize)
|
||||||
raise RetryFsck
|
raise RetryFsck
|
||||||
|
|
||||||
def fix_remove_stream(self, sid, path, bulk, reason):
|
|
||||||
msg = f"stream {path} is corrupted: {reason}"
|
|
||||||
if not self.fix:
|
|
||||||
raise FixableFsckError(msg)
|
|
||||||
# Remove the stream from disk and the database
|
|
||||||
err(f"\n{msg}\n")
|
|
||||||
err(f"Removing stream {path} from disk and database\n")
|
|
||||||
shutil.rmtree(bulk)
|
|
||||||
with self.sql:
|
|
||||||
cur = self.sql.cursor()
|
|
||||||
cur.execute("DELETE FROM streams WHERE id=?",
|
|
||||||
(sid,))
|
|
||||||
if cur.rowcount != 1: # pragma: no cover (shouldn't fail)
|
|
||||||
raise FsckError("failed to remove stream")
|
|
||||||
cur.execute("DELETE FROM ranges WHERE stream_id=?", (sid,))
|
|
||||||
cur.execute("DELETE FROM metadata WHERE stream_id=?", (sid,))
|
|
||||||
raise RetryFsck
|
|
||||||
|
|
||||||
### Check interval endpoints
|
### Check interval endpoints
|
||||||
|
|
||||||
def check_intervals(self):
|
def check_intervals(self):
|
||||||
total_ints = sum(len(x) for x in list(self.stream_interval.values()))
|
total_ints = sum(len(x) for x in self.stream_interval.values())
|
||||||
log("checking %s intervals\n", "{:,d}".format(total_ints))
|
log("checking %s intervals\n", "{:,d}".format(total_ints))
|
||||||
done = 0
|
done = 0
|
||||||
with Progress(total_ints) as pbar:
|
with Progress(total_ints) as pbar:
|
||||||
for sid in self.stream_interval:
|
for sid in self.stream_interval:
|
||||||
try:
|
try:
|
||||||
bulk = self.bulkpath + self.stream_path[sid]
|
bulk = self.bulkpath + self.stream_path[sid]
|
||||||
bulk = bulk.encode('utf-8')
|
|
||||||
tab = nilmdb.server.bulkdata.Table(bulk)
|
tab = nilmdb.server.bulkdata.Table(bulk)
|
||||||
|
|
||||||
def update(x):
|
def update(x):
|
||||||
pbar.update(done + x)
|
pbar.update(done + x)
|
||||||
|
|
||||||
ints = self.stream_interval[sid]
|
ints = self.stream_interval[sid]
|
||||||
done += self.check_table_intervals(sid, ints, tab, update)
|
done += self.check_table_intervals(sid, ints, tab, update)
|
||||||
finally:
|
finally:
|
||||||
|
@ -429,7 +333,7 @@ class Fsck(object):
|
||||||
def check_table_intervals(self, sid, ints, tab, update):
|
def check_table_intervals(self, sid, ints, tab, update):
|
||||||
# look in the table to make sure we can pick out the interval's
|
# look in the table to make sure we can pick out the interval's
|
||||||
# endpoints
|
# endpoints
|
||||||
path = self.stream_path[sid] # noqa: F841 unused
|
path = self.stream_path[sid]
|
||||||
tab.file_open.cache_remove_all()
|
tab.file_open.cache_remove_all()
|
||||||
for (i, intv) in enumerate(ints):
|
for (i, intv) in enumerate(ints):
|
||||||
update(i)
|
update(i)
|
||||||
|
@ -437,11 +341,11 @@ class Fsck(object):
|
||||||
if spos == epos and spos >= 0 and spos <= tab.nrows:
|
if spos == epos and spos >= 0 and spos <= tab.nrows:
|
||||||
continue
|
continue
|
||||||
try:
|
try:
|
||||||
srow = tab[spos] # noqa: F841 unused
|
srow = tab[spos]
|
||||||
erow = tab[epos-1] # noqa: F841 unused
|
erow = tab[epos-1]
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
self.fix_bad_interval(sid, intv, tab, str(e))
|
self.fix_bad_interval(sid, intv, tab, str(e))
|
||||||
|
raise RetryFsck
|
||||||
return len(ints)
|
return len(ints)
|
||||||
|
|
||||||
def fix_bad_interval(self, sid, intv, tab, msg):
|
def fix_bad_interval(self, sid, intv, tab, msg):
|
||||||
|
@ -470,23 +374,23 @@ class Fsck(object):
|
||||||
"end_time=? AND start_pos=? AND end_pos=?",
|
"end_time=? AND start_pos=? AND end_pos=?",
|
||||||
(new_etime, new_epos, sid, stime, etime,
|
(new_etime, new_epos, sid, stime, etime,
|
||||||
spos, epos))
|
spos, epos))
|
||||||
if cur.rowcount != 1: # pragma: no cover (shouldn't fail)
|
if cur.rowcount != 1:
|
||||||
raise FsckError("failed to fix SQL database")
|
raise FsckError("failed to fix SQL database")
|
||||||
raise RetryFsck
|
raise RetryFsck
|
||||||
err("actually it can't be truncated; times are bad too\n")
|
err("actually it can't be truncated; times are bad too")
|
||||||
|
|
||||||
# Otherwise, the only hope is to delete the interval entirely.
|
# Otherwise, the only hope is to delete the interval entirely.
|
||||||
err("*** Deleting the entire interval from SQL.\n")
|
err("*** Deleting the entire interval from SQL.\n")
|
||||||
err("This may leave stale data on disk. To fix that, copy all "
|
err("This may leave stale data on disk. To fix that, copy all\n")
|
||||||
"data from this stream to a new stream using nilm-copy, then\n")
|
err("data from this stream to a new stream, then remove all data\n")
|
||||||
err("remove all data from and destroy %s.\n", path)
|
err("from and destroy %s.\n", path)
|
||||||
with self.sql:
|
with self.sql:
|
||||||
cur = self.sql.cursor()
|
cur = self.sql.cursor()
|
||||||
cur.execute("DELETE FROM ranges WHERE "
|
cur.execute("DELETE FROM ranges WHERE "
|
||||||
"stream_id=? AND start_time=? AND "
|
"stream_id=? AND start_time=? AND "
|
||||||
"end_time=? AND start_pos=? AND end_pos=?",
|
"end_time=? AND start_pos=? AND end_pos=?",
|
||||||
(sid, stime, etime, spos, epos))
|
(sid, stime, etime, spos, epos))
|
||||||
if cur.rowcount != 1: # pragma: no cover (shouldn't fail)
|
if cur.rowcount != 1:
|
||||||
raise FsckError("failed to remove interval")
|
raise FsckError("failed to remove interval")
|
||||||
raise RetryFsck
|
raise RetryFsck
|
||||||
|
|
||||||
|
@ -494,19 +398,16 @@ class Fsck(object):
|
||||||
|
|
||||||
def check_data(self):
|
def check_data(self):
|
||||||
total_rows = sum(sum((y[3] - y[2]) for y in x)
|
total_rows = sum(sum((y[3] - y[2]) for y in x)
|
||||||
for x in list(self.stream_interval.values()))
|
for x in self.stream_interval.values())
|
||||||
log("checking %s rows of data\n", "{:,d}".format(total_rows))
|
log("checking %s rows of data\n", "{:,d}".format(total_rows))
|
||||||
done = 0
|
done = 0
|
||||||
with Progress(total_rows) as pbar:
|
with Progress(total_rows) as pbar:
|
||||||
for sid in self.stream_interval:
|
for sid in self.stream_interval:
|
||||||
try:
|
try:
|
||||||
bulk = self.bulkpath + self.stream_path[sid]
|
bulk = self.bulkpath + self.stream_path[sid]
|
||||||
bulk = bulk.encode('utf-8')
|
|
||||||
tab = nilmdb.server.bulkdata.Table(bulk)
|
tab = nilmdb.server.bulkdata.Table(bulk)
|
||||||
|
|
||||||
def update(x):
|
def update(x):
|
||||||
pbar.update(done + x)
|
pbar.update(done + x)
|
||||||
|
|
||||||
ints = self.stream_interval[sid]
|
ints = self.stream_interval[sid]
|
||||||
done += self.check_table_data(sid, ints, tab, update)
|
done += self.check_table_data(sid, ints, tab, update)
|
||||||
finally:
|
finally:
|
||||||
|
@ -515,7 +416,7 @@ class Fsck(object):
|
||||||
def check_table_data(self, sid, ints, tab, update):
|
def check_table_data(self, sid, ints, tab, update):
|
||||||
# Pull out all of the interval's data and verify that it's
|
# Pull out all of the interval's data and verify that it's
|
||||||
# monotonic.
|
# monotonic.
|
||||||
maxrows = getattr(self, 'maxrows_override', 100000)
|
maxrows = 100000
|
||||||
path = self.stream_path[sid]
|
path = self.stream_path[sid]
|
||||||
layout = self.stream_layout[sid]
|
layout = self.stream_layout[sid]
|
||||||
dtype = nilmdb.client.numpyclient.layout_to_dtype(layout)
|
dtype = nilmdb.client.numpyclient.layout_to_dtype(layout)
|
||||||
|
@ -535,76 +436,29 @@ class Fsck(object):
|
||||||
|
|
||||||
# Get raw data, convert to NumPy array
|
# Get raw data, convert to NumPy array
|
||||||
try:
|
try:
|
||||||
raw = tab.get_data(start, stop, binary=True)
|
raw = tab.get_data(start, stop, binary = True)
|
||||||
data = numpy.frombuffer(raw, dtype)
|
data = numpy.fromstring(raw, dtype)
|
||||||
except Exception as e: # pragma: no cover
|
except Exception as e:
|
||||||
# No coverage because it's hard to trigger this -- earlier
|
raise FsckError("%s: failed to grab rows %d through %d: %s",
|
||||||
# checks check the ranges, so this would probably be a real
|
|
||||||
# disk error, malloc failure, etc.
|
|
||||||
raise FsckError(
|
|
||||||
"%s: failed to grab rows %d through %d: %s",
|
|
||||||
path, start, stop, repr(e))
|
path, start, stop, repr(e))
|
||||||
|
|
||||||
ts = data['timestamp']
|
|
||||||
|
|
||||||
# Verify that all timestamps are in range.
|
|
||||||
match = (ts < stime) | (ts >= etime)
|
|
||||||
if match.any():
|
|
||||||
row = numpy.argmax(match)
|
|
||||||
if ts[row] != 0:
|
|
||||||
raise FsckError("%s: data timestamp %d at row %d "
|
|
||||||
"outside interval range [%d,%d)",
|
|
||||||
path, ts[row], row + start,
|
|
||||||
stime, etime)
|
|
||||||
|
|
||||||
# Timestamp is zero and out of the expected range;
|
|
||||||
# assume file ends with zeroed data and just truncate it.
|
|
||||||
self.fix_table_by_truncating(
|
|
||||||
path, tab, row + start,
|
|
||||||
"data timestamp is out of range, and zero")
|
|
||||||
|
|
||||||
# Verify that timestamps are monotonic
|
# Verify that timestamps are monotonic
|
||||||
match = numpy.diff(ts) <= 0
|
if (numpy.diff(data['timestamp']) <= 0).any():
|
||||||
if match.any():
|
raise FsckError("%s: non-monotonic timestamp(s) in rows "
|
||||||
row = numpy.argmax(match)
|
"%d through %d", path, start, stop)
|
||||||
if ts[row+1] != 0:
|
first_ts = data['timestamp'][0]
|
||||||
raise FsckError(
|
|
||||||
"%s: non-monotonic timestamp (%d -> %d) "
|
|
||||||
"at row %d", path, ts[row], ts[row+1],
|
|
||||||
row + start)
|
|
||||||
|
|
||||||
# Timestamp is zero and non-monotonic;
|
|
||||||
# assume file ends with zeroed data and just truncate it.
|
|
||||||
self.fix_table_by_truncating(
|
|
||||||
path, tab, row + start + 1,
|
|
||||||
"data timestamp is non-monotonic, and zero")
|
|
||||||
|
|
||||||
first_ts = ts[0]
|
|
||||||
if last_ts is not None and first_ts <= last_ts:
|
if last_ts is not None and first_ts <= last_ts:
|
||||||
raise FsckError("%s: first interval timestamp %d is not "
|
raise FsckError("%s: first interval timestamp %d is not "
|
||||||
"greater than the previous last interval "
|
"greater than the previous last interval "
|
||||||
"timestamp %d, at row %d",
|
"timestamp %d, at row %d",
|
||||||
path, first_ts, last_ts, start)
|
path, first_ts, last_ts, start)
|
||||||
last_ts = ts[-1]
|
last_ts = data['timestamp'][-1]
|
||||||
|
|
||||||
# The previous errors are fixable, by removing the
|
# These are probably fixable, by removing the offending
|
||||||
# offending intervals, or changing the data
|
# intervals. But I'm not going to bother implementing
|
||||||
# timestamps. But these are probably unlikely errors,
|
# that yet.
|
||||||
# so it's not worth implementing that yet.
|
|
||||||
|
|
||||||
# Done
|
# Done
|
||||||
done += count
|
done += count
|
||||||
update(done)
|
update(done)
|
||||||
return done
|
return done
|
||||||
|
|
||||||
def fix_table_by_truncating(self, path, tab, row, reason):
|
|
||||||
# Simple fix for bad data: truncate the table at the given row.
|
|
||||||
# On retry, fix_bad_interval will correct the database and timestamps
|
|
||||||
# to account for this truncation.
|
|
||||||
msg = f"{path}: bad data in table, starting at row {row}: {reason}"
|
|
||||||
if not self.fix:
|
|
||||||
raise FixableFsckError(msg)
|
|
||||||
err(f"\n{msg}\nWill try truncating table\n")
|
|
||||||
(subdir, fname, offs, count) = tab._offset_from_row(row)
|
|
||||||
tab._remove_or_truncate_file(subdir, fname, offs)
|
|
||||||
raise RetryFsck
|
|
||||||
|
|
|
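
The data check above vectorizes its monotonicity test: numpy.diff(ts) <= 0 marks every non-increasing step, and numpy.argmax on the boolean mask gives the index of the first offending row. A compact standalone sketch of the idiom, with made-up timestamps:

    import numpy

    ts = numpy.array([10, 20, 30, 25, 40])
    bad = numpy.diff(ts) <= 0
    if bad.any():
        row = int(numpy.argmax(bad))  # first True in the mask
        # ts[row] -> ts[row + 1] is the offending step (30 -> 25 here)
        assert (ts[row], ts[row + 1]) == (30, 25)
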
@ -1,27 +1,26 @@
|
||||||
#!/usr/bin/env python3
|
#!/usr/bin/python
|
||||||
|
|
||||||
import nilmdb.fsck
|
import nilmdb.fsck
|
||||||
import argparse
|
import argparse
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
|
||||||
def main():
|
def main():
|
||||||
"""Main entry point for the 'nilmdb-fsck' command line script"""
|
"""Main entry point for the 'nilmdb-fsck' command line script"""
|
||||||
|
|
||||||
parser = argparse.ArgumentParser(
|
parser = argparse.ArgumentParser(
|
||||||
description='Check database consistency',
|
description = 'Check database consistency',
|
||||||
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
|
formatter_class = argparse.ArgumentDefaultsHelpFormatter,
|
||||||
parser.add_argument("-v", "--version", action="version",
|
version = nilmdb.__version__)
|
||||||
version=nilmdb.__version__)
|
|
||||||
parser.add_argument("-f", "--fix", action="store_true",
|
parser.add_argument("-f", "--fix", action="store_true",
|
||||||
default=False, help='Fix errors when possible '
|
default=False, help = 'Fix errors when possible '
|
||||||
'(which may involve removing data)')
|
'(which may involve removing data)')
|
||||||
parser.add_argument("-n", "--no-data", action="store_true",
|
parser.add_argument("-n", "--no-data", action="store_true",
|
||||||
default=False, help='Skip the slow full-data check')
|
default=False, help = 'Skip the slow full-data check')
|
||||||
parser.add_argument('database', help='Database directory')
|
parser.add_argument('database', help = 'Database directory')
|
||||||
args = parser.parse_args()
|
args = parser.parse_args()
|
||||||
|
|
||||||
nilmdb.fsck.Fsck(args.database, args.fix).check(skip_data=args.no_data)
|
nilmdb.fsck.Fsck(args.database, args.fix).check(skip_data = args.no_data)
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
main()
|
main()
|
||||||
|
|
|
@ -1,43 +1,36 @@
|
||||||
#!/usr/bin/env python3
|
#!/usr/bin/python
|
||||||
|
|
||||||
import os
|
|
||||||
import sys
|
|
||||||
import socket
|
|
||||||
import argparse
|
|
||||||
|
|
||||||
import cherrypy
|
|
||||||
|
|
||||||
import nilmdb.server
|
import nilmdb.server
|
||||||
|
import argparse
|
||||||
|
import os
|
||||||
|
import socket
|
||||||
|
|
||||||
def main():
|
def main():
|
||||||
"""Main entry point for the 'nilmdb-server' command line script"""
|
"""Main entry point for the 'nilmdb-server' command line script"""
|
||||||
|
|
||||||
parser = argparse.ArgumentParser(
|
parser = argparse.ArgumentParser(
|
||||||
description='Run the NilmDB server',
|
description = 'Run the NilmDB server',
|
||||||
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
|
formatter_class = argparse.ArgumentDefaultsHelpFormatter,
|
||||||
|
version = nilmdb.__version__)
|
||||||
parser.add_argument("-v", "--version", action="version",
|
|
||||||
version=nilmdb.__version__)
|
|
||||||
|
|
||||||
group = parser.add_argument_group("Standard options")
|
group = parser.add_argument_group("Standard options")
|
||||||
group.add_argument('-a', '--address',
|
group.add_argument('-a', '--address',
|
||||||
help='Only listen on the given address',
|
help = 'Only listen on the given address',
|
||||||
default='0.0.0.0')
|
default = '0.0.0.0')
|
||||||
group.add_argument('-p', '--port', help='Listen on the given port',
|
group.add_argument('-p', '--port', help = 'Listen on the given port',
|
||||||
type=int, default=12380)
|
type = int, default = 12380)
|
||||||
group.add_argument('-d', '--database', help='Database directory',
|
group.add_argument('-d', '--database', help = 'Database directory',
|
||||||
default="./db")
|
default = "./db")
|
||||||
group.add_argument('-q', '--quiet', help='Silence output',
|
group.add_argument('-q', '--quiet', help = 'Silence output',
|
||||||
action='store_true')
|
action = 'store_true')
|
||||||
group.add_argument('-t', '--traceback',
|
group.add_argument('-t', '--traceback',
|
||||||
help='Provide tracebacks in client errors',
|
help = 'Provide tracebacks in client errors',
|
||||||
action='store_true', default=False)
|
action = 'store_true', default = False)
|
||||||
|
|
||||||
group = parser.add_argument_group("Debug options")
|
group = parser.add_argument_group("Debug options")
|
||||||
group.add_argument('-y', '--yappi', help='Run under yappi profiler and '
|
group.add_argument('-y', '--yappi', help = 'Run under yappi profiler and '
|
||||||
'invoke interactive shell afterwards',
|
'invoke interactive shell afterwards',
|
||||||
action='store_true')
|
action = 'store_true')
|
||||||
|
|
||||||
args = parser.parse_args()
|
args = parser.parse_args()
|
||||||
|
|
||||||
|
@ -46,54 +39,47 @@ def main():
|
||||||
db = nilmdb.utils.serializer_proxy(nilmdb.server.NilmDB)(args.database)
|
db = nilmdb.utils.serializer_proxy(nilmdb.server.NilmDB)(args.database)
|
||||||
|
|
||||||
# Configure the server
|
# Configure the server
|
||||||
if not args.quiet:
|
if args.quiet:
|
||||||
cherrypy._cpconfig.environments['embedded']['log.screen'] = True
|
embedded = True
|
||||||
|
else:
|
||||||
|
embedded = False
|
||||||
server = nilmdb.server.Server(db,
|
server = nilmdb.server.Server(db,
|
||||||
host=args.address,
|
host = args.address,
|
||||||
port=args.port,
|
port = args.port,
|
||||||
force_traceback=args.traceback)
|
embedded = embedded,
|
||||||
|
force_traceback = args.traceback)
|
||||||
|
|
||||||
# Print info
|
# Print info
|
||||||
if not args.quiet:
|
if not args.quiet:
|
||||||
print("Version: %s" % nilmdb.__version__)
|
print "Version: %s" % nilmdb.__version__
|
||||||
print("Database: %s" % (os.path.realpath(args.database)))
|
print "Database: %s" % (os.path.realpath(args.database))
|
||||||
if args.address == '0.0.0.0' or args.address == '::':
|
if args.address == '0.0.0.0' or args.address == '::':
|
||||||
host = socket.getfqdn()
|
host = socket.getfqdn()
|
||||||
else:
|
else:
|
||||||
host = args.address
|
host = args.address
|
||||||
print("Server URL: http://%s:%d/" % (host, args.port))
|
print "Server URL: http://%s:%d/" % ( host, args.port)
|
||||||
print("----")
|
print "----"
|
||||||
|
|
||||||
# Run it
|
# Run it
|
||||||
try:
|
|
||||||
if args.yappi:
|
if args.yappi:
|
||||||
print("Running in yappi")
|
print "Running in yappi"
|
||||||
try:
|
try:
|
||||||
import yappi
|
import yappi
|
||||||
yappi.start()
|
yappi.start()
|
||||||
server.start(blocking=True)
|
server.start(blocking = True)
|
||||||
finally:
|
finally:
|
||||||
yappi.stop()
|
yappi.stop()
|
||||||
stats = yappi.get_func_stats()
|
yappi.print_stats(sort_type = yappi.SORTTYPE_TTOT, limit = 50)
|
||||||
stats.sort("ttot")
|
|
||||||
stats.print_all()
|
|
||||||
try:
|
|
||||||
from IPython import embed
|
from IPython import embed
|
||||||
embed(header="Use the `yappi` or `stats` object to "
|
embed(header = "Use the yappi object to explore further, "
|
||||||
"explore further, `quit` to exit")
|
"quit to exit")
|
||||||
except ModuleNotFoundError:
|
|
||||||
print("\nInstall ipython to explore further")
|
|
||||||
else:
|
else:
|
||||||
server.start(blocking=True)
|
server.start(blocking = True)
|
||||||
except nilmdb.server.serverutil.CherryPyExit:
|
|
||||||
print("Exiting due to CherryPy error", file=sys.stderr)
|
|
||||||
raise
|
|
||||||
finally:
|
|
||||||
if not args.quiet:
|
|
||||||
print("Closing database")
|
|
||||||
db.close()
|
|
||||||
|
|
||||||
|
# Clean up
|
||||||
|
if not args.quiet:
|
||||||
|
print "Closing database"
|
||||||
|
db.close()
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
main()
|
main()
|
||||||
|
|
|
@ -1,12 +1,10 @@
|
||||||
#!/usr/bin/env python3
|
#!/usr/bin/python
|
||||||
|
|
||||||
import nilmdb.cmdline
|
import nilmdb.cmdline
|
||||||
|
|
||||||
|
|
||||||
def main():
|
def main():
|
||||||
"""Main entry point for the 'nilmtool' command line script"""
|
"""Main entry point for the 'nilmtool' command line script"""
|
||||||
nilmdb.cmdline.Cmdline().run()
|
nilmdb.cmdline.Cmdline().run()
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
main()
|
main()
|
||||||
|
|
|
@ -1,8 +1,20 @@
|
||||||
"""nilmdb.server"""
|
"""nilmdb.server"""
|
||||||
|
|
||||||
# Set up pyximport to automatically rebuild Cython modules if needed.
|
from __future__ import absolute_import
|
||||||
import pyximport
|
|
||||||
pyximport.install(inplace=True, build_in_temp=False)
|
# Try to set up pyximport to automatically rebuild Cython modules. If
|
||||||
|
# this doesn't work, it's OK, as long as the modules were built externally.
|
||||||
|
# (e.g. python setup.py build_ext --inplace)
|
||||||
|
try: # pragma: no cover
|
||||||
|
import Cython
|
||||||
|
import distutils.version
|
||||||
|
if (distutils.version.LooseVersion(Cython.__version__) <
|
||||||
|
distutils.version.LooseVersion("0.17")): # pragma: no cover
|
||||||
|
raise ImportError("Cython version too old")
|
||||||
|
import pyximport
|
||||||
|
pyximport.install(inplace = True, build_in_temp = False)
|
||||||
|
except (ImportError, TypeError): # pragma: no cover
|
||||||
|
pass
|
||||||
|
|
||||||
from nilmdb.server.nilmdb import NilmDB
|
from nilmdb.server.nilmdb import NilmDB
|
||||||
from nilmdb.server.server import Server, wsgi_application
|
from nilmdb.server.server import Server, wsgi_application
|
||||||
|
|
|
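
The two sides above differ in how pyximport is installed: master requires it unconditionally, while the python2 branch guarded it behind a Cython version check and fell back to externally built modules. The guarded pattern, condensed from the python2 side of the diff:

    # Rebuild Cython modules on import if a new-enough Cython is around;
    # otherwise assume they were built externally (setup.py build_ext).
    try:
        import Cython
        import distutils.version
        if (distutils.version.LooseVersion(Cython.__version__) <
                distutils.version.LooseVersion("0.17")):
            raise ImportError("Cython version too old")
        import pyximport
        pyximport.install(inplace=True, build_in_temp=False)
    except (ImportError, TypeError):
        pass
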
@ -1,15 +1,19 @@
|
||||||
# Fixed record size bulk data storage
|
# Fixed record size bulk data storage
|
||||||
|
|
||||||
|
# Need absolute_import so that "import nilmdb" won't pull in
|
||||||
|
# nilmdb.py, but will pull the parent nilmdb module instead.
|
||||||
|
from __future__ import absolute_import
|
||||||
|
from __future__ import division
|
||||||
|
from nilmdb.utils.printf import *
|
||||||
|
from nilmdb.utils.time import timestamp_to_string as timestamp_to_string
|
||||||
|
import nilmdb.utils
|
||||||
|
|
||||||
import os
|
import os
|
||||||
|
import cPickle as pickle
|
||||||
import re
|
import re
|
||||||
import sys
|
import sys
|
||||||
import pickle
|
|
||||||
import tempfile
|
import tempfile
|
||||||
|
|
||||||
from nilmdb.utils.printf import sprintf
|
|
||||||
from nilmdb.utils.time import timestamp_to_string
|
|
||||||
import nilmdb.utils
|
|
||||||
|
|
||||||
import nilmdb.utils.lock
|
import nilmdb.utils.lock
|
||||||
from . import rocket
|
from . import rocket
|
||||||
|
|
||||||
|
@ -18,32 +22,28 @@ from . import rocket
|
||||||
table_cache_size = 32
|
table_cache_size = 32
|
||||||
fd_cache_size = 8
|
fd_cache_size = 8
|
||||||
|
|
||||||
|
@nilmdb.utils.must_close(wrap_verify = False)
|
||||||
@nilmdb.utils.must_close(wrap_verify=False)
|
class BulkData(object):
|
||||||
class BulkData():
|
|
||||||
def __init__(self, basepath, **kwargs):
|
def __init__(self, basepath, **kwargs):
|
||||||
if isinstance(basepath, str):
|
|
||||||
self.basepath = self._encode_filename(basepath)
|
|
||||||
else:
|
|
||||||
self.basepath = basepath
|
self.basepath = basepath
|
||||||
self.root = os.path.join(self.basepath, b"data")
|
self.root = os.path.join(self.basepath, "data")
|
||||||
self.lock = self.root + b".lock"
|
self.lock = self.root + ".lock"
|
||||||
self.lockfile = None
|
self.lockfile = None
|
||||||
|
|
||||||
# Tunables
|
# Tunables
|
||||||
if "file_size" in kwargs and kwargs["file_size"] is not None:
|
if "file_size" in kwargs:
|
||||||
self.file_size = kwargs["file_size"]
|
self.file_size = kwargs["file_size"]
|
||||||
else:
|
else:
|
||||||
# Default to approximately 128 MiB per file
|
# Default to approximately 128 MiB per file
|
||||||
self.file_size = 128 * 1024 * 1024
|
self.file_size = 128 * 1024 * 1024
|
||||||
|
|
||||||
if "files_per_dir" in kwargs and kwargs["files_per_dir"] is not None:
|
if "files_per_dir" in kwargs:
|
||||||
self.files_per_dir = kwargs["files_per_dir"]
|
self.files_per_dir = kwargs["files_per_dir"]
|
||||||
else:
|
else:
|
||||||
# 32768 files per dir should work even on FAT32
|
# 32768 files per dir should work even on FAT32
|
||||||
self.files_per_dir = 32768
|
self.files_per_dir = 32768
|
||||||
|
|
||||||
if "initial_nrows" in kwargs and kwargs["initial_nrows"] is not None:
|
if "initial_nrows" in kwargs:
|
||||||
self.initial_nrows = kwargs["initial_nrows"]
|
self.initial_nrows = kwargs["initial_nrows"]
|
||||||
else:
|
else:
|
||||||
# First row is 0
|
# First row is 0
|
||||||
|
@ -56,8 +56,7 @@ class BulkData():
|
||||||
# Create the lock
|
# Create the lock
|
||||||
self.lockfile = open(self.lock, "w")
|
self.lockfile = open(self.lock, "w")
|
||||||
if not nilmdb.utils.lock.exclusive_lock(self.lockfile):
|
if not nilmdb.utils.lock.exclusive_lock(self.lockfile):
|
||||||
raise IOError('database at "' +
|
raise IOError('database at "' + self.basepath +
|
||||||
self._decode_filename(self.basepath) +
|
|
||||||
'" is already locked by another process')
|
'" is already locked by another process')
|
||||||
|
|
||||||
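
BulkData guards the whole database directory with one lock file: open it, try to take an exclusive lock, and refuse to start if another process already holds it. The diff doesn't show nilmdb.utils.lock's internals, so this fcntl-based sketch is an assumption about how such an exclusive_lock helper typically looks:

    import fcntl

    def exclusive_lock(fileobj):
        # Take a non-blocking exclusive advisory lock; True on success.
        try:
            fcntl.flock(fileobj.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
            return True
        except OSError:
            return False

    lockfile = open("data.lock", "w")
    if not exclusive_lock(lockfile):
        raise IOError('database is already locked by another process')
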
def close(self):
|
def close(self):
|
||||||
|
@ -67,21 +66,21 @@ class BulkData():
|
||||||
self.lockfile.close()
|
self.lockfile.close()
|
||||||
try:
|
try:
|
||||||
os.unlink(self.lock)
|
os.unlink(self.lock)
|
||||||
except OSError:
|
except OSError: # pragma: no cover
|
||||||
pass
|
pass
|
||||||
self.lockfile = None
|
self.lockfile = None
|
||||||
|
|
||||||
def _encode_filename(self, path):
|
def _encode_filename(self, path):
|
||||||
# Translate unicode strings to raw bytes, if needed. We
|
# Encode all paths to UTF-8, regardless of sys.getfilesystemencoding(),
|
||||||
# always manipulate paths internally as bytes.
|
# because we want to be able to represent all code points and the user
|
||||||
|
# will never be directly exposed to filenames. We can then do path
|
||||||
|
# manipulations on the UTF-8 directly.
|
||||||
|
if isinstance(path, unicode):
|
||||||
return path.encode('utf-8')
|
return path.encode('utf-8')
|
||||||
|
return path
|
||||||
def _decode_filename(self, path):
|
|
||||||
# Translate raw bytes to unicode strings, escaping if needed
|
|
||||||
return path.decode('utf-8', errors='backslashreplace')
|
|
||||||
|
|
||||||
def _create_check_ospath(self, ospath):
|
def _create_check_ospath(self, ospath):
|
||||||
if ospath[-1:] == b'/':
|
if ospath[-1] == '/':
|
||||||
raise ValueError("invalid path; should not end with a /")
|
raise ValueError("invalid path; should not end with a /")
|
||||||
if Table.exists(ospath):
|
if Table.exists(ospath):
|
||||||
raise ValueError("stream already exists at this path")
|
raise ValueError("stream already exists at this path")
|
||||||
@@ -89,7 +88,7 @@ class BulkData():
         # Look for any files in subdirectories.  Fully empty subdirectories
         # are OK; they might be there during a rename
         for (root, dirs, files) in os.walk(ospath):
-            if files:
+            if len(files):
                 raise ValueError(
                     "non-empty subdirs of this path already exist")
@@ -98,13 +97,13 @@ class BulkData():
         don't exist.  Returns a list of elements that got created."""
         path = self._encode_filename(unicodepath)

-        if path[0:1] != b'/':
-            raise ValueError("paths must start with / ")
-        [group, node] = path.rsplit(b"/", 1)
-        if group == b'':
+        if path[0] != '/':
+            raise ValueError("paths must start with /")
+        [ group, node ] = path.rsplit("/", 1)
+        if group == '':
             raise ValueError("invalid path; path must contain at least one "
                              "folder")
-        if node == b'':
+        if node == '':
             raise ValueError("invalid path; should not end with a /")
         if not Table.valid_path(path):
             raise ValueError("path name is invalid or contains reserved words")
@@ -115,7 +114,7 @@ class BulkData():
         # os.path.join)

         # Make directories leading up to this one
-        elements = path.lstrip(b'/').split(b'/')
+        elements = path.lstrip('/').split('/')
         made_dirs = []
         try:
             # Make parent elements
@@ -126,11 +125,15 @@ class BulkData():
             if not os.path.isdir(ospath):
                 os.mkdir(ospath)
                 made_dirs.append(ospath)
-        except Exception:
-            # Remove paths that we created
-            for ospath in reversed(made_dirs):
-                os.rmdir(ospath)
-            raise
+        except Exception as e:
+            # Try to remove paths that we created; ignore errors
+            exc_info = sys.exc_info()
+            for ospath in reversed(made_dirs): # pragma: no cover (hard to hit)
+                try:
+                    os.rmdir(ospath)
+                except OSError:
+                    pass
+            raise exc_info[1], None, exc_info[2]

         return elements
@@ -165,7 +168,7 @@ class BulkData():
                 os.rmdir(ospath)
             except OSError:
                 pass
-            raise exc_info[1].with_traceback(exc_info[2])
+            raise exc_info[1], None, exc_info[2]

         # Success
         return
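The rollback paths above are one of the few places the two branches must diverge in actual syntax: Python 2's three-argument `raise exc_info[1], None, exc_info[2]` re-raises with the original traceback and is a syntax error on Python 3, where `.with_traceback()` does the same job. A self-contained sketch (Python 3 only; the py2 form survives in a comment):

    import sys

    def cleanup_and_reraise():
        try:
            raise OSError("original failure")
        except Exception:
            exc_info = sys.exc_info()
            # ... best-effort cleanup would go here ...
            # Python 2 spelling (a syntax error on Python 3):
            #     raise exc_info[1], None, exc_info[2]
            # Python 3 spelling, preserving the original traceback:
            raise exc_info[1].with_traceback(exc_info[2])

    try:
        cleanup_and_reraise()
    except OSError as e:
        assert str(e) == "original failure"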
@@ -173,8 +176,8 @@ class BulkData():
     def _remove_leaves(self, unicodepath):
         """Remove empty directories starting at the leaves of unicodepath"""
         path = self._encode_filename(unicodepath)
-        elements = path.lstrip(b'/').split(b'/')
-        for i in reversed(list(range(len(elements)))):
+        elements = path.lstrip('/').split('/')
+        for i in reversed(range(len(elements))):
             ospath = os.path.join(self.root, *elements[0:i+1])
             try:
                 os.rmdir(ospath)
@@ -188,9 +191,9 @@ class BulkData():
         newpath = self._encode_filename(newunicodepath)

         # Get OS paths
-        oldelements = oldpath.lstrip(b'/').split(b'/')
+        oldelements = oldpath.lstrip('/').split('/')
         oldospath = os.path.join(self.root, *oldelements)
-        newelements = newpath.lstrip(b'/').split(b'/')
+        newelements = newpath.lstrip('/').split('/')
         newospath = os.path.join(self.root, *newelements)

         # Basic checks
@@ -201,8 +204,8 @@ class BulkData():
         self.getnode.cache_remove(self, oldunicodepath)

         # Move the table to a temporary location
-        tmpdir = tempfile.mkdtemp(prefix=b"rename-", dir=self.root)
-        tmppath = os.path.join(tmpdir, b"table")
+        tmpdir = tempfile.mkdtemp(prefix = "rename-", dir = self.root)
+        tmppath = os.path.join(tmpdir, "table")
         os.rename(oldospath, tmppath)

         try:
@@ -230,7 +233,7 @@ class BulkData():
         path = self._encode_filename(unicodepath)

         # Get OS path
-        elements = path.lstrip(b'/').split(b'/')
+        elements = path.lstrip('/').split('/')
         ospath = os.path.join(self.root, *elements)

         # Remove Table object from cache
@@ -239,7 +242,7 @@ class BulkData():
         # Remove the contents of the target directory
         if not Table.exists(ospath):
             raise ValueError("nothing at that path")
-        for (root, dirs, files) in os.walk(ospath, topdown=False):
+        for (root, dirs, files) in os.walk(ospath, topdown = False):
             for name in files:
                 os.remove(os.path.join(root, name))
             for name in dirs:
@@ -249,19 +252,18 @@ class BulkData():
         self._remove_leaves(unicodepath)

     # Cache open tables
-    @nilmdb.utils.lru_cache(size=table_cache_size,
-                            onremove=lambda x: x.close())
+    @nilmdb.utils.lru_cache(size = table_cache_size,
+                            onremove = lambda x: x.close())
     def getnode(self, unicodepath):
         """Return a Table object corresponding to the given database
        path, which must exist."""
         path = self._encode_filename(unicodepath)
-        elements = path.lstrip(b'/').split(b'/')
+        elements = path.lstrip('/').split('/')
         ospath = os.path.join(self.root, *elements)
         return Table(ospath, self.initial_nrows)

-@nilmdb.utils.must_close(wrap_verify=False)
-class Table():
+@nilmdb.utils.must_close(wrap_verify = False)
+class Table(object):
     """Tools to help access a single table (data at a specific OS path)."""
     # See design.md for design details
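`getnode` is memoized with nilmdb's own `lru_cache` decorator so repeated accesses to a stream reuse one open `Table` (and its file descriptors), with the `onremove` hook closing whatever gets evicted. The standard-library cache has no eviction callback, but the idea can be sketched by hand; everything here (`ClosingLRU`, `factory`) is an illustrative assumption, not nilmdb's API:

    from collections import OrderedDict

    class ClosingLRU:
        """Tiny LRU that calls close() on whatever it evicts."""
        def __init__(self, factory, size=16):
            self.factory = factory
            self.size = size
            self.cache = OrderedDict()

        def get(self, key):
            if key in self.cache:
                self.cache.move_to_end(key)    # mark as most recently used
                return self.cache[key]
            obj = self.factory(key)
            self.cache[key] = obj
            if len(self.cache) > self.size:
                _, old = self.cache.popitem(last=False)  # evict LRU entry
                old.close()                              # like onremove=...
            return obj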
@@ -287,26 +289,24 @@ class Table():
         rows_per_file = max(file_size // rkt.binary_size, 1)
         rkt.close()

-        fmt = {
-            "rows_per_file": rows_per_file,
-            "files_per_dir": files_per_dir,
-            "layout": layout,
-            "version": 3
-        }
-        nilmdb.utils.atomic.replace_file(
-            os.path.join(root, b"_format"), pickle.dumps(fmt, 2))
+        fmt = { "rows_per_file": rows_per_file,
+                "files_per_dir": files_per_dir,
+                "layout": layout,
+                "version": 3 }
+        with open(os.path.join(root, "_format"), "wb") as f:
+            pickle.dump(fmt, f, 2)

     # Normal methods
-    def __init__(self, root, initial_nrows=0):
+    def __init__(self, root, initial_nrows = 0):
         """'root' is the full OS path to the directory of this table"""
         self.root = root
         self.initial_nrows = initial_nrows

         # Load the format
-        with open(os.path.join(self.root, b"_format"), "rb") as f:
+        with open(os.path.join(self.root, "_format"), "rb") as f:
             fmt = pickle.load(f)

-        if fmt["version"] != 3:
+        if fmt["version"] != 3:   # pragma: no cover
             # Old versions used floating point timestamps, which aren't
             # valid anymore.
             raise NotImplementedError("old version " + str(fmt["version"]) +
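On master the `_format` pickle goes through `nilmdb.utils.atomic.replace_file` rather than a plain `open(..., "wb")`, so a crash mid-write can never leave a truncated `_format` behind. The usual pattern behind such a helper is write-to-temp-then-rename; this is a sketch of that pattern, not nilmdb's actual implementation:

    import os
    import tempfile

    def replace_file(filename, data):
        """Atomically replace 'filename' with a file containing 'data' (bytes)."""
        dirname = os.path.dirname(filename)
        fd, tmppath = tempfile.mkstemp(dir=dirname)
        try:
            os.write(fd, data)
            os.fsync(fd)            # data reaches disk before the rename
        finally:
            os.close(fd)
        os.rename(tmppath, filename)    # atomic on POSIX filesystems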
@@ -336,25 +336,25 @@ class Table():
         # greater than the row number of any piece of data that
         # currently exists, not necessarily all data that _ever_
         # existed.
-        regex = re.compile(b"^[0-9a-f]{4,}$")
+        regex = re.compile("^[0-9a-f]{4,}$")

         # Find the last directory.  We sort and loop through all of them,
         # starting with the numerically greatest, because the dirs could be
         # empty if something was deleted but the directory was unexpectedly
         # not deleted.
         subdirs = sorted(filter(regex.search, os.listdir(self.root)),
-                         key=lambda x: int(x, 16), reverse=True)
+                         key = lambda x: int(x, 16), reverse = True)

         for subdir in subdirs:
             # Now find the last file in that dir
             path = os.path.join(self.root, subdir)
-            files = list(filter(regex.search, os.listdir(path)))
-            if not files:
+            files = filter(regex.search, os.listdir(path))
+            if not files: # pragma: no cover (shouldn't occur)
                 # Empty dir: try the next one
                 continue

             # Find the numerical max
-            filename = max(files, key=lambda x: int(x, 16))
+            filename = max(files, key = lambda x: int(x, 16))
             offset = os.path.getsize(os.path.join(self.root, subdir, filename))

             # Convert to row number
@@ -380,8 +380,8 @@ class Table():
         filenum = row // self.rows_per_file
         # It's OK if these format specifiers are too short; the filenames
         # will just get longer but will still sort correctly.
-        dirname = sprintf(b"%04x", filenum // self.files_per_dir)
-        filename = sprintf(b"%04x", filenum % self.files_per_dir)
+        dirname = sprintf("%04x", filenum // self.files_per_dir)
+        filename = sprintf("%04x", filenum % self.files_per_dir)
         offset = (row % self.rows_per_file) * self.row_size
         count = self.rows_per_file - (row % self.rows_per_file)
         return (dirname, filename, offset, count)
@@ -389,14 +389,14 @@ class Table():
     def _row_from_offset(self, subdir, filename, offset):
         """Return the row number that corresponds to the given
         'subdir/filename' and byte-offset within that file."""
-        if (offset % self.row_size) != 0:
+        if (offset % self.row_size) != 0:   # pragma: no cover
             # this shouldn't occur, unless there is some corruption somewhere
             raise ValueError("file offset is not a multiple of data size")
         filenum = int(subdir, 16) * self.files_per_dir + int(filename, 16)
         row = (filenum * self.rows_per_file) + (offset // self.row_size)
         return row

-    def _remove_or_truncate_file(self, subdir, filename, offset=0):
+    def _remove_or_truncate_file(self, subdir, filename, offset = 0):
         """Remove the given file, and remove the subdirectory too
         if it's empty.  If offset is nonzero, truncate the file
         to that size instead."""
@@ -416,8 +416,8 @@ class Table():
             pass

     # Cache open files
-    @nilmdb.utils.lru_cache(size=fd_cache_size,
-                            onremove=lambda f: f.close())
+    @nilmdb.utils.lru_cache(size = fd_cache_size,
+                            onremove = lambda f: f.close())
     def file_open(self, subdir, filename):
         """Open and map a given 'subdir/filename' (relative to self.root).
         Will be automatically closed when evicted from the cache."""
@@ -430,14 +430,12 @@ class Table():
         return rocket.Rocket(self.layout,
                              os.path.join(self.root, subdir, filename))

-    def append_data(self, data, start, end, binary=False):
+    def append_data(self, data, start, end, binary = False):
         """Parse the formatted string in 'data', according to the
         current layout, and append it to the table.  If any timestamps
         are non-monotonic, or don't fall between 'start' and 'end',
         a ValueError is raised.

-        Note that data is always of 'bytes' type.
-
         If 'binary' is True, the data should be in raw binary format
         instead: little-endian, matching the current table's layout,
         including the int64 timestamp.
@@ -454,7 +452,7 @@ class Table():
             while data_offset < len(data):
                 # See how many rows we can fit into the current file,
                 # and open it
-                (subdir, fname, offs, count) = self._offset_from_row(tot_rows)
+                (subdir, fname, offset, count) = self._offset_from_row(tot_rows)
                 f = self.file_open(subdir, fname)

                 # Ask the rocket object to parse and append up to "count"
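The `_offset_from_row` / `_row_from_offset` pair above is plain mixed-radix arithmetic: a row number splits into (directory, file, byte offset) using `rows_per_file` and `files_per_dir`. A worked example with small illustrative parameters (not the real defaults):

    rows_per_file = 4
    files_per_dir = 16
    row_size = 24          # bytes per row, determined by the layout

    row = 71
    filenum = row // rows_per_file                   # 17
    dirname = "%04x" % (filenum // files_per_dir)    # '0001'
    filename = "%04x" % (filenum % files_per_dir)    # '0001'
    offset = (row % rows_per_file) * row_size        # 3 * 24 = 72

    # And back again, as in _row_from_offset:
    f = int(dirname, 16) * files_per_dir + int(filename, 16)    # 17
    assert f * rows_per_file + offset // row_size == row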
@@ -478,9 +476,9 @@ class Table():
                     if binary:
                         raise IndexError
                     bad = data.splitlines()[linenum-1]
-                    bad += b'\n' + b' ' * (colnum - 1) + b'^'
+                    bad += '\n' + ' ' * (colnum - 1) + '^'
                 except IndexError:
-                    bad = b""
+                    bad = ""
                 if errtype == rocket.ERR_NON_MONOTONIC:
                     err = "timestamp is not monotonically increasing"
                 elif errtype == rocket.ERR_OUT_OF_INTERVAL:
@@ -494,17 +492,16 @@ class Table():
                                              timestamp_to_string(end))
                 else:
                     err = str(obj)
-                bad_str = bad.decode('utf-8', errors='backslashreplace')
                 raise ValueError("error parsing input data: " +
-                                 where + err + "\n" + bad_str)
+                                 where + err + "\n" + bad)
             tot_rows += added_rows
         except Exception:
             # Some failure, so try to roll things back by truncating or
             # deleting files that we may have appended data to.
             cleanpos = self.nrows
             while cleanpos <= tot_rows:
-                (subdir, fname, offs, count) = self._offset_from_row(cleanpos)
-                self._remove_or_truncate_file(subdir, fname, offs)
+                (subdir, fname, offset, count) = self._offset_from_row(cleanpos)
+                self._remove_or_truncate_file(subdir, fname, offset)
                 cleanpos += count
             # Re-raise original exception
             raise
@@ -512,11 +509,14 @@ class Table():
         # Success, so update self.nrows accordingly
         self.nrows = tot_rows

-    def get_data(self, start, stop, binary=False):
+    def get_data(self, start, stop, binary = False):
         """Extract data corresponding to Python range [n:m],
         and returns a formatted string"""
-        if (start is None or stop is None or
-                start > stop or start < 0 or stop > self.nrows):
+        if (start is None or
+            stop is None or
+            start > stop or
+            start < 0 or
+            stop > self.nrows):
             raise IndexError("Index out of range")

         ret = []
@@ -556,7 +556,7 @@ class Table():
         # file.  Only when the list covers the entire extent of the
         # file will that file be removed.
         datafile = os.path.join(self.root, subdir, filename)
-        cachefile = datafile + b".removed"
+        cachefile = datafile + ".removed"
         try:
             with open(cachefile, "rb") as f:
                 ranges = pickle.load(f)
@@ -583,8 +583,7 @@ class Table():
                 # Not connected; append previous and start again
                 merged.append(prev)
                 prev = new
-        # Last range we were looking at goes into the file.  We know
-        # there was at least one (the one we just removed).
-        merged.append(prev)
+        if prev is not None:
+            merged.append(prev)

         # If the range covered the whole file, we can delete it now.
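The removal cache above coalesces adjacent [start, stop) row ranges before pickling them back to the `.removed` file; the python2 branch adds the `if prev is not None` guard so an empty list round-trips cleanly. The merge itself is the standard sorted-interval sweep; a self-contained sketch of the same logic (names are illustrative):

    def merge_ranges(ranges):
        """Coalesce connected [start, stop) pairs; input need not be sorted."""
        merged = []
        prev = None
        for new in sorted(ranges):
            if prev is not None and new[0] <= prev[1]:
                prev = (prev[0], max(prev[1], new[1]))   # extend previous range
            else:
                if prev is not None:
                    merged.append(prev)
                prev = new
        if prev is not None:
            merged.append(prev)
        return merged

    assert merge_ranges([(8, 12), (0, 4), (4, 8)]) == [(0, 12)]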
@@ -1,15 +1,12 @@
 """Exceptions"""

-
 class NilmDBError(Exception):
     """Base exception for NilmDB errors"""
-    def __init__(self, msg="Unspecified error"):
-        super().__init__(msg)
+    def __init__(self, message = "Unspecified error"):
+        Exception.__init__(self, message)

-
 class StreamError(NilmDBError):
     pass

-
 class OverlapError(NilmDBError):
     pass
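In this exceptions module, master's `super().__init__(msg)` and python2's `Exception.__init__(self, message)` are equivalent for this single-inheritance hierarchy; the zero-argument `super()` form is simply the py3 idiom and also cooperates with multiple inheritance. A two-line check of the behavior:

    class NilmDBError(Exception):
        """Base exception for NilmDB errors"""
        def __init__(self, msg="Unspecified error"):
            super().__init__(msg)   # py3; py2 would need super(NilmDBError, self)

    assert str(NilmDBError()) == "Unspecified error"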
@@ -1,5 +1,3 @@
-# cython: language_level=2
-
 """Interval, IntervalSet

 The Interval implemented here is just like
@@ -60,19 +58,9 @@ cdef class Interval:
         return ("[" + timestamp_to_string(self.start) +
                 " -> " + timestamp_to_string(self.end) + ")")

-    # Compare two intervals.  If non-equal, order by start then end
-    def __lt__(self, Interval other):
-        return (self.start, self.end) < (other.start, other.end)
-    def __gt__(self, Interval other):
-        return (self.start, self.end) > (other.start, other.end)
-    def __le__(self, Interval other):
-        return (self.start, self.end) <= (other.start, other.end)
-    def __ge__(self, Interval other):
-        return (self.start, self.end) >= (other.start, other.end)
-    def __eq__(self, Interval other):
-        return (self.start, self.end) == (other.start, other.end)
-    def __ne__(self, Interval other):
-        return (self.start, self.end) != (other.start, other.end)
+    def __cmp__(self, Interval other):
+        """Compare two intervals.  If non-equal, order by start then end"""
+        return cmp(self.start, other.start) or cmp(self.end, other.end)

     cpdef intersects(self, Interval other):
         """Return True if two Interval objects intersect"""
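Python 3 dropped `__cmp__` and the `cmp()` builtin, which is why the master branch spells out all six rich comparisons above. In plain Python (unlike the cdef class here, where decorators like `functools.total_ordering` may not apply cleanly), the tuple-key trick keeps this short; a sketch:

    from functools import total_ordering

    @total_ordering
    class Interval:
        def __init__(self, start, end):
            self.start, self.end = start, end

        def _key(self):
            return (self.start, self.end)   # order by start, then end

        def __eq__(self, other):
            return self._key() == other._key()

        def __lt__(self, other):
            return self._key() < other._key()

    # total_ordering derives <=, >, >= from __eq__ and __lt__
    assert Interval(0, 5) < Interval(0, 7) <= Interval(1, 2)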
@@ -7,13 +7,12 @@ Object that represents a NILM database file.
 Manages both the SQL database and the table storage backend.
 """

-import os
-import errno
-import sqlite3
+# Need absolute_import so that "import nilmdb" won't pull in
+# nilmdb.py, but will pull the parent nilmdb module instead.
+from __future__ import absolute_import

 import nilmdb.utils
-from nilmdb.utils.printf import printf
-from nilmdb.utils.time import timestamp_to_bytes
+from nilmdb.utils.printf import *
+from nilmdb.utils.time import timestamp_to_string

 from nilmdb.utils.interval import IntervalError
 from nilmdb.server.interval import Interval, DBInterval, IntervalSet
@@ -21,6 +20,10 @@ from nilmdb.server.interval import Interval, DBInterval, IntervalSet
 from nilmdb.server import bulkdata
 from nilmdb.server.errors import NilmDBError, StreamError, OverlapError

+import sqlite3
+import os
+import errno
+
 # Note about performance and transactions:
 #
 # Committing a transaction in the default sync mode (PRAGMA synchronous=FULL)
@@ -34,7 +37,7 @@ from nilmdb.server.errors import NilmDBError, StreamError, OverlapError
 # seems that 'PRAGMA synchronous=NORMAL' and 'PRAGMA journal_mode=WAL'
 # give an equivalent speedup more safely.  That is what is used here.
 _sql_schema_updates = {
-    0: {"next": 1, "sql": """
+    0: { "next": 1, "sql": """
         -- All streams
         CREATE TABLE streams(
             id INTEGER PRIMARY KEY, -- stream ID
@@ -58,26 +61,25 @@ _sql_schema_updates = {
             end_pos INTEGER NOT NULL
         );
         CREATE INDEX _ranges_index ON ranges (stream_id, start_time, end_time);
-        """},
+        """ },

-    1: {"next": 3, "sql": """
+    1: { "next": 3, "sql": """
         -- Generic dictionary-type metadata that can be associated with a stream
         CREATE TABLE metadata(
             stream_id INTEGER NOT NULL,
             key TEXT NOT NULL,
             value TEXT
         );
-        """},
+        """ },

-    2: {"error": "old format with floating-point timestamps requires "
-        "nilmdb 1.3.1 or older"},
+    2: { "error": "old format with floating-point timestamps requires "
+         "nilmdb 1.3.1 or older" },

-    3: {"next": None},
+    3: { "next": None },
 }

 @nilmdb.utils.must_close()
-class NilmDB():
+class NilmDB(object):
     verbose = 0

     def __init__(self, basepath,
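The transaction comment above is the rationale for how `NilmDB.__init__` configures SQLite: write-ahead logging plus NORMAL sync gives most of the speed of `synchronous=OFF` without risking corruption on crash. The pragmas it names are issued like this (a sketch of the setting itself, not nilmdb's exact code):

    import sqlite3

    con = sqlite3.connect("data.sql")
    # WAL journal + NORMAL sync: a large speedup over the default
    # synchronous=FULL rollback journal, while staying crash-safe.
    con.execute("PRAGMA journal_mode=WAL")
    con.execute("PRAGMA synchronous=NORMAL")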
@@ -109,7 +111,9 @@ class NilmDB():
         try:
             os.makedirs(self.basepath)
         except OSError as e:
-            if e.errno != errno.EEXIST:
+            if e.errno != errno.EEXIST: # pragma: no cover
+                # (no coverage, because it's hard to trigger this case
+                # if tests are run as root)
                 raise IOError("can't create tree " + self.basepath)

         # Our data goes inside it
@@ -117,10 +121,10 @@ class NilmDB():

         # SQLite database too
         sqlfilename = os.path.join(self.basepath, "data.sql")
-        self.con = sqlite3.connect(sqlfilename, check_same_thread=True)
+        self.con = sqlite3.connect(sqlfilename, check_same_thread = True)
         try:
             self._sql_schema_update()
-        except Exception:
+        except Exception: # pragma: no cover
             self.data.close()
             raise
@@ -145,7 +149,6 @@ class NilmDB():
         if self.con:
             self.con.commit()
             self.con.close()
-            self.con = None
         self.data.close()

     def _sql_schema_update(self):
@@ -154,18 +157,18 @@ class NilmDB():
         oldversion = version

         while True:
-            if version not in _sql_schema_updates:
+            if version not in _sql_schema_updates: # pragma: no cover
                 raise Exception(self.basepath + ": unknown database version "
                                 + str(version))
             update = _sql_schema_updates[version]
-            if "error" in update:
+            if "error" in update: # pragma: no cover
                 raise Exception(self.basepath + ": can't use database version "
                                 + str(version) + ": " + update["error"])
             if update["next"] is None:
                 break
             cur.executescript(update["sql"])
             version = update["next"]
-            if self.verbose:
+            if self.verbose: # pragma: no cover
                 printf("Database schema updated to %d\n", version)

         if version != oldversion:
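`_sql_schema_update` walks `_sql_schema_updates` as a tiny migration chain: look up the current version, refuse on "error", apply "sql", then follow "next" until it is None. A stripped-down sketch of the same walk, detached from the class:

    def apply_schema_updates(cur, version, updates):
        """Follow the 'next' chain from 'version', running each 'sql' block."""
        while True:
            if version not in updates:
                raise Exception("unknown database version %d" % version)
            update = updates[version]
            if "error" in update:
                raise Exception("can't use this version: " + update["error"])
            if update["next"] is None:
                return version          # already at the latest schema
            cur.executescript(update["sql"])
            version = update["next"]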
@@ -181,7 +184,7 @@ class NilmDB():
             raise NilmDBError("start must precede end")
         return (start, end)

-    @nilmdb.utils.lru_cache(size=64)
+    @nilmdb.utils.lru_cache(size = 64)
     def _get_intervals(self, stream_id):
         """
         Return a mutable IntervalSet corresponding to the given stream ID.
@@ -196,7 +199,7 @@ class NilmDB():
                 iset += DBInterval(start_time, end_time,
                                    start_time, end_time,
                                    start_pos, end_pos)
-        except IntervalError:
+        except IntervalError: # pragma: no cover
             raise NilmDBError("unexpected overlap in ranges table!")

         return iset
@@ -223,6 +226,10 @@ class NilmDB():
         # Load this stream's intervals
         iset = self._get_intervals(stream_id)

+        # Check for overlap
+        if iset.intersects(interval): # pragma: no cover (gets caught earlier)
+            raise NilmDBError("new interval overlaps existing data")
+
         # Check for adjacency.  If there's a stream in the database
         # that ends exactly when this one starts, and the database
         # rows match up, we can make one interval that covers the
@@ -265,6 +272,10 @@ class NilmDB():
         original: original DBInterval; must be already present in DB
         to_remove: DBInterval to remove; must be subset of 'original'
         """
+        # Just return if we have nothing to remove
+        if remove.start == remove.end: # pragma: no cover
+            return
+
         # Load this stream's intervals
         iset = self._get_intervals(stream_id)

@@ -279,8 +290,7 @@ class NilmDB():
         # the removed piece was in the middle.
         def add(iset, start, end, start_pos, end_pos):
             iset += DBInterval(start, end, start, end, start_pos, end_pos)
-            self._sql_interval_insert(stream_id, start, end,
-                                      start_pos, end_pos)
+            self._sql_interval_insert(stream_id, start, end, start_pos, end_pos)

         if original.start != remove.start:
             # Interval before the removed region
@@ -297,7 +307,7 @@ class NilmDB():

         return

-    def stream_list(self, path=None, layout=None, extended=False):
+    def stream_list(self, path = None, layout = None, extended = False):
         """Return list of lists of all streams in the database.

         If path is specified, include only streams with a path that
@@ -306,10 +316,10 @@ class NilmDB():
         If layout is specified, include only streams with a layout
         that matches the given string.

-        If extended=False, returns a list of lists containing
+        If extended = False, returns a list of lists containing
         the path and layout: [ path, layout ]

-        If extended=True, returns a list of lists containing
+        If extended = True, returns a list of lists containing
         more information:
             path
             layout
@@ -336,9 +346,9 @@ class NilmDB():
             params += (path,)
         query += " GROUP BY streams.id ORDER BY streams.path"
         result = self.con.execute(query, params).fetchall()
-        return [list(x) for x in result]
+        return [ list(x) for x in result ]

-    def stream_intervals(self, path, start=None, end=None, diffpath=None):
+    def stream_intervals(self, path, start = None, end = None, diffpath = None):
         """
         List all intervals in 'path' between 'start' and 'end'.  If
         'diffpath' is not none, list instead the set-difference
@@ -410,8 +420,8 @@ class NilmDB():

     def stream_set_metadata(self, path, data):
         """Set stream metadata from a dictionary, e.g.
-           { description: 'Downstairs lighting',
-             v_scaling: 123.45 }
+           { description = 'Downstairs lighting',
+             v_scaling = 123.45 }
         This replaces all existing metadata.
         """
         stream_id = self._stream_id(path)
@@ -459,7 +469,7 @@ class NilmDB():

         # Verify that no intervals are present, and clear the cache
         iset = self._get_intervals(stream_id)
-        if iset:
+        if len(iset):
             raise NilmDBError("all intervals must be removed before "
                               "destroying a stream")
         self._get_intervals.cache_remove(self, stream_id)
@@ -473,7 +483,7 @@ class NilmDB():
         con.execute("DELETE FROM ranges WHERE stream_id=?", (stream_id,))
         con.execute("DELETE FROM streams WHERE id=?", (stream_id,))

-    def stream_insert(self, path, start, end, data, binary=False):
+    def stream_insert(self, path, start, end, data, binary = False):
         """Insert new data into the database.
         path: Path at which to add the data
         start: Starting timestamp
@@ -509,7 +519,7 @@ class NilmDB():
         # Like bisect.bisect_left, but doesn't choke on large indices on
         # 32-bit systems, like bisect's fast C implementation does.
         while lo < hi:
-            mid = (lo + hi) // 2
+            mid = (lo + hi) / 2
             if a[mid] < x:
                 lo = mid + 1
             else:
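The loop above re-implements `bisect.bisect_left` because bisect's fast C implementation takes C ints for indices and can overflow on 32-bit builds with very large row counts; note also the `/ 2` vs `// 2` difference, which gives the same result for ints but only `//` survives true division. The pure-Python version in full:

    def bisect_left(a, x, lo=0, hi=None):
        """Leftmost insertion point for x in sorted sequence a."""
        if hi is None:
            hi = len(a)
        while lo < hi:
            mid = (lo + hi) // 2    # '/' in the py2 code; same for ints there
            if a[mid] < x:
                lo = mid + 1
            else:
                hi = mid
        return lo

    assert bisect_left([10, 20, 20, 30], 20) == 1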
@@ -550,8 +560,8 @@ class NilmDB():
                              dbinterval.db_startpos,
                              dbinterval.db_endpos)

-    def stream_extract(self, path, start=None, end=None,
-                       count=False, markup=False, binary=False):
+    def stream_extract(self, path, start = None, end = None,
+                       count = False, markup = False, binary = False):
         """
         Returns (data, restart) tuple.
@@ -607,8 +617,8 @@ class NilmDB():

             # Add markup
             if markup:
-                result.append(b"# interval-start " +
-                              timestamp_to_bytes(interval.start) + b"\n")
+                result.append("# interval-start " +
+                              timestamp_to_string(interval.start) + "\n")

             # Gather these results up
             result.append(table.get_data(row_start, row_end, binary))
@@ -619,19 +629,18 @@ class NilmDB():
             # Add markup, and exit if restart is set.
             if restart is not None:
                 if markup:
-                    result.append(b"# interval-end " +
-                                  timestamp_to_bytes(restart) + b"\n")
+                    result.append("# interval-end " +
+                                  timestamp_to_string(restart) + "\n")
                 break
             if markup:
-                result.append(b"# interval-end " +
-                              timestamp_to_bytes(interval.end) + b"\n")
+                result.append("# interval-end " +
+                              timestamp_to_string(interval.end) + "\n")

         if count:
             return matched
-        full_result = b"".join(result)
-        return (full_result, restart)
+        return ("".join(result), restart)

-    def stream_remove(self, path, start=None, end=None):
+    def stream_remove(self, path, start = None, end = None):
         """
         Remove data from the specified time interval within a stream.

@@ -658,7 +667,7 @@ class NilmDB():

         # Can't remove intervals from within the iterator, so we need to
         # remember what's currently in the intersection now.
-        all_candidates = list(intervals.intersection(to_remove, orig=True))
+        all_candidates = list(intervals.intersection(to_remove, orig = True))

         remove_start = None
         remove_end = None
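The markup branch above brackets every extracted interval with `# interval-start <ts>` / `# interval-end <ts>` comment lines, so a client can recover interval boundaries from an otherwise flat text stream; on master all the pieces are bytes and are joined exactly once at the end. A sketch of what the framed output looks like, using `str()` in place of nilmdb's timestamp formatting (the `frame` helper and tuple layout are illustrative assumptions):

    def frame(intervals):
        """intervals: list of (start_ts, end_ts, payload_bytes) tuples."""
        result = []
        for (start, end, payload) in intervals:
            result.append(b"# interval-start " + str(start).encode() + b"\n")
            result.append(payload)
            result.append(b"# interval-end " + str(end).encode() + b"\n")
        return b"".join(result)   # single join avoids quadratic concatenation

    print(frame([(100, 200, b"100 1.5\n")]).decode())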
@@ -1,5 +1,3 @@
-# cython: language_level=2
-
 cdef class RBNode:
     cdef public object obj
     cdef public double start, end
@@ -1,6 +1,5 @@
 # cython: profile=False
 # cython: cdivision=True
-# cython: language_level=2

 """
 Jim Paris <jim@jtan.com>
@@ -138,7 +138,7 @@ static void Rocket_dealloc(Rocket *self)
                fclose(self->file);
                self->file = NULL;
        }
-       Py_TYPE(self)->tp_free((PyObject *)self);
+       self->ob_type->tp_free((PyObject *)self);
 }

 static PyObject *Rocket_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
@@ -160,19 +160,13 @@ static PyObject *Rocket_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
 static int Rocket_init(Rocket *self, PyObject *args, PyObject *kwds)
 {
        const char *layout, *path;
-       int pathlen;
        static char *kwlist[] = { "layout", "file", NULL };
-       if (!PyArg_ParseTupleAndKeywords(args, kwds, "sz#", kwlist,
-                                        &layout, &path, &pathlen))
+       if (!PyArg_ParseTupleAndKeywords(args, kwds, "sz", kwlist,
+                                        &layout, &path))
                return -1;
        if (!layout)
                return -1;
        if (path) {
-               if (strlen(path) != (size_t)pathlen) {
-                       PyErr_SetString(PyExc_ValueError, "path must not "
-                                       "contain NUL characters");
-                       return -1;
-               }
                if ((self->file = fopen(path, "a+b")) == NULL) {
                        PyErr_SetFromErrno(PyExc_OSError);
                        return -1;
@@ -245,7 +239,7 @@ static PyObject *Rocket_get_file_size(Rocket *self)
                        return NULL;
                }
        }
-       return PyLong_FromLong(self->file_size);
+       return PyInt_FromLong(self->file_size);
 }

 /****
@@ -279,9 +273,11 @@ static PyObject *Rocket_append_string(Rocket *self, PyObject *args)
        union64_t t64;
        int i;

-       /* Input data is bytes.  Using 'y#' instead of 'y' might be
-          preferable, but strto* requires the null terminator. */
-       if (!PyArg_ParseTuple(args, "iyiiLLL:append_string", &count,
+       /* It would be nice to use 't#' instead of 's' for data,
+          but we need the null termination for strto*.  If we had
+          strnto* that took a length, we could use t# and not require
+          a copy. */
+       if (!PyArg_ParseTuple(args, "isiiLLL:append_string", &count,
                              &data, &offset, &linenum,
                              &ll1, &ll2, &ll3))
                return NULL;
@@ -441,7 +437,7 @@ static PyObject *Rocket_append_binary(Rocket *self, PyObject *args)
        timestamp_t end;
        timestamp_t last_timestamp;

-       if (!PyArg_ParseTuple(args, "iy#iiLLL:append_binary",
+       if (!PyArg_ParseTuple(args, "it#iiLLL:append_binary",
                              &count, &data, &data_len, &offset,
                              &linenum, &ll1, &ll2, &ll3))
                return NULL;
@@ -477,7 +473,7 @@ static PyObject *Rocket_append_binary(Rocket *self, PyObject *args)
        }

        /* Write binary data */
-       if (fwrite(data, self->binary_size, rows, self->file) != (size_t)rows) {
+       if (fwrite(data, self->binary_size, rows, self->file) != rows) {
                PyErr_SetFromErrno(PyExc_OSError);
                return NULL;
        }
@@ -491,7 +487,7 @@ static PyObject *Rocket_append_binary(Rocket *self, PyObject *args)
        }

 /****
- * Extract to binary bytes object containing ASCII text-formatted data
+ * Extract to string
  */

 static PyObject *Rocket_extract_string(Rocket *self, PyObject *args)
@@ -589,7 +585,7 @@ static PyObject *Rocket_extract_string(Rocket *self, PyObject *args)
                str[len++] = '\n';
        }

-       PyObject *pystr = PyBytes_FromStringAndSize(str, len);
+       PyObject *pystr = PyString_FromStringAndSize(str, len);
        free(str);
        return pystr;
 err:
@@ -599,7 +595,7 @@ err:
 }

 /****
- * Extract to binary bytes object containing raw little-endian binary data
+ * Extract to binary string containing raw little-endian binary data
  */
 static PyObject *Rocket_extract_binary(Rocket *self, PyObject *args)
 {
@@ -628,7 +624,7 @@ static PyObject *Rocket_extract_binary(Rocket *self, PyObject *args)

        /* Data in the file is already in the desired little-endian
           binary format, so just read it directly. */
-       if (fread(str, self->binary_size, count, self->file) != (size_t)count) {
+       if (fread(str, self->binary_size, count, self->file) != count) {
                free(str);
                PyErr_SetFromErrno(PyExc_OSError);
                return NULL;
@@ -752,7 +748,7 @@ static PyMethodDef Rocket_methods[] = {
 };

 static PyTypeObject RocketType = {
-       PyVarObject_HEAD_INIT(NULL, 0)
+       PyObject_HEAD_INIT(NULL)

        .tp_name = "rocket.Rocket",
        .tp_basicsize = sizeof(Rocket),
@@ -777,23 +773,17 @@ static PyMethodDef module_methods[] = {
        { NULL },
 };

-static struct PyModuleDef moduledef = {
-       PyModuleDef_HEAD_INIT,
-       .m_name = "rocker",
-       .m_doc = "Rocket data parsing and formatting module",
-       .m_size = -1,
-       .m_methods = module_methods,
-};
-
-PyMODINIT_FUNC PyInit_rocket(void)
+PyMODINIT_FUNC
+initrocket(void)
 {
        PyObject *module;

        RocketType.tp_new = PyType_GenericNew;
        if (PyType_Ready(&RocketType) < 0)
-               return NULL;
+               return;

-       module = PyModule_Create(&moduledef);
+       module = Py_InitModule3("rocket", module_methods,
+                               "Rocket data parsing and formatting module");
        Py_INCREF(&RocketType);
        PyModule_AddObject(module, "Rocket", (PyObject *)&RocketType);

@@ -802,5 +792,5 @@ PyMODINIT_FUNC PyInit_rocket(void)
        PyModule_AddObject(module, "ParseError", ParseError);
        add_parseerror_codes(module);

-       return module;
+       return;
 }
@@ -1,21 +1,26 @@
 """CherryPy-based server for accessing NILM database via HTTP"""

-import os
-import json
-import socket
-import traceback
+# Need absolute_import so that "import nilmdb" won't pull in
+# nilmdb.py, but will pull the nilmdb module instead.
+from __future__ import absolute_import

-import psutil
-import cherrypy
-
 import nilmdb.server
-from nilmdb.utils.printf import sprintf
+from nilmdb.utils.printf import *
 from nilmdb.server.errors import NilmDBError
 from nilmdb.utils.time import string_to_timestamp

+import cherrypy
+import sys
+import os
+import socket
+import simplejson as json
+import decorator
+import psutil
+import traceback
+
 from nilmdb.server.serverutil import (
     chunked_response,
     response_type,
+    workaround_cp_bug_1200,
     exception_to_httperror,
     CORS_allow,
     json_to_request_params,
@@ -28,15 +33,17 @@ from nilmdb.server.serverutil import (
 # Add CORS_allow tool
 cherrypy.tools.CORS_allow = cherrypy.Tool('on_start_resource', CORS_allow)

-class NilmApp():
+class NilmApp(object):
     def __init__(self, db):
         self.db = db

 # CherryPy apps
 class Root(NilmApp):
     """Root application for NILM database"""

+    def __init__(self, db):
+        super(Root, self).__init__(db)
+
     # /
     @cherrypy.expose
     def index(self):
@@ -65,14 +72,11 @@ class Root(NilmApp):
         path = self.db.get_basepath()
         usage = psutil.disk_usage(path)
         dbsize = nilmdb.utils.du(path)
-        return {
-            "path": path,
-            "size": dbsize,
-            "other": max(usage.used - dbsize, 0),
-            "reserved": max(usage.total - usage.used - usage.free, 0),
-            "free": usage.free
-        }
+        return { "path": path,
+                 "size": dbsize,
+                 "other": max(usage.used - dbsize, 0),
+                 "reserved": max(usage.total - usage.used - usage.free, 0),
+                 "free": usage.free }

 class Stream(NilmApp):
     """Stream-specific operations"""
@@ -85,8 +89,7 @@ class Stream(NilmApp):
             start = string_to_timestamp(start_param)
         except Exception:
             raise cherrypy.HTTPError("400 Bad Request", sprintf(
-                "invalid start (%s): must be a numeric timestamp",
-                start_param))
+                "invalid start (%s): must be a numeric timestamp", start_param))
         try:
             if end_param is not None:
                 end = string_to_timestamp(end_param)
@@ -106,7 +109,7 @@ class Stream(NilmApp):
     # /stream/list?path=/newton/prep&extended=1
     @cherrypy.expose
     @cherrypy.tools.json_out()
-    def list(self, path=None, layout=None, extended=None):
+    def list(self, path = None, layout = None, extended = None):
         """List all streams in the database.  With optional path or
         layout parameter, just list streams that match the given path
         or layout.
@@ -125,7 +128,7 @@ class Stream(NilmApp):
     @cherrypy.tools.json_in()
     @cherrypy.tools.json_out()
     @exception_to_httperror(NilmDBError, ValueError)
-    @cherrypy.tools.CORS_allow(methods=["POST"])
+    @cherrypy.tools.CORS_allow(methods = ["POST"])
     def create(self, path, layout):
         """Create a new stream in the database.  Provide path
         and one of the nilmdb.layout.layouts keys.
@@ -137,7 +140,7 @@ class Stream(NilmApp):
     @cherrypy.tools.json_in()
     @cherrypy.tools.json_out()
     @exception_to_httperror(NilmDBError)
-    @cherrypy.tools.CORS_allow(methods=["POST"])
+    @cherrypy.tools.CORS_allow(methods = ["POST"])
     def destroy(self, path):
         """Delete a stream.  Fails if any data is still present."""
         return self.db.stream_destroy(path)
@@ -147,7 +150,7 @@ class Stream(NilmApp):
     @cherrypy.tools.json_in()
     @cherrypy.tools.json_out()
     @exception_to_httperror(NilmDBError, ValueError)
-    @cherrypy.tools.CORS_allow(methods=["POST"])
+    @cherrypy.tools.CORS_allow(methods = ["POST"])
     def rename(self, oldpath, newpath):
         """Rename a stream."""
         return self.db.stream_rename(oldpath, newpath)
@@ -163,11 +166,11 @@ class Stream(NilmApp):
         try:
             data = self.db.stream_get_metadata(path)
         except nilmdb.server.nilmdb.StreamError as e:
-            raise cherrypy.HTTPError("404 Not Found", str(e))
+            raise cherrypy.HTTPError("404 Not Found", e.message)
         if key is None:          # If no keys specified, return them all
-            key = list(data.keys())
+            key = data.keys()
         elif not isinstance(key, list):
-            key = [key]
+            key = [ key ]
         result = {}
         for k in key:
             if k in data:
@@ -182,9 +185,11 @@ class Stream(NilmApp):
         try:
             data = dict(json.loads(data))
         except TypeError as e:
-            raise NilmDBError("can't parse 'data' parameter: " + str(e))
+            raise NilmDBError("can't parse 'data' parameter: " + e.message)
         for key in data:
-            if not isinstance(data[key], (str, float, int)):
+            if not (isinstance(data[key], basestring) or
+                    isinstance(data[key], float) or
+                    isinstance(data[key], int)):
                 raise NilmDBError("metadata values must be a string or number")
         function(path, data)
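The `str` / `basestring` split above is the classic py2/py3 fork; master's tuple-of-types `isinstance` is the idiomatic py3 way to validate JSON-ish metadata values in one test. A sketch of the same check in isolation (`check_metadata` is an illustrative name):

    def check_metadata(data):
        """Allow only string or numeric metadata values (bool counts as int)."""
        for key, value in data.items():
            if not isinstance(value, (str, float, int)):
                raise ValueError("metadata values must be a string or number")

    check_metadata({"description": "Downstairs lighting", "v_scaling": 123.45})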
@ -193,7 +198,7 @@ class Stream(NilmApp):
     @cherrypy.tools.json_in()
     @cherrypy.tools.json_out()
     @exception_to_httperror(NilmDBError, LookupError)
-    @cherrypy.tools.CORS_allow(methods=["POST"])
+    @cherrypy.tools.CORS_allow(methods = ["POST"])
     def set_metadata(self, path, data):
         """Set metadata for the named stream, replacing any existing
         metadata. Data can be json-encoded or a plain dictionary."""

@ -204,7 +209,7 @@ class Stream(NilmApp):
     @cherrypy.tools.json_in()
     @cherrypy.tools.json_out()
     @exception_to_httperror(NilmDBError, LookupError, ValueError)
-    @cherrypy.tools.CORS_allow(methods=["POST"])
+    @cherrypy.tools.CORS_allow(methods = ["POST"])
     def update_metadata(self, path, data):
         """Set metadata for the named stream, replacing any existing
         metadata. Data can be json-encoded or a plain dictionary."""

@ -214,8 +219,8 @@ class Stream(NilmApp):
     @cherrypy.expose
     @cherrypy.tools.json_out()
     @exception_to_httperror(NilmDBError, ValueError)
-    @cherrypy.tools.CORS_allow(methods=["PUT"])
-    def insert(self, path, start, end, binary=False):
+    @cherrypy.tools.CORS_allow(methods = ["PUT"])
+    def insert(self, path, start, end, binary = False):
         """
         Insert new data into the database. Provide textual data
         (matching the path's layout) as a HTTP PUT.

@ -241,11 +246,8 @@ class Stream(NilmApp):
                                      "application/octet-stream for "
                                      "binary data, not " + content_type)
-
-        # Note that non-binary data is *not* decoded from bytes to string,
-        # but rather passed directly to stream_insert.

         # Check path and get layout
-        if len(self.db.stream_list(path=path)) != 1:
+        if len(self.db.stream_list(path = path)) != 1:
             raise cherrypy.HTTPError("404", "No such stream: " + path)

         # Check limits

@ -262,10 +264,10 @@ class Stream(NilmApp):
     # /stream/remove?path=/newton/prep&start=1234567890.0&end=1234567899.0
     @cherrypy.expose
     @cherrypy.tools.json_in()
-    @cherrypy.tools.CORS_allow(methods=["POST"])
+    @cherrypy.tools.CORS_allow(methods = ["POST"])
     @chunked_response
     @response_type("application/x-json-stream")
-    def remove(self, path, start=None, end=None):
+    def remove(self, path, start = None, end = None):
         """
         Remove data from the backend database. Removes all data in
         the interval [start, end).

@ -277,15 +279,15 @@ class Stream(NilmApp):
         """
         (start, end) = self._get_times(start, end)

-        if len(self.db.stream_list(path=path)) != 1:
+        if len(self.db.stream_list(path = path)) != 1:
             raise cherrypy.HTTPError("404", "No such stream: " + path)

+        @workaround_cp_bug_1200
         def content(start, end):
             # Note: disable chunked responses to see tracebacks from here.
             while True:
                 (removed, restart) = self.db.stream_remove(path, start, end)
-                response = json.dumps(removed) + "\r\n"
-                yield response.encode('utf-8')
+                yield json.dumps(removed) + "\r\n"
                 if restart is None:
                     break
                 start = restart
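The yield/encode change in the hunk above reflects that CherryPy on Python 3
requires streamed response bodies to be byte strings, while Python 2 could
yield native str objects directly. A minimal sketch of the Python 3 pattern
(hypothetical generator, not repository code):

    import json

    def content():
        # Each streamed chunk must be bytes on Python 3.
        response = json.dumps(100) + "\r\n"
        yield response.encode('utf-8')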
@ -297,7 +299,7 @@ class Stream(NilmApp):
     @cherrypy.expose
     @chunked_response
     @response_type("application/x-json-stream")
-    def intervals(self, path, start=None, end=None, diffpath=None):
+    def intervals(self, path, start = None, end = None, diffpath = None):
         """
         Get intervals from backend database. Streams the resulting
         intervals as JSON strings separated by CR LF pairs. This may

@ -314,19 +316,20 @@ class Stream(NilmApp):
         """
         (start, end) = self._get_times(start, end)

-        if len(self.db.stream_list(path=path)) != 1:
+        if len(self.db.stream_list(path = path)) != 1:
             raise cherrypy.HTTPError("404", "No such stream: " + path)

-        if diffpath and len(self.db.stream_list(path=diffpath)) != 1:
+        if diffpath and len(self.db.stream_list(path = diffpath)) != 1:
             raise cherrypy.HTTPError("404", "No such stream: " + diffpath)

+        @workaround_cp_bug_1200
         def content(start, end):
             # Note: disable chunked responses to see tracebacks from here.
             while True:
                 (ints, restart) = self.db.stream_intervals(path, start, end,
                                                            diffpath)
-                response = ''.join([json.dumps(i) + "\r\n" for i in ints])
-                yield response.encode('utf-8')
+                response = ''.join([ json.dumps(i) + "\r\n" for i in ints ])
+                yield response
                 if restart is None:
                     break
                 start = restart

@ -335,8 +338,8 @@ class Stream(NilmApp):
     # /stream/extract?path=/newton/prep&start=1234567890.0&end=1234567899.0
     @cherrypy.expose
     @chunked_response
-    def extract(self, path, start=None, end=None,
-                count=False, markup=False, binary=False):
+    def extract(self, path, start = None, end = None,
+                count = False, markup = False, binary = False):
         """
         Extract data from backend database. Streams the resulting
         entries as ASCII text lines separated by newlines. This may

@ -360,7 +363,7 @@ class Stream(NilmApp):
         (start, end) = self._get_times(start, end)

         # Check path and get layout
-        if len(self.db.stream_list(path=path)) != 1:
+        if len(self.db.stream_list(path = path)) != 1:
             raise cherrypy.HTTPError("404", "No such stream: " + path)

         if binary:

@ -372,18 +375,19 @@ class Stream(NilmApp):
             content_type = "text/plain"
         cherrypy.response.headers['Content-Type'] = content_type

+        @workaround_cp_bug_1200
         def content(start, end):
             # Note: disable chunked responses to see tracebacks from here.
             if count:
                 matched = self.db.stream_extract(path, start, end,
-                                                 count=True)
-                yield sprintf(b"%d\n", matched)
+                                                 count = True)
+                yield sprintf("%d\n", matched)
                 return

             while True:
                 (data, restart) = self.db.stream_extract(
-                    path, start, end, count=False,
-                    markup=markup, binary=binary)
+                    path, start, end, count = False,
+                    markup = markup, binary = binary)
                 yield data

                 if restart is None:

@ -391,31 +395,29 @@ class Stream(NilmApp):
                 start = restart
         return content(start, end)


-class Exiter():
+class Exiter(object):
     """App that exits the server, for testing"""
     @cherrypy.expose
     def index(self):
         cherrypy.response.headers['Content-Type'] = 'text/plain'

         def content():
-            yield b'Exiting by request'
+            yield 'Exiting by request'
             raise SystemExit
         return content()
-    index._cp_config = {'response.stream': True}
+    index._cp_config = { 'response.stream': True }


-class Server():
-    def __init__(self, db, host='127.0.0.1', port=8080,
-                 stoppable=False,        # whether /exit URL exists
-                 fast_shutdown=False,    # don't wait for clients to disconn.
-                 force_traceback=False,  # include traceback in all errors
-                 basepath='',            # base URL path for cherrypy.tree
+class Server(object):
+    def __init__(self, db, host = '127.0.0.1', port = 8080,
+                 stoppable = False,        # whether /exit URL exists
+                 embedded = True,          # hide diagnostics and output, etc
+                 fast_shutdown = False,    # don't wait for clients to disconn.
+                 force_traceback = False,  # include traceback in all errors
+                 basepath = '',            # base URL path for cherrypy.tree
                  ):
         # Save server version, just for verification during tests
         self.version = nilmdb.__version__

+        self.embedded = embedded
         self.db = db
         if not getattr(db, "_thread_safe", None):
             raise KeyError("Database object " + str(db) + " doesn't claim "

@ -425,12 +427,13 @@ class Server():

         # Build up global server configuration
         cherrypy.config.update({
-            'environment': 'embedded',
             'server.socket_host': host,
             'server.socket_port': port,
             'engine.autoreload.on': False,
             'server.max_request_body_size': 8*1024*1024,
         })
+        if self.embedded:
+            cherrypy.config.update({ 'environment': 'embedded' })

         # Build up application specific configuration
         app_config = {}

@ -439,23 +442,23 @@ class Server():
         })

         # Some default headers to just help identify that things are working
-        app_config.update({'response.headers.X-Jim-Is-Awesome': 'yeah'})
+        app_config.update({ 'response.headers.X-Jim-Is-Awesome': 'yeah' })

         # Set up Cross-Origin Resource Sharing (CORS) handler so we
         # can correctly respond to browsers' CORS preflight requests.
         # This also limits verbs to GET and HEAD by default.
-        app_config.update({'tools.CORS_allow.on': True,
-                           'tools.CORS_allow.methods': ['GET', 'HEAD']})
+        app_config.update({ 'tools.CORS_allow.on': True,
+                            'tools.CORS_allow.methods': ['GET', 'HEAD'] })

         # Configure the 'json_in' tool to also allow other content-types
         # (like x-www-form-urlencoded), and to treat JSON as a dict that
         # fills requests.param.
-        app_config.update({'tools.json_in.force': False,
-                           'tools.json_in.processor': json_to_request_params})
+        app_config.update({ 'tools.json_in.force': False,
+                            'tools.json_in.processor': json_to_request_params })

         # Send tracebacks in error responses. They're hidden by the
         # error_page function for client errors (code 400-499).
-        app_config.update({'request.show_tracebacks': True})
+        app_config.update({ 'request.show_tracebacks' : True })
         self.force_traceback = force_traceback

         # Patch CherryPy error handler to never pad out error messages.

@ -469,12 +472,13 @@ class Server():
         if stoppable:
             root.exit = Exiter()
         cherrypy.tree.apps = {}
-        cherrypy.tree.mount(root, basepath, config={"/": app_config})
+        cherrypy.tree.mount(root, basepath, config = { "/" : app_config })

         # Shutdowns normally wait for clients to disconnect. To speed
         # up tests, set fast_shutdown = True
         if fast_shutdown:
-            cherrypy.server.shutdown_timeout = 0
+            # Setting timeout to 0 triggers os._exit(70) at shutdown, grr...
+            cherrypy.server.shutdown_timeout = 0.01
         else:
             cherrypy.server.shutdown_timeout = 5

@ -486,21 +490,18 @@ class Server():
             return json_error_page(status, message, traceback, version,
                                    self.force_traceback)

-    def start(self, blocking=False, event=None):
-        cherrypy_start(blocking, event)
+    def start(self, blocking = False, event = None):
+        cherrypy_start(blocking, event, self.embedded)

     def stop(self):
         cherrypy_stop()

-
 # Use a single global nilmdb.server.NilmDB and nilmdb.server.Server
 # instance since the database can only be opened once. For this to
 # work, the web server must use only a single process and single
 # Python interpreter. Multiple threads are OK.
 _wsgi_server = None

-def wsgi_application(dbpath, basepath):
+def wsgi_application(dbpath, basepath): # pragma: no cover
     """Return a WSGI application object with a database at the
     specified path.

@ -515,16 +516,17 @@ def wsgi_application(dbpath, basepath):
     if _wsgi_server is None:
         # Try to start the server
         try:
-            db = nilmdb.utils.serializer_proxy(
-                nilmdb.server.NilmDB)(dbpath)
+            db = nilmdb.utils.serializer_proxy(nilmdb.server.NilmDB)(dbpath)
             _wsgi_server = nilmdb.server.Server(
-                db, basepath=basepath.rstrip('/'))
+                db, embedded = True,
+                basepath = basepath.rstrip('/'))
         except Exception:
             # Build an error message on failure
             import pprint
             err = sprintf("Initializing database at path '%s' failed:\n\n",
                           dbpath)
             err += traceback.format_exc()
+            try:
                 import pwd
                 import grp
                 err += sprintf("\nRunning as: uid=%d (%s), gid=%d (%s) "

@ -532,14 +534,15 @@ def wsgi_application(dbpath, basepath):
                                os.getuid(), pwd.getpwuid(os.getuid())[0],
                                os.getgid(), grp.getgrgid(os.getgid())[0],
                                socket.gethostname(), os.getpid())
+            except ImportError:
+                pass
             err += sprintf("\nEnvironment:\n%s\n", pprint.pformat(environ))
         if _wsgi_server is None:
             # Serve up the error with our own mini WSGI app.
-            err_b = err.encode('utf-8')
-            headers = [('Content-type', 'text/plain; charset=utf-8'),
-                       ('Content-length', str(len(err_b)))]
+            headers = [ ('Content-type', 'text/plain'),
+                        ('Content-length', str(len(err))) ]
             start_response("500 Internal Server Error", headers)
-            return [err_b]
+            return [err]

     # Call the normal application
     return _wsgi_server.wsgi_application(environ, start_response)

@ -1,15 +1,11 @@
 """Miscellaneous decorators and other helpers for running a CherryPy
 server"""

-import os
-import sys
-import json
-import decorator
-import functools
-import threading
-
 import cherrypy
+import sys
+import os
+import decorator
+import simplejson as json

 # Helper to parse parameters into booleans
 def bool_param(s):

@ -17,25 +13,23 @@ def bool_param(s):
     supporting a few different types for 's'."""
     try:
         ss = s.lower()
-        if ss in ["0", "false", "f", "no", "n"]:
+        if ss in [ "0", "false", "f", "no", "n" ]:
             return False
-        if ss in ["1", "true", "t", "yes", "y"]:
+        if ss in [ "1", "true", "t", "yes", "y" ]:
             return True
     except Exception:
         return bool(s)
     raise cherrypy.HTTPError("400 Bad Request",
                              "can't parse parameter: " + ss)

-
 # Decorators
 def chunked_response(func):
     """Decorator to enable chunked responses."""
     # Set this to False to get better tracebacks from some requests
     # (/stream/extract, /stream/intervals).
-    func._cp_config = {'response.stream': True}
+    func._cp_config = { 'response.stream': True }
     return func

-
 def response_type(content_type):
     """Return a decorator-generating function that sets the
     response type to the specified string."""

@ -44,6 +38,27 @@ def response_type(content_type):
         return func(*args, **kwargs)
     return decorator.decorator(wrapper)

+@decorator.decorator
+def workaround_cp_bug_1200(func, *args, **kwargs): # pragma: no cover
+    """Decorator to work around CherryPy bug #1200 in a response
+    generator.
+
+    Even if chunked responses are disabled, LookupError or
+    UnicodeError exceptions may still be swallowed by CherryPy due to
+    bug #1200.  This throws them as generic Exceptions instead so that
+    they make it through.
+    """
+    exc_info = None
+    try:
+        for val in func(*args, **kwargs):
+            yield val
+    except (LookupError, UnicodeError):
+        # Re-raise it, but maintain the original traceback
+        exc_info = sys.exc_info()
+        new_exc = Exception(exc_info[0].__name__ + ": " + str(exc_info[1]))
+        raise new_exc, None, exc_info[2]
+    finally:
+        del exc_info

 def exception_to_httperror(*expected):
     """Return a decorator-generating function that catches expected

@ -61,7 +76,7 @@ def exception_to_httperror(*expected):
             # Re-raise it, but maintain the original traceback
             exc_info = sys.exc_info()
             new_exc = cherrypy.HTTPError("400 Bad Request", str(exc_info[1]))
-            raise new_exc.with_traceback(exc_info[2])
+            raise new_exc, None, exc_info[2]
         finally:
             del exc_info
     # We need to preserve the function's argspecs for CherryPy to
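The re-raise change in the hunk above is the standard Python 2 vs 3 split:
"raise exc, None, tb" is a syntax error on Python 3, which spells the same
operation exc.with_traceback(tb). A self-contained sketch of the Python 3
form, using exception types for illustration only:

    import sys

    def reraise_with_original_traceback():
        try:
            raise LookupError("no such stream")
        except LookupError:
            exc_info = sys.exc_info()
            new_exc = ValueError("400 Bad Request: " + str(exc_info[1]))
            # Python 2 spelling was: raise new_exc, None, exc_info[2]
            raise new_exc.with_traceback(exc_info[2])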
@ -69,8 +84,8 @@ def exception_to_httperror(*expected):
     # care of that.
     return decorator.decorator(wrapper)

-
 # Custom CherryPy tools
+
 def CORS_allow(methods):
     """This does several things:

@ -86,12 +101,12 @@ def CORS_allow(methods):
     request = cherrypy.request.headers
     response = cherrypy.response.headers

-    if not isinstance(methods, (tuple, list)):
-        methods = [methods]
-    methods = [m.upper() for m in methods if m]
-    if not methods:
-        methods = ['GET', 'HEAD']
-    elif 'GET' in methods and 'HEAD' not in methods:
+    if not isinstance(methods, (tuple, list)): # pragma: no cover
+        methods = [ methods ]
+    methods = [ m.upper() for m in methods if m ]
+    if not methods: # pragma: no cover
+        methods = [ 'GET', 'HEAD' ]
+    elif 'GET' in methods and 'HEAD' not in methods: # pragma: no cover
         methods.append('HEAD')
     response['Allow'] = ', '.join(methods)

@ -108,7 +123,7 @@ def CORS_allow(methods):
     response['Access-Control-Allow-Methods'] = ', '.join(methods)
     # Try to stop further processing and return a 200 OK
     cherrypy.response.status = "200 OK"
-    cherrypy.response.body = b""
+    cherrypy.response.body = ""
     cherrypy.request.handler = lambda: ""
     return

@ -125,83 +140,58 @@ def json_to_request_params(body):
         raise cherrypy.HTTPError(415)
     cherrypy.request.params.update(cherrypy.request.json)


 # Used as an "error_page.default" handler
 def json_error_page(status, message, traceback, version,
-                    force_traceback=False):
+                    force_traceback = False):
     """Return a custom error page in JSON so the client can parse it"""
-    errordata = {"status": status,
-                 "message": message,
-                 "version": version,
-                 "traceback": traceback}
+    errordata = { "status" : status,
+                  "message" : message,
+                  "traceback" : traceback }
     # Don't send a traceback if the error was 400-499 (client's fault)
+    try:
         code = int(status.split()[0])
         if not force_traceback:
-            if 400 <= code <= 499:
+            if code >= 400 and code <= 499:
                 errordata["traceback"] = ""
+    except Exception: # pragma: no cover
+        pass
     # Override the response type, which was previously set to text/html
     cherrypy.serving.response.headers['Content-Type'] = (
-        "application/json;charset=utf-8")
+        "application/json;charset=utf-8" )
     # Undo the HTML escaping that cherrypy's get_error_page function applies
     # (cherrypy issue 1135)
-    for k, v in errordata.items():
-        v = v.replace("<", "&lt;")
-        v = v.replace(">", "&gt;")
-        v = v.replace("&", "&amp;")
+    for k, v in errordata.iteritems():
+        v = v.replace("<","&lt;")
+        v = v.replace(">","&gt;")
+        v = v.replace("&","&amp;")
         errordata[k] = v
-    return json.dumps(errordata, separators=(',', ':'))
-
-
-class CherryPyExit(SystemExit):
-    pass
-
-
-def cherrypy_patch_exit():
-    # Cherrypy stupidly calls os._exit(70) when it can't bind the port
-    # and exits.  Instead of that, raise a CherryPyExit (derived from
-    # SystemExit).  This exception may not make it back up to the caller
-    # due to internal thread use in the CherryPy engine, but there should
-    # be at least some indication that it happened.
-    bus = cherrypy.process.wspbus.bus
-    if "_patched_exit" in bus.__dict__:
-        return
-    bus._patched_exit = True
-
-    def patched_exit(orig):
-        real_exit = os._exit
-
-        def fake_exit(code):
-            raise CherryPyExit(code)
-        os._exit = fake_exit
-        try:
-            orig()
-        finally:
-            os._exit = real_exit
-    bus.exit = functools.partial(patched_exit, bus.exit)
-
-    # A behavior change in Python 3.8 means that some thread exceptions,
-    # derived from SystemExit, now print tracebacks where they didn't
-    # used to: https://bugs.python.org/issue1230540
-    # Install a thread exception hook that ignores CherryPyExit;
-    # to make this match the behavior where we didn't set
-    # threading.excepthook, we also need to ignore SystemExit.
-    def hook(args):
-        if args.exc_type == CherryPyExit or args.exc_type == SystemExit:
-            return
-        sys.excepthook(args.exc_type, args.exc_value,
-                       args.exc_traceback)  # pragma: no cover
-    threading.excepthook = hook
+    return json.dumps(errordata, separators=(',',':'))


 # Start/stop CherryPy standalone server
-def cherrypy_start(blocking=False, event=False):
+def cherrypy_start(blocking = False, event = False, embedded = False):
     """Start the CherryPy server, handling errors and signals
     somewhat gracefully."""

-    cherrypy_patch_exit()
+    if not embedded: # pragma: no cover
+        # Handle signals nicely
+        if hasattr(cherrypy.engine, "signal_handler"):
+            cherrypy.engine.signal_handler.subscribe()
+        if hasattr(cherrypy.engine, "console_control_handler"):
+            cherrypy.engine.console_control_handler.subscribe()

-    # Start the server
+    # Cherrypy stupidly calls os._exit(70) when it can't bind the
+    # port.  At least try to print a reasonable error and continue
+    # in this case, rather than just dying silently (as we would
+    # otherwise do in embedded mode)
+    real_exit = os._exit
+    def fake_exit(code): # pragma: no cover
+        if code == os.EX_SOFTWARE:
+            fprintf(sys.stderr, "error: CherryPy called os._exit!\n")
+        else:
+            real_exit(code)
+    os._exit = fake_exit
     cherrypy.engine.start()
+    os._exit = real_exit

     # Signal that the engine has started successfully
     if event is not None:
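Since json_error_page serializes errors as compact JSON keyed by status,
message, and traceback (plus version on master), a client can decode a failed
response along these lines (the response body shown is hypothetical):

    import json

    body = '{"status":"404 Not Found","message":"No such stream","traceback":""}'
    err = json.loads(body)
    print(err["status"] + ": " + err["message"])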
@ -210,16 +200,15 @@ def cherrypy_start(blocking=False, event=False):
     if blocking:
         try:
             cherrypy.engine.wait(cherrypy.engine.states.EXITING,
-                                 interval=0.1, channel='main')
-        except (KeyboardInterrupt, IOError):
-            cherrypy.engine.log('Keyboard Interrupt: shutting down')
+                                 interval = 0.1, channel = 'main')
+        except (KeyboardInterrupt, IOError): # pragma: no cover
+            cherrypy.engine.log('Keyboard Interrupt: shutting down bus')
             cherrypy.engine.exit()
-        except SystemExit:
-            cherrypy.engine.log('SystemExit raised: shutting down')
+        except SystemExit: # pragma: no cover
+            cherrypy.engine.log('SystemExit raised: shutting down bus')
             cherrypy.engine.exit()
             raise


 # Stop CherryPy server
 def cherrypy_stop():
     cherrypy.engine.exit()
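Taken together, cherrypy_start() and cherrypy_stop() let callers run the
server either blocking or in the background with a readiness event. A rough
usage sketch based on the signatures above (the database path and port are
assumptions, not repository defaults):

    import threading
    import nilmdb.server
    import nilmdb.utils

    db = nilmdb.utils.serializer_proxy(nilmdb.server.NilmDB)("/tmp/db")
    server = nilmdb.server.Server(db, host='127.0.0.1', port=12380)
    event = threading.Event()
    server.start(blocking=False, event=event)
    event.wait(timeout=10)  # set once the engine has started
    server.stop()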
@ -1,6 +1,6 @@
 """NilmDB utilities"""

+from __future__ import absolute_import
 from nilmdb.utils.timer import Timer
 from nilmdb.utils.serializer import serializer_proxy
 from nilmdb.utils.lrucache import lru_cache

@ -14,3 +14,4 @@ import nilmdb.utils.iterator
 import nilmdb.utils.interval
 import nilmdb.utils.lock
 import nilmdb.utils.sort
+import nilmdb.utils.unicode
@ -2,12 +2,12 @@

 import os


 def replace_file(filename, content):
     """Attempt to atomically and durably replace the filename with the
-    given contents"""
+    given contents.  This is intended to be 'pretty good on most
+    OSes', but not necessarily bulletproof."""

-    newfilename = filename + b".new"
+    newfilename = filename + ".new"

     # Write to new file, flush it
     with open(newfilename, "wb") as f:

@ -16,4 +16,11 @@ def replace_file(filename, content):
         os.fsync(f.fileno())

     # Move new file over old one
-    os.replace(newfilename, filename)
+    try:
+        os.rename(newfilename, filename)
+    except OSError: # pragma: no cover
+        # Some OSes might not support renaming over an existing file.
+        # This is definitely NOT atomic!
+        os.remove(filename)
+        os.rename(newfilename, filename)
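The master side of the hunk above relies on os.replace(), which on Python 3.3+
atomically renames over an existing file on both POSIX and Windows, making the
remove-then-rename fallback unnecessary there. A condensed sketch of the whole
pattern, assuming content is a bytes object:

    import os

    def replace_file_sketch(filename, content):
        newfilename = filename + ".new"
        # Write and flush the new copy first, so the data is durable
        # before it becomes visible under the real name.
        with open(newfilename, "wb") as f:
            f.write(content)
            f.flush()
            os.fsync(f.fileno())
        os.replace(newfilename, filename)  # atomic on Python 3.3+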
710
nilmdb/utils/datetime_tz/__init__.py
Normal file
710
nilmdb/utils/datetime_tz/__init__.py
Normal file
|
@ -0,0 +1,710 @@
|
||||||
|
#!/usr/bin/python
|
||||||
|
#
|
||||||
|
# Copyright 2009 Google Inc.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
#
|
||||||
|
#
|
||||||
|
# Disable the invalid name warning as we are inheriting from a standard library
|
||||||
|
# object.
|
||||||
|
# pylint: disable-msg=C6409,W0212
|
||||||
|
|
||||||
|
"""A version of the datetime module which *cares* about timezones.
|
||||||
|
|
||||||
|
This module will never return a naive datetime object. This requires the module
|
||||||
|
know your local timezone, which it tries really hard to figure out.
|
||||||
|
|
||||||
|
You can override the detection by using the datetime.tzaware.defaulttz_set
|
||||||
|
method. It the module is unable to figure out the timezone itself this method
|
||||||
|
*must* be called before the normal module is imported. If done before importing
|
||||||
|
it can also speed up the time taken to import as the defaulttz will no longer
|
||||||
|
try and do the detection.
|
||||||
|
"""
|
||||||
|
|
||||||
|
__author__ = "tansell@google.com (Tim Ansell)"
|
||||||
|
|
||||||
|
import calendar
|
||||||
|
import datetime
|
||||||
|
import os
|
||||||
|
import os.path
|
||||||
|
import re
|
||||||
|
import time
|
||||||
|
import warnings
|
||||||
|
import dateutil.parser
|
||||||
|
import dateutil.relativedelta
|
||||||
|
import dateutil.tz
|
||||||
|
import pytz
|
||||||
|
import pytz_abbr
|
||||||
|
|
||||||
|
|
||||||
|
try:
|
||||||
|
# pylint: disable-msg=C6204
|
||||||
|
import functools
|
||||||
|
except ImportError, e:
|
||||||
|
|
||||||
|
class functools(object):
|
||||||
|
"""Fake replacement for a full functools."""
|
||||||
|
|
||||||
|
# pylint: disable-msg=W0613
|
||||||
|
@staticmethod
|
||||||
|
def wraps(f, *args, **kw):
|
||||||
|
return f
|
||||||
|
|
||||||
|
|
||||||
|
# Need to patch pytz.utc to have a _utcoffset so you can normalize/localize
|
||||||
|
# using it.
|
||||||
|
pytz.utc._utcoffset = datetime.timedelta()
|
||||||
|
|
||||||
|
|
||||||
|
timedelta = datetime.timedelta
|
||||||
|
|
||||||
|
|
||||||
|
def _tzinfome(tzinfo):
|
||||||
|
"""Gets a tzinfo object from a string.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
tzinfo: A string (or string like) object, or a datetime.tzinfo object.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
An datetime.tzinfo object.
|
||||||
|
|
||||||
|
Raises:
|
||||||
|
UnknownTimeZoneError: If the timezone given can't be decoded.
|
||||||
|
"""
|
||||||
|
if not isinstance(tzinfo, datetime.tzinfo):
|
||||||
|
try:
|
||||||
|
tzinfo = pytz.timezone(tzinfo)
|
||||||
|
except AttributeError:
|
||||||
|
raise pytz.UnknownTimeZoneError("Unknown timezone! %s" % tzinfo)
|
||||||
|
return tzinfo
|
||||||
|
|
||||||
|
|
||||||
|
# Our "local" timezone
|
||||||
|
_localtz = None
|
||||||
|
|
||||||
|
|
||||||
|
def localtz():
|
||||||
|
"""Get the local timezone.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
The localtime timezone as a tzinfo object.
|
||||||
|
"""
|
||||||
|
# pylint: disable-msg=W0603
|
||||||
|
global _localtz
|
||||||
|
if _localtz is None:
|
||||||
|
_localtz = detect_timezone()
|
||||||
|
return _localtz
|
||||||
|
|
||||||
|
|
||||||
|
def localtz_set(timezone):
|
||||||
|
"""Set the local timezone."""
|
||||||
|
# pylint: disable-msg=W0603
|
||||||
|
global _localtz
|
||||||
|
_localtz = _tzinfome(timezone)
|
||||||
|
|
||||||
|
|
||||||
|
def detect_timezone():
|
||||||
|
"""Try and detect the timezone that Python is currently running in.
|
||||||
|
|
||||||
|
We have a bunch of different methods for trying to figure this out (listed in
|
||||||
|
order they are attempted).
|
||||||
|
* Try TZ environment variable.
|
||||||
|
* Try and find /etc/timezone file (with timezone name).
|
||||||
|
* Try and find /etc/localtime file (with timezone data).
|
||||||
|
* Try and match a TZ to the current dst/offset/shortname.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
The detected local timezone as a tzinfo object
|
||||||
|
|
||||||
|
Raises:
|
||||||
|
pytz.UnknownTimeZoneError: If it was unable to detect a timezone.
|
||||||
|
"""
|
||||||
|
# First we try the TZ variable
|
||||||
|
tz = _detect_timezone_environ()
|
||||||
|
if tz is not None:
|
||||||
|
return tz
|
||||||
|
|
||||||
|
# Second we try /etc/timezone and use the value in that
|
||||||
|
tz = _detect_timezone_etc_timezone()
|
||||||
|
if tz is not None:
|
||||||
|
return tz
|
||||||
|
|
||||||
|
# Next we try and see if something matches the tzinfo in /etc/localtime
|
||||||
|
tz = _detect_timezone_etc_localtime()
|
||||||
|
if tz is not None:
|
||||||
|
return tz
|
||||||
|
|
||||||
|
# Next we try and use a similiar method to what PHP does.
|
||||||
|
# We first try to search on time.tzname, time.timezone, time.daylight to
|
||||||
|
# match a pytz zone.
|
||||||
|
warnings.warn("Had to fall back to worst detection method (the 'PHP' "
|
||||||
|
"method).")
|
||||||
|
|
||||||
|
tz = _detect_timezone_php()
|
||||||
|
if tz is not None:
|
||||||
|
return tz
|
||||||
|
|
||||||
|
raise pytz.UnknownTimeZoneError("Unable to detect your timezone!")
|
||||||
|
|
||||||
|
|
||||||
|
def _detect_timezone_environ():
|
||||||
|
if "TZ" in os.environ:
|
||||||
|
try:
|
||||||
|
return pytz.timezone(os.environ["TZ"])
|
||||||
|
except (IOError, pytz.UnknownTimeZoneError):
|
||||||
|
warnings.warn("You provided a TZ environment value (%r) we did not "
|
||||||
|
"understand!" % os.environ["TZ"])
|
||||||
|
|
||||||
|
|
||||||
|
def _detect_timezone_etc_timezone():
|
||||||
|
if os.path.exists("/etc/timezone"):
|
||||||
|
try:
|
||||||
|
tz = file("/etc/timezone").read().strip()
|
||||||
|
try:
|
||||||
|
return pytz.timezone(tz)
|
||||||
|
except (IOError, pytz.UnknownTimeZoneError), ei:
|
||||||
|
warnings.warn("Your /etc/timezone file references a timezone (%r) that"
|
||||||
|
" is not valid (%r)." % (tz, ei))
|
||||||
|
|
||||||
|
# Problem reading the /etc/timezone file
|
||||||
|
except IOError, eo:
|
||||||
|
warnings.warn("Could not access your /etc/timezone file: %s" % eo)
|
||||||
|
|
||||||
|
|
||||||
|
def _detect_timezone_etc_localtime():
|
||||||
|
matches = []
|
||||||
|
if os.path.exists("/etc/localtime"):
|
||||||
|
localtime = pytz.tzfile.build_tzinfo("/etc/localtime",
|
||||||
|
file("/etc/localtime"))
|
||||||
|
|
||||||
|
# See if we can find a "Human Name" for this..
|
||||||
|
for tzname in pytz.all_timezones:
|
||||||
|
tz = _tzinfome(tzname)
|
||||||
|
|
||||||
|
if dir(tz) != dir(localtime):
|
||||||
|
continue
|
||||||
|
|
||||||
|
for attrib in dir(tz):
|
||||||
|
# Ignore functions and specials
|
||||||
|
if callable(getattr(tz, attrib)) or attrib.startswith("__"):
|
||||||
|
continue
|
||||||
|
|
||||||
|
# This will always be different
|
||||||
|
if attrib == "zone" or attrib == "_tzinfos":
|
||||||
|
continue
|
||||||
|
|
||||||
|
if getattr(tz, attrib) != getattr(localtime, attrib):
|
||||||
|
break
|
||||||
|
|
||||||
|
# We get here iff break didn't happen, i.e. no meaningful attributes
|
||||||
|
# differ between tz and localtime
|
||||||
|
else:
|
||||||
|
matches.append(tzname)
|
||||||
|
|
||||||
|
if len(matches) == 1:
|
||||||
|
return _tzinfome(matches[0])
|
||||||
|
else:
|
||||||
|
# Warn the person about this!
|
||||||
|
warning = "Could not get a human name for your timezone: "
|
||||||
|
if len(matches) > 1:
|
||||||
|
warning += ("We detected multiple matches for your /etc/localtime. "
|
||||||
|
"(Matches where %s)" % matches)
|
||||||
|
return _tzinfome(matches[0])
|
||||||
|
else:
|
||||||
|
warning += "We detected no matches for your /etc/localtime."
|
||||||
|
warnings.warn(warning)
|
||||||
|
|
||||||
|
# Register /etc/localtime as the timezone loaded.
|
||||||
|
pytz._tzinfo_cache['/etc/localtime'] = localtime
|
||||||
|
return localtime
|
||||||
|
|
||||||
|
|
||||||
|
def _detect_timezone_php():
|
||||||
|
tomatch = (time.tzname[0], time.timezone, time.daylight)
|
||||||
|
now = datetime.datetime.now()
|
||||||
|
|
||||||
|
matches = []
|
||||||
|
for tzname in pytz.all_timezones:
|
||||||
|
try:
|
||||||
|
tz = pytz.timezone(tzname)
|
||||||
|
except IOError:
|
||||||
|
continue
|
||||||
|
|
||||||
|
try:
|
||||||
|
indst = tz.localize(now).timetuple()[-1]
|
||||||
|
|
||||||
|
if tomatch == (tz._tzname, -tz._utcoffset.seconds, indst):
|
||||||
|
matches.append(tzname)
|
||||||
|
|
||||||
|
# pylint: disable-msg=W0704
|
||||||
|
except AttributeError:
|
||||||
|
pass
|
||||||
|
|
||||||
|
if len(matches) > 1:
|
||||||
|
warnings.warn("We detected multiple matches for the timezone, choosing "
|
||||||
|
"the first %s. (Matches where %s)" % (matches[0], matches))
|
||||||
|
return pytz.timezone(matches[0])
|
||||||
|
|
||||||
|
|
||||||
|
class datetime_tz(datetime.datetime):
|
||||||
|
"""An extension of the inbuilt datetime adding more functionality.
|
||||||
|
|
||||||
|
The extra functionality includes:
|
||||||
|
* Partial parsing support (IE 2006/02/30 matches %Y/%M/%D %H:%M)
|
||||||
|
* Full integration with pytz (just give it the string of the timezone!)
|
||||||
|
* Proper support for going to/from Unix timestamps (which are in UTC!).
|
||||||
|
"""
|
||||||
|
__slots__ = ["is_dst"]
|
||||||
|
|
||||||
|
def __new__(cls, *args, **kw):
|
||||||
|
args = list(args)
|
||||||
|
if not args:
|
||||||
|
raise TypeError("Not enough arguments given.")
|
||||||
|
|
||||||
|
# See if we are given a tzinfo object...
|
||||||
|
tzinfo = None
|
||||||
|
if isinstance(args[-1], (datetime.tzinfo, basestring)):
|
||||||
|
tzinfo = _tzinfome(args.pop(-1))
|
||||||
|
elif kw.get("tzinfo", None) is not None:
|
||||||
|
tzinfo = _tzinfome(kw.pop("tzinfo"))
|
||||||
|
|
||||||
|
# Create a datetime object if we don't have one
|
||||||
|
if isinstance(args[0], datetime.datetime):
|
||||||
|
# Convert the datetime instance to a datetime object.
|
||||||
|
newargs = (list(args[0].timetuple()[0:6]) +
|
||||||
|
[args[0].microsecond, args[0].tzinfo])
|
||||||
|
dt = datetime.datetime(*newargs)
|
||||||
|
|
||||||
|
if tzinfo is None and dt.tzinfo is None:
|
||||||
|
raise TypeError("Must specify a timezone!")
|
||||||
|
|
||||||
|
if tzinfo is not None and dt.tzinfo is not None:
|
||||||
|
raise TypeError("Can not give a timezone with timezone aware"
|
||||||
|
" datetime object! (Use localize.)")
|
||||||
|
else:
|
||||||
|
dt = datetime.datetime(*args, **kw)
|
||||||
|
|
||||||
|
if dt.tzinfo is not None:
|
||||||
|
# Re-normalize the dt object
|
||||||
|
dt = dt.tzinfo.normalize(dt)
|
||||||
|
|
||||||
|
else:
|
||||||
|
if tzinfo is None:
|
||||||
|
tzinfo = localtz()
|
||||||
|
|
||||||
|
try:
|
||||||
|
dt = tzinfo.localize(dt, is_dst=None)
|
||||||
|
except pytz.AmbiguousTimeError:
|
||||||
|
is_dst = None
|
||||||
|
if "is_dst" in kw:
|
||||||
|
is_dst = kw.pop("is_dst")
|
||||||
|
|
||||||
|
try:
|
||||||
|
dt = tzinfo.localize(dt, is_dst)
|
||||||
|
except IndexError:
|
||||||
|
raise pytz.AmbiguousTimeError("No such time exists!")
|
||||||
|
|
||||||
|
newargs = list(dt.timetuple()[0:6])+[dt.microsecond, dt.tzinfo]
|
||||||
|
obj = datetime.datetime.__new__(cls, *newargs)
|
||||||
|
obj.is_dst = obj.dst() != datetime.timedelta(0)
|
||||||
|
return obj
|
||||||
|
|
||||||
|
def asdatetime(self, naive=True):
|
||||||
|
"""Return this datetime_tz as a datetime object.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
naive: Return *without* any tz info.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
This datetime_tz as a datetime object.
|
||||||
|
"""
|
||||||
|
args = list(self.timetuple()[0:6])+[self.microsecond]
|
||||||
|
if not naive:
|
||||||
|
args.append(self.tzinfo)
|
||||||
|
return datetime.datetime(*args)
|
||||||
|
|
||||||
|
def asdate(self):
|
||||||
|
"""Return this datetime_tz as a date object.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
This datetime_tz as a date object.
|
||||||
|
"""
|
||||||
|
return datetime.date(self.year, self.month, self.day)
|
||||||
|
|
||||||
|
def totimestamp(self):
|
||||||
|
"""Convert this datetime object back to a unix timestamp.
|
||||||
|
|
||||||
|
The Unix epoch is the time 00:00:00 UTC on January 1, 1970.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Unix timestamp.
|
||||||
|
"""
|
||||||
|
return calendar.timegm(self.utctimetuple())+1e-6*self.microsecond
|
||||||
|
|
||||||
|
def astimezone(self, tzinfo):
|
||||||
|
"""Returns a version of this timestamp converted to the given timezone.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
tzinfo: Either a datetime.tzinfo object or a string (which will be looked
|
||||||
|
up in pytz.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
A datetime_tz object in the given timezone.
|
||||||
|
"""
|
||||||
|
# Assert we are not a naive datetime object
|
||||||
|
assert self.tzinfo is not None
|
||||||
|
|
||||||
|
tzinfo = _tzinfome(tzinfo)
|
||||||
|
|
||||||
|
d = self.asdatetime(naive=False).astimezone(tzinfo)
|
||||||
|
return datetime_tz(d)
|
||||||
|
|
||||||
|
# pylint: disable-msg=C6113
|
||||||
|
def replace(self, **kw):
|
||||||
|
"""Return datetime with new specified fields given as arguments.
|
||||||
|
|
||||||
|
For example, dt.replace(days=4) would return a new datetime_tz object with
|
||||||
|
exactly the same as dt but with the days attribute equal to 4.
|
||||||
|
|
||||||
|
Any attribute can be replaced, but tzinfo can not be set to None.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
Any datetime_tz attribute.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
A datetime_tz object with the attributes replaced.
|
||||||
|
|
||||||
|
Raises:
|
||||||
|
TypeError: If the given replacement is invalid.
|
||||||
|
"""
|
||||||
|
if "tzinfo" in kw:
|
||||||
|
if kw["tzinfo"] is None:
|
||||||
|
raise TypeError("Can not remove the timezone use asdatetime()")
|
||||||
|
|
||||||
|
is_dst = None
|
||||||
|
if "is_dst" in kw:
|
||||||
|
is_dst = kw["is_dst"]
|
||||||
|
del kw["is_dst"]
|
||||||
|
else:
|
||||||
|
# Use our own DST setting..
|
||||||
|
is_dst = self.is_dst
|
||||||
|
|
||||||
|
replaced = self.asdatetime().replace(**kw)
|
||||||
|
|
||||||
|
return datetime_tz(replaced, tzinfo=self.tzinfo.zone, is_dst=is_dst)
|
||||||
|
|
||||||
|
# pylint: disable-msg=C6310
|
||||||
|
@classmethod
|
||||||
|
def smartparse(cls, toparse, tzinfo=None):
|
||||||
|
"""Method which uses dateutil.parse and extras to try and parse the string.
|
||||||
|
|
||||||
|
Valid dates are found at:
|
||||||
|
http://labix.org/python-dateutil#head-1443e0f14ad5dff07efd465e080d1110920673d8-2
|
||||||
|
|
||||||
|
Other valid formats include:
|
||||||
|
"now" or "today"
|
||||||
|
"yesterday"
|
||||||
|
"tommorrow"
|
||||||
|
"5 minutes ago"
|
||||||
|
"10 hours ago"
|
||||||
|
"10h5m ago"
|
||||||
|
"start of yesterday"
|
||||||
|
"end of tommorrow"
|
||||||
|
"end of 3rd of March"
|
||||||
|
|
||||||
|
Args:
|
||||||
|
toparse: The string to parse.
|
||||||
|
tzinfo: Timezone for the resultant datetime_tz object should be in.
|
||||||
|
(Defaults to your local timezone.)
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
New datetime_tz object.
|
||||||
|
|
||||||
|
Raises:
|
||||||
|
ValueError: If unable to make sense of the input.
|
||||||
|
"""
|
||||||
|
# Default for empty fields are:
|
||||||
|
# year/month/day == now
|
||||||
|
# hour/minute/second/microsecond == 0
|
||||||
|
toparse = toparse.strip()
|
||||||
|
|
||||||
|
if tzinfo is None:
|
||||||
|
dt = cls.now()
|
||||||
|
else:
|
||||||
|
dt = cls.now(tzinfo)
|
||||||
|
|
||||||
|
default = dt.replace(hour=0, minute=0, second=0, microsecond=0)
|
||||||
|
|
||||||
|
# Remove "start of " and "end of " prefix in the string
|
||||||
|
if toparse.lower().startswith("end of "):
|
||||||
|
toparse = toparse[7:].strip()
|
||||||
|
|
||||||
|
dt += datetime.timedelta(days=1)
|
||||||
|
dt = dt.replace(hour=0, minute=0, second=0, microsecond=0)
|
||||||
|
dt -= datetime.timedelta(microseconds=1)
|
||||||
|
|
||||||
|
default = dt
|
||||||
|
|
||||||
|
elif toparse.lower().startswith("start of "):
|
||||||
|
toparse = toparse[9:].strip()
|
||||||
|
|
||||||
|
dt = dt.replace(hour=0, minute=0, second=0, microsecond=0)
|
||||||
|
default = dt
|
||||||
|
|
||||||
|
# Handle strings with "now", "today", "yesterday", "tomorrow" and "ago".
|
||||||
|
# Need to use lowercase
|
||||||
|
toparselower = toparse.lower()
|
||||||
|
|
||||||
|
if toparselower in ["now", "today"]:
|
||||||
|
pass
|
||||||
|
|
||||||
|
elif toparselower == "yesterday":
|
||||||
|
dt -= datetime.timedelta(days=1)
|
||||||
|
|
||||||
|
elif toparselower == "tommorrow":
|
||||||
|
dt += datetime.timedelta(days=1)
|
||||||
|
|
||||||
|
elif "ago" in toparselower:
|
||||||
|
# Remove the "ago" bit
|
||||||
|
toparselower = toparselower[:-3]
|
||||||
|
# Replace all "a day and an hour" with "1 day 1 hour"
|
||||||
|
toparselower = toparselower.replace("a ", "1 ")
|
||||||
|
toparselower = toparselower.replace("an ", "1 ")
|
||||||
|
toparselower = toparselower.replace(" and ", " ")
|
||||||
|
|
||||||
|
# Match the following
|
||||||
|
# 1 hour ago
|
||||||
|
# 1h ago
|
||||||
|
# 1 h ago
|
||||||
|
# 1 hour ago
|
||||||
|
# 2 hours ago
|
||||||
|
# Same with minutes, seconds, etc.
|
||||||
|
|
||||||
|
tocheck = ("seconds", "minutes", "hours", "days", "weeks", "months",
|
||||||
|
"years")
|
||||||
|
result = {}
|
||||||
|
for match in re.finditer("([0-9]+)([^0-9]*)", toparselower):
|
||||||
|
amount = int(match.group(1))
|
||||||
|
unit = match.group(2).strip()
|
||||||
|
|
||||||
|
for bit in tocheck:
|
||||||
|
regex = "^([%s]|((%s)s?))$" % (
|
||||||
|
bit[0], bit[:-1])
|
||||||
|
|
||||||
|
bitmatch = re.search(regex, unit)
|
||||||
|
if bitmatch:
|
||||||
|
result[bit] = amount
|
||||||
|
break
|
||||||
|
else:
|
||||||
|
raise ValueError("Was not able to parse date unit %r!" % unit)
|
||||||
|
|
||||||
|
delta = dateutil.relativedelta.relativedelta(**result)
|
||||||
|
dt -= delta
|
||||||
|
|
||||||
|
else:
|
||||||
|
# Handle strings with normal datetime format, use original case.
|
||||||
|
dt = dateutil.parser.parse(toparse, default=default.asdatetime(),
|
||||||
|
tzinfos=pytz_abbr.tzinfos)
|
||||||
|
if dt is None:
|
||||||
|
raise ValueError("Was not able to parse date!")
|
||||||
|
|
||||||
|
if dt.tzinfo is pytz_abbr.unknown:
|
||||||
|
dt = dt.replace(tzinfo=None)
|
||||||
|
|
||||||
|
if dt.tzinfo is None:
|
||||||
|
if tzinfo is None:
|
||||||
|
tzinfo = localtz()
|
||||||
|
dt = cls(dt, tzinfo)
|
||||||
|
else:
|
||||||
|
if isinstance(dt.tzinfo, pytz_abbr.tzabbr):
|
||||||
|
abbr = dt.tzinfo
|
||||||
|
dt = dt.replace(tzinfo=None)
|
||||||
|
dt = cls(dt, abbr.zone, is_dst=abbr.dst)
|
||||||
|
|
||||||
|
dt = cls(dt)
|
||||||
|
|
||||||
|
return dt
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def utcfromtimestamp(cls, timestamp):
|
||||||
|
"""Returns a datetime object of a given timestamp (in UTC)."""
|
||||||
|
obj = datetime.datetime.utcfromtimestamp(timestamp)
|
||||||
|
obj = pytz.utc.localize(obj)
|
||||||
|
return cls(obj)
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def fromtimestamp(cls, timestamp):
|
||||||
|
"""Returns a datetime object of a given timestamp (in local tz)."""
|
||||||
|
d = cls.utcfromtimestamp(timestamp)
|
||||||
|
return d.astimezone(localtz())
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def utcnow(cls):
|
||||||
|
"""Return a new datetime representing UTC day and time."""
|
||||||
|
obj = datetime.datetime.utcnow()
|
||||||
|
obj = cls(obj, tzinfo=pytz.utc)
|
||||||
|
return obj
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def now(cls, tzinfo=None):
|
||||||
|
"""[tz] -> new datetime with tz's local day and time."""
|
||||||
|
obj = cls.utcnow()
|
||||||
|
if tzinfo is None:
|
||||||
|
tzinfo = localtz()
|
||||||
|
return obj.astimezone(tzinfo)
|
||||||
|
|
||||||
|
today = now
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def fromordinal(ordinal):
|
||||||
|
raise SyntaxError("Not enough information to create a datetime_tz object "
|
||||||
|
"from an ordinal. Please use datetime.date.fromordinal")
|
||||||
|
|
||||||
|
|
||||||
|
class iterate(object):
|
||||||
|
"""Helpful iterators for working with datetime_tz objects."""
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def between(start, delta, end=None):
|
||||||
|
"""Return an iterator between this date till given end point.
|
||||||
|
|
||||||
|
Example usage:
|
||||||
|
>>> d = datetime_tz.smartparse("5 days ago")
|
||||||
|
2008/05/12 11:45
|
||||||
|
>>> for i in d.between(timedelta(days=1), datetime_tz.now()):
|
||||||
|
>>> print i
|
||||||
|
2008/05/12 11:45
|
||||||
|
2008/05/13 11:45
|
||||||
|
2008/05/14 11:45
|
||||||
|
2008/05/15 11:45
|
||||||
|
2008/05/16 11:45
|
||||||
|
|
||||||
|
Args:
|
||||||
|
start: The date to start at.
|
||||||
|
delta: The interval to iterate with.
|
||||||
|
end: (Optional) Date to end at. If not given the iterator will never
|
||||||
|
terminate.
|
||||||
|
|
||||||
|
Yields:
|
||||||
|
datetime_tz objects.
|
||||||
|
"""
|
||||||
|
toyield = start
|
||||||
|
while end is None or toyield < end:
|
||||||
|
yield toyield
|
||||||
|
toyield += delta
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def weeks(start, end=None):
|
||||||
|
"""Iterate over the weeks between the given datetime_tzs.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
start: datetime_tz to start from.
|
||||||
|
end: (Optional) Date to end at, if not given the iterator will never
|
||||||
|
terminate.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
An iterator which generates datetime_tz objects a week apart.
|
||||||
|
"""
|
||||||
|
return iterate.between(start, datetime.timedelta(days=7), end)
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def days(start, end=None):
|
||||||
|
"""Iterate over the days between the given datetime_tzs.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
start: datetime_tz to start from.
|
||||||
|
end: (Optional) Date to end at, if not given the iterator will never
|
||||||
|
terminate.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
An iterator which generates datetime_tz objects a day apart.
|
||||||
|
"""
|
||||||
|
return iterate.between(start, datetime.timedelta(days=1), end)
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def hours(start, end=None):
|
||||||
|
"""Iterate over the hours between the given datetime_tzs.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
start: datetime_tz to start from.
|
||||||
|
end: (Optional) Date to end at, if not given the iterator will never
|
||||||
|
terminate.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
An iterator which generates datetime_tz objects a hour apart.
|
||||||
|
"""
|
||||||
|
return iterate.between(start, datetime.timedelta(hours=1), end)
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def minutes(start, end=None):
|
||||||
|
"""Iterate over the minutes between the given datetime_tzs.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
start: datetime_tz to start from.
|
||||||
|
end: (Optional) Date to end at, if not given the iterator will never
|
||||||
|
terminate.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
An iterator which generates datetime_tz objects a minute apart.
|
||||||
|
"""
|
||||||
|
return iterate.between(start, datetime.timedelta(minutes=1), end)
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def seconds(start, end=None):
|
||||||
|
"""Iterate over the seconds between the given datetime_tzs.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
start: datetime_tz to start from.
|
||||||
|
end: (Optional) Date to end at, if not given the iterator will never
|
||||||
|
terminate.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
An iterator which generates datetime_tz objects a second apart.
|
||||||
|
"""
|
||||||
|
return iterate.between(start, datetime.timedelta(minutes=1), end)
|
||||||
|
|
||||||
|
|
||||||
|
def _wrap_method(name):
|
||||||
|
"""Wrap a method.
|
||||||
|
|
||||||
|
Patch a method which might return a datetime.datetime to return a
|
||||||
|
datetime_tz.datetime_tz instead.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
name: The name of the method to patch
|
||||||
|
"""
|
||||||
|
method = getattr(datetime.datetime, name)
|
||||||
|
|
||||||
|
# Have to give the second argument as method has no __module__ option.
|
||||||
|
@functools.wraps(method, ("__name__", "__doc__"), ())
|
||||||
|
def wrapper(*args, **kw):
|
||||||
|
r = method(*args, **kw)
|
||||||
|
|
||||||
|
if isinstance(r, datetime.datetime) and not isinstance(r, datetime_tz):
|
||||||
|
r = datetime_tz(r)
|
||||||
|
return r
|
||||||
|
|
||||||
|
setattr(datetime_tz, name, wrapper)
|
||||||
|
|
||||||
|
for methodname in ["__add__", "__radd__", "__rsub__", "__sub__", "combine"]:
|
||||||
|
|
||||||
|
# Make sure we have not already got an override for this method
|
||||||
|
assert methodname not in datetime_tz.__dict__
|
||||||
|
|
||||||
|
_wrap_method(methodname)
|
||||||
|
|
||||||
|
|
||||||
|
__all__ = ['datetime_tz', 'detect_timezone', 'iterate', 'localtz',
|
||||||
|
'localtz_set', 'timedelta', '_detect_timezone_environ',
|
||||||
|
'_detect_timezone_etc_localtime', '_detect_timezone_etc_timezone',
|
||||||
|
'_detect_timezone_php']
|
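Before the pytz_abbr listing below, a quick behavioral sketch of the two pieces above -- the iterate helpers and the wrapped arithmetic methods (hedged: it assumes the package is importable as nilmdb.utils.datetime_tz, matching the import used elsewhere in this tree):

    import datetime
    from nilmdb.utils import datetime_tz

    now = datetime_tz.datetime_tz.now()        # timezone-aware current time
    later = now + datetime.timedelta(hours=1)  # dispatches through the wrapped __add__
    print(type(later).__name__)                # "datetime_tz", not plain "datetime"

    # iterate.days() yields values one day apart until the end point:
    for d in datetime_tz.iterate.days(now, later):
        print(d)                               # exactly one value (now itself)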
230 nilmdb/utils/datetime_tz/pytz_abbr.py Normal file
@ -0,0 +1,230 @@
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright 2010 Google Inc. All Rights Reserved.
#

"""
Common time zone acronyms/abbreviations for use with the datetime_tz module.

*WARNING*: There are lots of caveats when using this module which are listed
below.

CAVEAT 1: The acronyms/abbreviations are not globally unique, they are not even
unique within a region. For example, EST can mean any of,
  Eastern Standard Time in Australia (which is 10 hours ahead of UTC)
  Eastern Standard Time in North America (which is 5 hours behind UTC)

Where there are two abbreviations the more popular one will appear in the all
dictionary, while the less common one will only appear in that country's region
dictionary. IE If using all, EST will be mapped to Eastern Standard Time in
North America.

CAVEAT 2: Many of the acronyms don't map to a neat Olson timezone. For example,
Eastern European Summer Time (EEDT) is used by many different countries in
Europe *at different times*! If the acronym does not map neatly to one zone it
is mapped to the Etc/GMT+-XX Olson zone. This means that any date manipulations
can end up with odd results like summer time in the middle of winter.

CAVEAT 3: The Summer/Standard time difference is really important! For an hour
each year it is needed to determine which time you are actually talking about.
    2002-10-27 01:20:00 EST != 2002-10-27 01:20:00 EDT
"""

import datetime
import pytz
import pytz.tzfile


class tzabbr(datetime.tzinfo):
  """A timezone abbreviation.

  *WARNING*: This is not a tzinfo implementation! Trying to use this as tzinfo
  object will result in failure.  We inherit from datetime.tzinfo so we can get
  through the dateutil checks.
  """
  pass


# A "marker" tzinfo object which is used to signify an unknown timezone.
unknown = datetime.tzinfo(0)


regions = {'all': {}, 'military': {}}
# Create a special alias for the all and military regions
all = regions['all']
military = regions['military']


def tzabbr_register(abbr, name, region, zone, dst):
  """Register a new timezone abbreviation in the global registry.

  If another abbreviation with the same name has already been registered, the
  new abbreviation will only be registered in the region-specific dictionary.
  """
  newabbr = tzabbr()
  newabbr.abbr = abbr
  newabbr.name = name
  newabbr.region = region
  newabbr.zone = zone
  newabbr.dst = dst

  if abbr not in all:
    all[abbr] = newabbr

  if not region in regions:
    regions[region] = {}

  assert abbr not in regions[region]
  regions[region][abbr] = newabbr


def tzinfos_create(use_region):
  abbrs = regions[use_region]

  def tzinfos(abbr, offset):
    if abbr:
      if abbr in abbrs:
        result = abbrs[abbr]
        if offset:
          # FIXME: Check the offset matches the abbreviation we just selected.
          pass
        return result
      else:
        raise ValueError, "Unknown timezone found %s" % abbr
    if offset == 0:
      return pytz.utc
    if offset:
      return pytz.FixedOffset(offset/60)
    return unknown

  return tzinfos


# Create a special alias for the all tzinfos
tzinfos = tzinfos_create('all')


# Create the abbreviations.
# *WARNING*: Order matters!
tzabbr_register("A", u"Alpha Time Zone", u"Military", "Etc/GMT-1", False)
tzabbr_register("ACDT", u"Australian Central Daylight Time", u"Australia",
                "Australia/Adelaide", True)
tzabbr_register("ACST", u"Australian Central Standard Time", u"Australia",
                "Australia/Adelaide", False)
tzabbr_register("ADT", u"Atlantic Daylight Time", u"North America",
                "America/Halifax", True)
tzabbr_register("AEDT", u"Australian Eastern Daylight Time", u"Australia",
                "Australia/Sydney", True)
tzabbr_register("AEST", u"Australian Eastern Standard Time", u"Australia",
                "Australia/Sydney", False)
tzabbr_register("AKDT", u"Alaska Daylight Time", u"North America",
                "US/Alaska", True)
tzabbr_register("AKST", u"Alaska Standard Time", u"North America",
                "US/Alaska", False)
tzabbr_register("AST", u"Atlantic Standard Time", u"North America",
                "America/Halifax", False)
tzabbr_register("AWDT", u"Australian Western Daylight Time", u"Australia",
                "Australia/West", True)
tzabbr_register("AWST", u"Australian Western Standard Time", u"Australia",
                "Australia/West", False)
tzabbr_register("B", u"Bravo Time Zone", u"Military", "Etc/GMT-2", False)
tzabbr_register("BST", u"British Summer Time", u"Europe", "Europe/London", True)
tzabbr_register("C", u"Charlie Time Zone", u"Military", "Etc/GMT-3", False)
tzabbr_register("CDT", u"Central Daylight Time", u"North America",
                "US/Central", True)
tzabbr_register("CEDT", u"Central European Daylight Time", u"Europe",
                "Etc/GMT+2", True)
tzabbr_register("CEST", u"Central European Summer Time", u"Europe",
                "Etc/GMT+2", True)
tzabbr_register("CET", u"Central European Time", u"Europe", "Etc/GMT+1", False)
tzabbr_register("CST", u"Central Standard Time", u"North America",
                "US/Central", False)
tzabbr_register("CXT", u"Christmas Island Time", u"Australia",
                "Indian/Christmas", False)
tzabbr_register("D", u"Delta Time Zone", u"Military", "Etc/GMT-4", False)
tzabbr_register("E", u"Echo Time Zone", u"Military", "Etc/GMT-5", False)
tzabbr_register("EDT", u"Eastern Daylight Time", u"North America",
                "US/Eastern", True)
tzabbr_register("EEDT", u"Eastern European Daylight Time", u"Europe",
                "Etc/GMT+3", True)
tzabbr_register("EEST", u"Eastern European Summer Time", u"Europe",
                "Etc/GMT+3", True)
tzabbr_register("EET", u"Eastern European Time", u"Europe", "Etc/GMT+2", False)
tzabbr_register("EST", u"Eastern Standard Time", u"North America",
                "US/Eastern", False)
tzabbr_register("F", u"Foxtrot Time Zone", u"Military", "Etc/GMT-6", False)
tzabbr_register("G", u"Golf Time Zone", u"Military", "Etc/GMT-7", False)
tzabbr_register("GMT", u"Greenwich Mean Time", u"Europe", pytz.utc, False)
tzabbr_register("H", u"Hotel Time Zone", u"Military", "Etc/GMT-8", False)
#tzabbr_register("HAA", u"Heure Avancée de l'Atlantique", u"North America", u"UTC - 3 hours")
#tzabbr_register("HAC", u"Heure Avancée du Centre", u"North America", u"UTC - 5 hours")
tzabbr_register("HADT", u"Hawaii-Aleutian Daylight Time", u"North America",
                "Pacific/Honolulu", True)
#tzabbr_register("HAE", u"Heure Avancée de l'Est", u"North America", u"UTC - 4 hours")
#tzabbr_register("HAP", u"Heure Avancée du Pacifique", u"North America", u"UTC - 7 hours")
#tzabbr_register("HAR", u"Heure Avancée des Rocheuses", u"North America", u"UTC - 6 hours")
tzabbr_register("HAST", u"Hawaii-Aleutian Standard Time", u"North America",
                "Pacific/Honolulu", False)
#tzabbr_register("HAT", u"Heure Avancée de Terre-Neuve", u"North America", u"UTC - 2:30 hours")
#tzabbr_register("HAY", u"Heure Avancée du Yukon", u"North America", u"UTC - 8 hours")
tzabbr_register("HDT", u"Hawaii Daylight Time", u"North America",
                "Pacific/Honolulu", True)
#tzabbr_register("HNA", u"Heure Normale de l'Atlantique", u"North America", u"UTC - 4 hours")
#tzabbr_register("HNC", u"Heure Normale du Centre", u"North America", u"UTC - 6 hours")
#tzabbr_register("HNE", u"Heure Normale de l'Est", u"North America", u"UTC - 5 hours")
#tzabbr_register("HNP", u"Heure Normale du Pacifique", u"North America", u"UTC - 8 hours")
#tzabbr_register("HNR", u"Heure Normale des Rocheuses", u"North America", u"UTC - 7 hours")
#tzabbr_register("HNT", u"Heure Normale de Terre-Neuve", u"North America", u"UTC - 3:30 hours")
#tzabbr_register("HNY", u"Heure Normale du Yukon", u"North America", u"UTC - 9 hours")
tzabbr_register("HST", u"Hawaii Standard Time", u"North America",
                "Pacific/Honolulu", False)
tzabbr_register("I", u"India Time Zone", u"Military", "Etc/GMT-9", False)
tzabbr_register("IST", u"Irish Summer Time", u"Europe", "Europe/Dublin", True)
tzabbr_register("K", u"Kilo Time Zone", u"Military", "Etc/GMT-10", False)
tzabbr_register("L", u"Lima Time Zone", u"Military", "Etc/GMT-11", False)
tzabbr_register("M", u"Mike Time Zone", u"Military", "Etc/GMT-12", False)
tzabbr_register("MDT", u"Mountain Daylight Time", u"North America",
                "US/Mountain", True)
#tzabbr_register("MESZ", u"Mitteleuropäische Sommerzeit", u"Europe", u"UTC + 2 hours")
#tzabbr_register("MEZ", u"Mitteleuropäische Zeit", u"Europe", u"UTC + 1 hour")
tzabbr_register("MSD", u"Moscow Daylight Time", u"Europe",
                "Europe/Moscow", True)
tzabbr_register("MSK", u"Moscow Standard Time", u"Europe",
                "Europe/Moscow", False)
tzabbr_register("MST", u"Mountain Standard Time", u"North America",
                "US/Mountain", False)
tzabbr_register("N", u"November Time Zone", u"Military", "Etc/GMT+1", False)
tzabbr_register("NDT", u"Newfoundland Daylight Time", u"North America",
                "America/St_Johns", True)
tzabbr_register("NFT", u"Norfolk (Island) Time", u"Australia",
                "Pacific/Norfolk", False)
tzabbr_register("NST", u"Newfoundland Standard Time", u"North America",
                "America/St_Johns", False)
tzabbr_register("O", u"Oscar Time Zone", u"Military", "Etc/GMT+2", False)
tzabbr_register("P", u"Papa Time Zone", u"Military", "Etc/GMT+3", False)
tzabbr_register("PDT", u"Pacific Daylight Time", u"North America",
                "US/Pacific", True)
tzabbr_register("PST", u"Pacific Standard Time", u"North America",
                "US/Pacific", False)
tzabbr_register("Q", u"Quebec Time Zone", u"Military", "Etc/GMT+4", False)
tzabbr_register("R", u"Romeo Time Zone", u"Military", "Etc/GMT+5", False)
tzabbr_register("S", u"Sierra Time Zone", u"Military", "Etc/GMT+6", False)
tzabbr_register("T", u"Tango Time Zone", u"Military", "Etc/GMT+7", False)
tzabbr_register("U", u"Uniform Time Zone", u"Military", "Etc/GMT+8", False)
tzabbr_register("UTC", u"Coordinated Universal Time", u"Europe",
                pytz.utc, False)
tzabbr_register("V", u"Victor Time Zone", u"Military", "Etc/GMT+9", False)
tzabbr_register("W", u"Whiskey Time Zone", u"Military", "Etc/GMT+10", False)
tzabbr_register("WDT", u"Western Daylight Time", u"Australia",
                "Australia/West", True)
tzabbr_register("WEDT", u"Western European Daylight Time", u"Europe",
                "Etc/GMT+1", True)
tzabbr_register("WEST", u"Western European Summer Time", u"Europe",
                "Etc/GMT+1", True)
tzabbr_register("WET", u"Western European Time", u"Europe", pytz.utc, False)
tzabbr_register("WST", u"Western Standard Time", u"Australia",
                "Australia/West", False)
tzabbr_register("X", u"X-ray Time Zone", u"Military", "Etc/GMT+11", False)
tzabbr_register("Y", u"Yankee Time Zone", u"Military", "Etc/GMT+12", False)
tzabbr_register("Z", u"Zulu Time Zone", u"Military", pytz.utc, False)
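As a usage sketch for the registry defined above (hedged: the bare import name pytz_abbr is an assumption for illustration; inside nilmdb the module lives at nilmdb.utils.datetime_tz.pytz_abbr):

    import pytz_abbr  # assumed import name; adjust to the real location

    # CAVEAT 1 in action: the 'all' dictionary resolves an ambiguous
    # abbreviation to its more popular meaning.
    est = pytz_abbr.all["EST"]
    print(est.name + " / " + est.zone + " / dst=" + str(est.dst))
    # -> Eastern Standard Time / US/Eastern / dst=False

    # tzinfos() has the (abbr, offset) shape that dateutil-style parsers
    # expect for their tzinfos callback.
    print(pytz_abbr.tzinfos("GMT", None).zone)  # resolves to pytz.utc
    print(pytz_abbr.tzinfos(None, 3600))        # falls back to a fixed offset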
@ -2,21 +2,19 @@ import os
 import errno
 from math import log


 def human_size(num):
     """Human friendly file size"""
-    unit_list = list(zip(['bytes', 'kiB', 'MiB', 'GiB', 'TiB'],
-                         [0, 0, 1, 2, 2]))
-    if num == 0:
-        return '0 bytes'
-    if num == 1:
-        return '1 byte'
+    unit_list = zip(['bytes', 'kiB', 'MiB', 'GiB', 'TiB'], [0, 0, 1, 2, 2])
+    if num > 1:
     exponent = min(int(log(num, 1024)), len(unit_list) - 1)
     quotient = float(num) / 1024**exponent
     unit, num_decimals = unit_list[exponent]
     format_string = '{:.%sf} {}' % (num_decimals)
     return format_string.format(quotient, unit)
+    if num == 0: # pragma: no cover
+        return '0 bytes'
+    if num == 1: # pragma: no cover
+        return '1 byte'

 def du(path):
     """Like du -sb, returns total size of path in bytes.  Ignore
@ -30,7 +28,7 @@ def du(path):
             filepath = os.path.join(path, thisfile)
             size += du(filepath)
         return size
-    except OSError as e:
+    except OSError as e: # pragma: no cover
         if e.errno != errno.ENOENT:
             raise
         return 0
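As a sanity check on the rounding behavior, the master-side version condenses to this standalone sketch (plain Python, no nilmdb imports):

    from math import log

    def human_size(num):
        """Human friendly file size (condensed from the master branch above)"""
        unit_list = list(zip(['bytes', 'kiB', 'MiB', 'GiB', 'TiB'],
                             [0, 0, 1, 2, 2]))
        if num == 0:
            return '0 bytes'
        if num == 1:
            return '1 byte'
        exponent = min(int(log(num, 1024)), len(unit_list) - 1)
        quotient = float(num) / 1024**exponent
        unit, num_decimals = unit_list[exponent]
        return ('{:.%sf} {}' % num_decimals).format(quotient, unit)

    print(human_size(1))        # 1 byte
    print(human_size(1536))     # 2 kiB (kiB is formatted with 0 decimals)
    print(human_size(10**12))   # 931.32 GiB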
@ -1,20 +1,49 @@
 # Implementation of hole punching via fallocate, if the OS
 # and filesystem support it.

-import fallocate
+try:
+    import os
+    import ctypes
+    import ctypes.util
+
+    def make_fallocate():
+        libc_name = ctypes.util.find_library('c')
+        libc = ctypes.CDLL(libc_name, use_errno=True)

-def punch_hole(filename, offset, length, ignore_errors=True):
+        _fallocate = libc.fallocate
+        _fallocate.restype = ctypes.c_int
+        _fallocate.argtypes = [ ctypes.c_int, ctypes.c_int,
+                                ctypes.c_int64, ctypes.c_int64 ]
+
+        del libc
+        del libc_name
+
+        def fallocate(fd, mode, offset, len_):
+            res = _fallocate(fd, mode, offset, len_)
+            if res != 0: # pragma: no cover
+                errno = ctypes.get_errno()
+                raise IOError(errno, os.strerror(errno))
+        return fallocate
+
+    fallocate = make_fallocate()
+    del make_fallocate
+except Exception: # pragma: no cover
+    fallocate = None
+
+FALLOC_FL_KEEP_SIZE = 0x01
+FALLOC_FL_PUNCH_HOLE = 0x02
+
+def punch_hole(filename, offset, length, ignore_errors = True):
     """Punch a hole in the file.  This isn't well supported, so errors
     are ignored by default."""
     try:
+        if fallocate is None: # pragma: no cover
+            raise IOError("fallocate not available")
         with open(filename, "r+") as f:
-            fallocate.fallocate(
-                f.fileno(),
-                offset,
-                length,
-                fallocate.FALLOC_FL_KEEP_SIZE | fallocate.FALLOC_FL_PUNCH_HOLE)
-    except Exception:
+            fallocate(f.fileno(),
+                      FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE,
+                      offset, length)
+    except IOError: # pragma: no cover
         if ignore_errors:
             return
         raise
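Both variants are called the same way. A sketch of the intended effect (assumes Linux and a filesystem such as ext4 or xfs that supports FALLOC_FL_PUNCH_HOLE; with ignore_errors left True, unsupported systems silently do nothing):

    import os

    path = "/tmp/sparse.dat"              # illustrative path
    with open(path, "wb") as f:
        f.write(b"\xff" * (1024 * 1024))  # 1 MiB of allocated data

    punch_hole(path, 4096, 65536)         # deallocate 64 KiB starting at 4 KiB

    st = os.stat(path)
    print(st.st_size)    # logical size unchanged (FALLOC_FL_KEEP_SIZE)
    print(st.st_blocks)  # allocated block count drops if the punch succeeded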
@ -9,12 +9,10 @@ Intervals are half-open, ie. they include data points with timestamps
 import nilmdb.utils.time
 import nilmdb.utils.iterator

-
 class IntervalError(Exception):
     """Error due to interval overlap, etc"""
     pass

-
 # Interval
 class Interval:
     """Represents an interval of time."""
@ -24,7 +22,7 @@ class Interval:
         'start' and 'end' are arbitrary numbers that represent time
         """
         if start >= end:
-            # Explicitly disallow zero-width intervals, since they're half-open
+            # Explicitly disallow zero-width intervals (since they're half-open)
             raise IntervalError("start %s must precede end %s" % (start, end))
         self.start = start
         self.end = end
@ -41,24 +39,9 @@ class Interval:
         return ("[ " + nilmdb.utils.time.timestamp_to_human(self.start) +
                 " -> " + nilmdb.utils.time.timestamp_to_human(self.end) + " ]")

-    # Compare two intervals.  If non-equal, order by start then end
-    def __lt__(self, other):
-        return (self.start, self.end) < (other.start, other.end)
-
-    def __gt__(self, other):
-        return (self.start, self.end) > (other.start, other.end)
-
-    def __le__(self, other):
-        return (self.start, self.end) <= (other.start, other.end)
-
-    def __ge__(self, other):
-        return (self.start, self.end) >= (other.start, other.end)
-
-    def __eq__(self, other):
-        return (self.start, self.end) == (other.start, other.end)
-
-    def __ne__(self, other):
-        return (self.start, self.end) != (other.start, other.end)
+    def __cmp__(self, other):
+        """Compare two intervals.  If non-equal, order by start then end"""
+        return cmp(self.start, other.start) or cmp(self.end, other.end)

     def intersects(self, other):
         """Return True if two Interval objects intersect"""
@ -75,8 +58,7 @@ class Interval:
             raise IntervalError("not a subset")
         return Interval(start, end)

-
-def _interval_math_helper(a, b, op, subset=True):
+def _interval_math_helper(a, b, op, subset = True):
     """Helper for set_difference, intersection functions,
     to compute interval subsets based on a math operator on ranges
     present in A and B.  Subsets are computed from A, or new intervals
@ -106,7 +88,7 @@ def _interval_math_helper(a, b, op, subset=True):
                 in_b = True
             elif k == 2:
                 in_a = False
-            else:  # k == 3
+            elif k == 3:
                 in_b = False
             include = op(in_a, in_b)
             if include and out_start is None:
@ -119,7 +101,6 @@ def _interval_math_helper(a, b, op, subset=True):
                 yield Interval(out_start, ts)
                 out_start = None

-
 def set_difference(a, b):
     """
     Compute the difference (a \\ b) between the intervals in 'a' and
@ -134,7 +115,6 @@ def set_difference(a, b):
     """
     return _interval_math_helper(a, b, (lambda a, b: a and not b))

-
 def intersection(a, b):
     """
     Compute the intersection between the intervals in 'a' and the
@ -149,7 +129,6 @@ def intersection(a, b):
     """
     return _interval_math_helper(a, b, (lambda a, b: a and b))

-
 def optimize(it):
     """
     Given an iterable 'it' with intervals, optimize them by joining
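Both sides order intervals by (start, end); only the mechanism differs (six rich comparisons on master, a single __cmp__ on python2). A short sketch of the semantics:

    a = Interval(10, 20)
    b = Interval(10, 30)
    print(a < b)                           # True: same start, earlier end
    print(a.intersects(Interval(15, 25)))  # True: [10,20) overlaps [15,25)

    try:
        Interval(5, 5)                     # zero-width intervals are rejected
    except IntervalError as e:
        print(e)                           # "start 5 must precede end 5"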
@ -2,8 +2,6 @@

 # Iterator merging, based on http://code.activestate.com/recipes/491285/
 import heapq

-
-
 def imerge(*iterables):
     '''Merge multiple sorted inputs into a single sorted output.
@ -19,8 +17,8 @@ def imerge(*iterables):
     h_append = h.append
     for it in map(iter, iterables):
         try:
-            nexter = it.__next__
-            h_append([nexter(), nexter])
+            next = it.next
+            h_append([next(), next])
         except _Stop:
             pass
     heapq.heapify(h)
@ -28,9 +26,9 @@ def imerge(*iterables):
     while 1:
         try:
             while 1:
-                v, nexter = s = h[0]  # raises IndexError when h is empty
+                v, next = s = h[0]    # raises IndexError when h is empty
                 yield v
-                s[0] = nexter()       # raises StopIteration when exhausted
+                s[0] = next()         # raises StopIteration when exhausted
                 siftup(h, 0)          # restore heap condition
         except _Stop:
             heappop(h)                # remove empty iterator
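Usage is identical on both sides; only the spelling of the iterator protocol changed. The merge is lazy, holding one pending value per input on a heap:

    print(list(imerge([1, 4, 7], [2, 5, 8], [3, 6, 9])))
    # -> [1, 2, 3, 4, 5, 6, 7, 8, 9]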
@ -1,10 +1,12 @@
 # File locking

-import fcntl
-import errno
+import warnings
+
+try:
+    import fcntl
+    import errno

 def exclusive_lock(f):
     """Acquire an exclusive lock.  Returns True on successful
     lock, or False on error."""
     try:
@ -12,11 +14,20 @@ def exclusive_lock(f):
     except IOError as e:
         if e.errno in (errno.EACCES, errno.EAGAIN):
             return False
-        else:
+        else: # pragma: no cover
             raise
     return True

 def exclusive_unlock(f):
     """Release an exclusive lock."""
     fcntl.flock(f.fileno(), fcntl.LOCK_UN)
+
+except ImportError: # pragma: no cover
+    def exclusive_lock(f):
+        """Dummy lock function -- does not lock!"""
+        warnings.warn("Pretending to lock " + str(f))
+        return True
+
+    def exclusive_unlock(f):
+        """Release an exclusive lock."""
+        return
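A typical caller pattern, common to both variants (the lock file path is illustrative):

    lockfile = open("/tmp/nilmdb.lock", "w")
    if not exclusive_lock(lockfile):
        raise SystemExit("another process already holds the lock")
    try:
        pass  # ... work that requires exclusive access ...
    finally:
        exclusive_unlock(lockfile)
        lockfile.close()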
@ -6,11 +6,10 @@
 import collections
 import decorator

-def lru_cache(size=10, onremove=None, keys=slice(None)):
+def lru_cache(size = 10, onremove = None, keys = slice(None)):
     """Least-recently-used cache decorator.

-    @lru_cache(size=10, onremove=None)
+    @lru_cache(size = 10, onevict = None)
     def f(...):
         pass
@ -54,17 +53,14 @@ def lru_cache(size=10, onremove=None, keys=slice(None)):
         if key in cache:
             evict(cache.pop(key))
         else:
-            if cache:
-                if len(args) != len(next(iter(cache.keys()))):
+            if len(cache) > 0 and len(args) != len(cache.iterkeys().next()):
                 raise KeyError("trying to remove from LRU cache, but "
                                "number of arguments doesn't match the "
                                "cache key length")

     def cache_remove_all():
-        nonlocal cache
         for key in cache:
-            evict(cache[key])
-        cache = collections.OrderedDict()
+            evict(cache.pop(key))

     def cache_info():
         return (func.cache_hits, func.cache_misses)
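A usage sketch (hedged: it assumes the decorator attaches the cache_info helper shown above to the wrapped function, which is how the rest of this codebase reads the hit/miss counters):

    @lru_cache(size=2)
    def fetch(path):
        print("miss " + path)
        return path.upper()

    fetch("/stream/a")   # miss
    fetch("/stream/a")   # hit: nothing printed
    fetch("/stream/b")   # miss
    fetch("/stream/c")   # miss; with size=2, "/stream/a" is evicted
    print(fetch.cache_info())   # (hits, misses) -> (1, 3)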
@ -1,10 +1,9 @@
+from nilmdb.utils.printf import *
 import sys
 import inspect
 import decorator
-from nilmdb.utils.printf import fprintf

-def must_close(errorfile=sys.stderr, wrap_verify=False):
+def must_close(errorfile = sys.stderr, wrap_verify = False):
     """Class decorator that warns on 'errorfile' at deletion time if
     the class's close() member wasn't called.
@ -13,17 +12,12 @@ def must_close(errorfile=sys.stderr, wrap_verify=False):
     already been called."""
     def class_decorator(cls):

-        def is_method_or_function(x):
-            return inspect.ismethod(x) or inspect.isfunction(x)
-
         def wrap_class_method(wrapper):
             try:
-                orig = getattr(cls, wrapper.__name__)
-            except AttributeError:
+                orig = getattr(cls, wrapper.__name__).im_func
+            except Exception:
                 orig = lambda x: None
-            if is_method_or_function(orig):
-                setattr(cls, wrapper.__name__,
-                        decorator.decorator(wrapper, orig))
+            setattr(cls, wrapper.__name__, decorator.decorator(wrapper, orig))

         @wrap_class_method
         def __init__(orig, self, *args, **kwargs):
@ -39,7 +33,7 @@ def must_close(errorfile=sys.stderr, wrap_verify=False):
                 fprintf(errorfile, "error: %s.close() wasn't called!\n",
                         self.__class__.__name__)
             return orig(self, *args, **kwargs)
-        except:
+        except: # pragma: no cover
             pass

         @wrap_class_method
@ -55,17 +49,16 @@ def must_close(errorfile=sys.stderr, wrap_verify=False):
                 raise AssertionError("called " + str(orig) + " after close")
             return orig(self, *args, **kwargs)
         if wrap_verify:
-            for (name, method) in inspect.getmembers(cls,
-                                                     is_method_or_function):
+            for (name, method) in inspect.getmembers(cls, inspect.ismethod):
+                # Skip class methods
+                if method.__self__ is not None:
+                    continue
                 # Skip some methods
-                if name in ["__del__", "__init__"]:
+                if name in [ "__del__", "__init__" ]:
                     continue
                 # Set up wrapper
-                if inspect.ismethod(method):
-                    func = method.__func__
-                else:
-                    func = method
-                setattr(cls, name, decorator.decorator(verifier, func))
+                setattr(cls, name, decorator.decorator(verifier,
+                                                       method.im_func))

         return cls
     return class_decorator
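What the decorator enforces, as a sketch:

    import sys

    @must_close(errorfile=sys.stderr)
    class Widget(object):
        def __init__(self):
            self.opened = True
        def close(self):
            self.opened = False

    w = Widget()
    w.close()   # without this call, deleting w would print
    del w       # "error: Widget.close() wasn't called!" to stderr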
@ -1,13 +1,9 @@
 """printf, fprintf, sprintf"""

+from __future__ import print_function
 def printf(_str, *args):
     print(_str % args, end='')

-
 def fprintf(_file, _str, *args):
     print(_str % args, end='', file=_file)

-
 def sprintf(_str, *args):
     return (_str % args)
@ -1,6 +1,10 @@
-import queue
+import Queue
 import threading
 import sys
+import decorator
+import inspect
+import types
+import functools

 # This file provides a class that will wrap an object and serialize
 # all calls to its methods.  All calls to that object will be queued
@ -9,7 +13,6 @@ import sys

 # Based partially on http://stackoverflow.com/questions/2642515/

-
 class SerializerThread(threading.Thread):
     """Thread that retrieves call information from the queue, makes the
     call, and returns the results."""
@ -37,7 +40,6 @@ class SerializerThread(threading.Thread):
             result_queue.put((exception, result))
             del exception, result

-
 def serializer_proxy(obj_or_type):
     """Wrap the given object or type in a SerializerObjectProxy.
@ -47,54 +49,45 @@ def serializer_proxy(obj_or_type):
     The proxied requests, including instantiation, are performed in a
     single thread and serialized between caller threads.
     """
-    class SerializerCallProxy():
+    class SerializerCallProxy(object):
         def __init__(self, call_queue, func, objectproxy):
             self.call_queue = call_queue
             self.func = func
             # Need to hold a reference to object proxy so it doesn't
             # go away (and kill the thread) until after get called.
             self.objectproxy = objectproxy

         def __call__(self, *args, **kwargs):
-            result_queue = queue.Queue()
+            result_queue = Queue.Queue()
             self.call_queue.put((result_queue, self.func, args, kwargs))
-            (exc_info, result) = result_queue.get()
+            ( exc_info, result ) = result_queue.get()
             if exc_info is None:
                 return result
             else:
-                raise exc_info[1].with_traceback(exc_info[2])
+                raise exc_info[0], exc_info[1], exc_info[2]

-    class SerializerObjectProxy():
+    class SerializerObjectProxy(object):
         def __init__(self, obj_or_type, *args, **kwargs):
             self.__object = obj_or_type
-            if isinstance(obj_or_type, type):
-                classname = obj_or_type.__name__
-            else:
-                classname = obj_or_type.__class__.__name__
-            self.__call_queue = queue.Queue()
+            try:
+                if type(obj_or_type) in (types.TypeType, types.ClassType):
+                    classname = obj_or_type.__name__
+                else:
+                    classname = obj_or_type.__class__.__name__
+            except AttributeError: # pragma: no cover
+                classname = "???"
+            self.__call_queue = Queue.Queue()
             self.__thread = SerializerThread(classname, self.__call_queue)
             self.__thread.daemon = True
             self.__thread.start()
             self._thread_safe = True

         def __getattr__(self, key):
-            # If the attribute is a function, we want to return a
-            # proxy that will perform the call through the serializer
-            # when called.  Otherwise, we want to return the value
-            # directly.  This means we need to grab the attribute once,
-            # and therefore self.__object.__getattr__ may be called
-            # in an unsafe way, from the caller's thread.
+            if key.startswith("_SerializerObjectProxy__"): # pragma: no cover
+                raise AttributeError
             attr = getattr(self.__object, key)
             if not callable(attr):
-                # It's not callable, so perform the getattr from within
-                # the serializer thread, then return its value.
-                # That may differ from the "attr" value we just grabbed
-                # from here, due to forced ordering in the serializer.
                 getter = SerializerCallProxy(self.__call_queue, getattr, self)
                 return getter(self.__object, key)
-            else:
-                # It is callable, so return an object that will proxy through
-                # the serializer when called.
             r = SerializerCallProxy(self.__call_queue, attr, self)
             return r
@ -105,10 +98,9 @@ def serializer_proxy(obj_or_type):
             attr = getattr(self.__object, "__iter__")
             self.__iter = SerializerCallProxy(self.__call_queue, attr, self)()
             return self

-        def __next__(self):
+        def next(self):
             return SerializerCallProxy(self.__call_queue,
-                                       self.__iter.__next__, self)()
+                                       self.__iter.next, self)()

         def __getitem__(self, key):
             return self.__getattr__("__getitem__")(key)
@ -118,7 +110,7 @@ def serializer_proxy(obj_or_type):
             to serializer_proxy.  Otherwise, pass the call through."""
             ret = SerializerCallProxy(self.__call_queue,
                                       self.__object, self)(*args, **kwargs)
-            if isinstance(self.__object, type):
+            if type(self.__object) in (types.TypeType, types.ClassType):
                 # Instantiation
                 self.__object = ret
             return self
@ -126,9 +118,9 @@ def serializer_proxy(obj_or_type):

         def __del__(self):
             try:
-                # Signal thread to exit, but don't wait for it.
                 self.__call_queue.put((None, None, None, None))
-            except:
+                self.__thread.join()
+            except: # pragma: no cover
                 pass

     return SerializerObjectProxy(obj_or_type)
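A sketch of the proxy in action: instantiation and every method call run on the proxy's single worker thread, which is why nilmdb uses this to share one non-thread-safe object among server threads:

    class Counter(object):
        def __init__(self):
            self.n = 0
        def bump(self):
            self.n += 1
            return self.n

    c = serializer_proxy(Counter)()  # passing a type: instantiated on the worker
    print(c.bump())                  # 1, executed on the serializer thread
    print(c.bump())                  # 2, strictly ordered after the first call
    print(c.n)                       # attribute reads are routed through it too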
@ -1,7 +1,6 @@
 import re

-
-def sort_human(items, key=None):
+def sort_human(items, key = None):
     """Human-friendly sort (/stream/2 before /stream/10)"""
     def to_num(val):
         try:
@ -14,6 +13,6 @@ def sort_human(items, key=None):
         text = key(text)
         # Break into character and numeric chunks.
         chunks = re.split(r'([0-9]+)', text)
-        return [to_num(c) for c in chunks]
+        return [ to_num(c) for c in chunks ]

-    return sorted(items, key=human_key)
+    return sorted(items, key = human_key)
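The point of the numeric chunking, in one comparison:

    paths = ["/stream/10", "/stream/2", "/stream/1"]
    print(sorted(paths))      # ['/stream/1', '/stream/10', '/stream/2']
    print(sort_human(paths))  # ['/stream/1', '/stream/2', '/stream/10']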
@ -1,25 +1,26 @@
+from nilmdb.utils.printf import *
 import threading
-from nilmdb.utils.printf import sprintf
+import warnings
+import types

-def verify_proxy(obj_or_type, check_thread=True,
-                 check_concurrent=True):
+def verify_proxy(obj_or_type, exception = False, check_thread = True,
+                 check_concurrent = True):
     """Wrap the given object or type in a VerifyObjectProxy.

     Returns a VerifyObjectProxy that proxies all method calls to the
     given object, as well as attribute retrievals.

-    When calling methods, the following checks are performed.  On
-    failure, an exception is raised.
+    When calling methods, the following checks are performed.  If
+    exception is True, an exception is raised.  Otherwise, a warning
+    is printed.

-    check_thread = True     # Fail if two different threads call methods.
-    check_concurrent = True # Fail if two functions are concurrently
+    check_thread = True     # Warn/fail if two different threads call methods.
+    check_concurrent = True # Warn/fail if two functions are concurrently
                             # run through this proxy
     """
-    class Namespace():
+    class Namespace(object):
         pass

-    class VerifyCallProxy():
+    class VerifyCallProxy(object):
         def __init__(self, func, parent_namespace):
             self.func = func
             self.parent_namespace = parent_namespace
@ -41,16 +42,22 @@ def verify_proxy(obj_or_type, check_thread=True,
                           " but %s called %s.%s",
                           p.thread.name, p.classname, p.thread_callee,
                           this.name, p.classname, callee)
-            raise AssertionError(err)
+            if exception:
+                raise AssertionError(err)
+            else: # pragma: no cover
+                warnings.warn(err)

         need_concur_unlock = False
         if check_concurrent:
-            if not p.concur_lock.acquire(False):
+            if p.concur_lock.acquire(False) == False:
                 err = sprintf("unsafe concurrency: %s called %s.%s "
                               "while %s is still in %s.%s",
                               this.name, p.classname, callee,
                               p.concur_tname, p.classname, p.concur_callee)
-                raise AssertionError(err)
+                if exception:
+                    raise AssertionError(err)
+                else: # pragma: no cover
+                    warnings.warn(err)
             else:
                 p.concur_tname = this.name
                 p.concur_callee = callee
@ -63,7 +70,7 @@ def verify_proxy(obj_or_type, check_thread=True,
                 p.concur_lock.release()
         return ret

-    class VerifyObjectProxy():
+    class VerifyObjectProxy(object):
         def __init__(self, obj_or_type, *args, **kwargs):
             p = Namespace()
             self.__ns = p
@ -73,12 +80,17 @@ def verify_proxy(obj_or_type, check_thread=True,
             p.concur_tname = None
             p.concur_callee = None
             self.__obj = obj_or_type
-            if isinstance(obj_or_type, type):
-                p.classname = self.__obj.__name__
-            else:
-                p.classname = self.__obj.__class__.__name__
+            try:
+                if type(obj_or_type) in (types.TypeType, types.ClassType):
+                    p.classname = self.__obj.__name__
+                else:
+                    p.classname = self.__obj.__class__.__name__
+            except AttributeError: # pragma: no cover
+                p.classname = "???"

         def __getattr__(self, key):
+            if key.startswith("_VerifyObjectProxy__"): # pragma: no cover
+                raise AttributeError
             attr = getattr(self.__obj, key)
             if not callable(attr):
                 return VerifyCallProxy(getattr, self.__ns)(self.__obj, key)
@ -88,7 +100,7 @@ def verify_proxy(obj_or_type, check_thread=True,
             """Call this to instantiate the type, if a type was passed
             to verify_proxy.  Otherwise, pass the call through."""
             ret = VerifyCallProxy(self.__obj, self.__ns)(*args, **kwargs)
-            if isinstance(self.__obj, type):
+            if type(self.__obj) in (types.TypeType, types.ClassType):
                 # Instantiation
                 self.__obj = ret
             return self
@ -1,6 +1,8 @@
+from __future__ import absolute_import
+
+from nilmdb.utils import datetime_tz
 import re
 import time
-import datetime_tz

 # Range
 min_timestamp = (-2**63)
@ -9,17 +11,15 @@ max_timestamp = (2**63 - 1)
 # Smallest representable step
 epsilon = 1

-def string_to_timestamp(string):
+def string_to_timestamp(str):
     """Convert a string that represents an integer number of microseconds
     since epoch."""
     try:
         # Parse a string like "1234567890123456" and return an integer
-        return int(string)
+        return int(str)
     except ValueError:
         # Try parsing as a float, in case it's "1234567890123456.0"
-        return int(round(float(string)))
+        return int(round(float(str)))

 def timestamp_to_string(timestamp):
     """Convert a timestamp (integer microseconds since epoch) to a string"""
@ -28,13 +28,6 @@ def timestamp_to_string(timestamp):
     else:
         return str(timestamp)

-
-def timestamp_to_bytes(timestamp):
-    """Convert a timestamp (integer microseconds since epoch) to a Python
-    bytes object"""
-    return timestamp_to_string(timestamp).encode('utf-8')
-

 def timestamp_to_human(timestamp):
     """Convert a timestamp (integer microseconds since epoch) to a
     human-readable string, using the local timezone for display
@ -46,30 +39,24 @@ def timestamp_to_human(timestamp):
     dt = datetime_tz.datetime_tz.fromtimestamp(timestamp_to_unix(timestamp))
     return dt.strftime("%a, %d %b %Y %H:%M:%S.%f %z")

-
 def unix_to_timestamp(unix):
     """Convert a Unix timestamp (floating point seconds since epoch)
     into a NILM timestamp (integer microseconds since epoch)"""
     return int(round(unix * 1e6))
+seconds_to_timestamp = unix_to_timestamp

 def timestamp_to_unix(timestamp):
     """Convert a NILM timestamp (integer microseconds since epoch)
     into a Unix timestamp (floating point seconds since epoch)"""
     return timestamp / 1e6
-
-
-seconds_to_timestamp = unix_to_timestamp
 timestamp_to_seconds = timestamp_to_unix

-
-def rate_to_period(hz, cycles=1):
+def rate_to_period(hz, cycles = 1):
     """Convert a rate (in Hz) to a period (in timestamp units).
     Returns an integer."""
     period = unix_to_timestamp(cycles) / float(hz)
     return int(round(period))

-
 def parse_time(toparse):
     """
     Parse a free-form time string and return a nilmdb timestamp
@ -108,9 +95,9 @@ def parse_time(toparse):
     try:
         val = float(toparse)
         # range is from about year 2001 - 2128
-        if 1e9 < val < 5e9:
+        if val > 1e9 and val < 5e9:
             return unix_to_timestamp(val)
-        if 1e15 < val < 5e15:
+        if val > 1e15 and val < 5e15:
             return val
     except ValueError:
         pass
@ -142,7 +129,6 @@ def parse_time(toparse):
     # just give up for now.
     raise ValueError("unable to parse timestamp")

-
 def now():
     """Return current timestamp"""
     return unix_to_timestamp(time.time())
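The conversions compose as expected; a sketch of the round trip:

    ts = unix_to_timestamp(1234567890.123456)
    print(ts)                      # 1234567890123456 (integer microseconds)
    print(timestamp_to_unix(ts))   # 1234567890.123456 (float seconds)
    print(rate_to_period(8000))    # 125: microseconds per sample at 8 kHz
    print(string_to_timestamp("1234567890123456.0"))  # float fallback parse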
@ -5,17 +5,18 @@
 # with nilmdb.utils.Timer("flush"):
 #     foo.flush()

+from __future__ import print_function
+from __future__ import absolute_import
 import contextlib
 import time

 @contextlib.contextmanager
-def Timer(name=None, tosyslog=False):
+def Timer(name = None, tosyslog = False):
     start = time.time()
     yield
     elapsed = int((time.time() - start) * 1000)
     msg = (name or 'elapsed') + ": " + str(elapsed) + " ms"
-    if tosyslog:
+    if tosyslog: # pragma: no cover
         import syslog
         syslog.syslog(msg)
     else:
@ -1,17 +1,16 @@
|
||||||
"""File-like objects that add timestamps to the input lines"""
|
"""File-like objects that add timestamps to the input lines"""
|
||||||
|
|
||||||
from nilmdb.utils.printf import sprintf
|
from nilmdb.utils.printf import *
|
||||||
import nilmdb.utils.time
|
import nilmdb.utils.time
|
||||||
|
|
||||||
|
class Timestamper(object):
|
||||||
class Timestamper():
|
|
||||||
"""A file-like object that adds timestamps to lines of an input file."""
|
"""A file-like object that adds timestamps to lines of an input file."""
|
||||||
def __init__(self, infile, ts_iter):
|
def __init__(self, infile, ts_iter):
|
||||||
"""file: filename, or another file-like object
|
"""file: filename, or another file-like object
|
||||||
ts_iter: iterator that returns a timestamp string for
|
ts_iter: iterator that returns a timestamp string for
|
||||||
each line of the file"""
|
each line of the file"""
|
||||||
if isinstance(infile, str):
|
if isinstance(infile, basestring):
|
||||||
(continuation of the nilmdb/utils/timestamper.py diff)
-            self.file = open(infile, "rb")
+            self.file = open(infile, "r")
         else:
             self.file = infile
         self.ts_iter = ts_iter

@@ -23,19 +22,17 @@ class Timestamper():
         while True:
             line = self.file.readline(*args)
             if not line:
-                return b""
-            if line[0:1] == b'#':
+                return ""
+            if line[0] == '#':
                 continue
-            # For some reason, coverage on python 3.8 reports that
-            # we never hit this break, even though we definitely do.
-            break  # pragma: no cover
+            break
         try:
-            return next(self.ts_iter) + line
+            return self.ts_iter.next() + line
         except StopIteration:
-            return b""
+            return ""

-    def readlines(self, size=None):
-        out = b""
+    def readlines(self, size = None):
+        out = ""
         while True:
             line = self.readline()
             out += line

@@ -46,16 +43,15 @@ class Timestamper():
     def __iter__(self):
         return self

-    def __next__(self):
+    def next(self):
         result = self.readline()
         if not result:
             raise StopIteration
         return result


 class TimestamperRate(Timestamper):
     """Timestamper that uses a start time and a fixed rate"""
-    def __init__(self, infile, start, rate, end=None):
+    def __init__(self, infile, start, rate, end = None):
         """
         file: file name or object

@@ -65,39 +61,33 @@ class TimestamperRate(Timestamper):

         end: If specified, raise StopIteration before outputting a value
           greater than this."""
-        timestamp_to_bytes = nilmdb.utils.time.timestamp_to_bytes
+        timestamp_to_string = nilmdb.utils.time.timestamp_to_string
         rate_to_period = nilmdb.utils.time.rate_to_period

         def iterator(start, rate, end):
             n = 0
             rate = float(rate)
             while True:
                 now = start + rate_to_period(rate, n)
                 if end and now >= end:
-                    return
-                yield timestamp_to_bytes(now) + b" "
+                    raise StopIteration
+                yield timestamp_to_string(now) + " "
                 n += 1
         Timestamper.__init__(self, infile, iterator(start, rate, end))
         self.start = start
         self.rate = rate

     def __str__(self):
         return sprintf("TimestamperRate(..., start=\"%s\", rate=%g)",
                        nilmdb.utils.time.timestamp_to_human(self.start),
                        self.rate)


 class TimestamperNow(Timestamper):
     """Timestamper that uses current time"""
     def __init__(self, infile):
-        timestamp_to_bytes = nilmdb.utils.time.timestamp_to_bytes
+        timestamp_to_string = nilmdb.utils.time.timestamp_to_string
         get_now = nilmdb.utils.time.now

         def iterator():
             while True:
-                yield timestamp_to_bytes(get_now()) + b" "
+                yield timestamp_to_string(get_now()) + " "

         Timestamper.__init__(self, infile, iterator())

     def __str__(self):
         return "TimestamperNow(...)"
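The substantive change above is the bytes/str split: on Python 3 (master), indexing a bytes object yields an int, so the comment check must use the slice `line[0:1] == b'#'` rather than `line[0] == '#'`, and the empty sentinels become `b""`. A minimal self-contained sketch of the same pattern on Python 3, where `ts_iter` is a simplified stand-in for nilmdb.utils.time's rate-based timestamp iterator (not the library's actual function):

    import io
    import itertools

    def ts_iter(start_us, rate_hz):
        # Simplified stand-in: yields b"<microseconds> " for n = 0, 1, 2, ...
        for n in itertools.count():
            yield b"%d " % (start_us + int(round(n * 1e6 / rate_hz)))

    def timestamp_lines(infile, ts):
        # Prefix each non-comment line of a bytes stream with a timestamp.
        for line in infile:
            if line[0:1] == b'#':   # bytes slice; line[0] would be an int here
                continue
            yield next(ts) + line

    data = io.BytesIO(b"# header\n1.0 2.0\n3.0 4.0\n")
    for out in timestamp_lines(data, ts_iter(1234567890000000, 8000)):
        print(out)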
nilmdb/utils/unicode.py  (new file, 29 lines)
@@ -0,0 +1,29 @@
+import sys
+
+if sys.version_info[0] >= 3:  # pragma: no cover (future Python3 compat)
+    text_type = str
+else:
+    text_type = unicode
+
+def encode(u):
+    """Try to encode something from Unicode to a string using the
+    default encoding.  If it fails, try encoding as UTF-8."""
+    if not isinstance(u, text_type):
+        return u
+    try:
+        return u.encode()
+    except UnicodeEncodeError:
+        return u.encode("utf-8")
+
+def decode(s):
+    """Try to decode something from string to Unicode using the
+    default encoding.  If it fails, try decoding as UTF-8."""
+    if isinstance(s, text_type):
+        return s
+    try:
+        return s.decode()
+    except UnicodeDecodeError:
+        try:
+            return s.decode("utf-8")
+        except UnicodeDecodeError:
+            return s  # best we can do
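A sketch of how this helper should behave on Python 2, assuming the default ASCII codec; the import path follows the new file's location, and the values in the comments are what the fallback logic produces, not captured output:

    # Python 2 only: this module exists on the python2 branch.
    import nilmdb.utils.unicode as nu

    print repr(nu.encode(u"plain"))        # 'plain'          (ASCII succeeds)
    print repr(nu.encode(u"\u2192"))       # '\xe2\x86\x92'   (falls back to UTF-8)
    print repr(nu.decode("\xe2\x86\x92"))  # u'\u2192'        (UTF-8 fallback)
    print repr(nu.decode("\xc3("))         # '\xc3('          (invalid UTF-8, returned as-is)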
requirements.txt  (deleted; evidently the pinned list that master's setup.py reads, see below)
@@ -1,41 +0,0 @@
-argcomplete==1.12.0
-CherryPy==18.6.0
-coverage==5.2.1
-Cython==0.29.21
-decorator==4.4.2
-fallocate==1.6.4
-flake8==3.8.3
-nose==1.3.7
-numpy==1.19.1
-progressbar==2.5
-psutil==5.7.2
-python-datetime-tz==0.5.4
-python-dateutil==2.8.1
-requests==2.24.0
-tz==0.2.2
-yappi==1.2.5
-
-## The following requirements were added by pip freeze:
-beautifulsoup4==4.9.1
-certifi==2020.6.20
-chardet==3.0.4
-cheroot==8.4.2
-idna==2.10
-jaraco.classes==3.1.0
-jaraco.collections==3.0.0
-jaraco.functools==3.0.1
-jaraco.text==3.2.0
-mccabe==0.6.1
-more-itertools==8.4.0
-portend==2.6
-pycodestyle==2.6.0
-pyflakes==2.2.0
-pytz==2020.1
-six==1.15.0
-soupsieve==2.0.1
-tempora==4.0.0
-urllib3==1.25.10
-waitress==1.4.4
-WebOb==1.8.6
-WebTest==2.0.35
-zc.lockfile==2.0
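Master consumes this pinned list directly via `install_requires = open('requirements.txt').readlines()` (see the setup.py diff below). A slightly more defensive variant, as a sketch only, would skip blanks and the `## ... pip freeze` comment line:

    # Sketch: tolerant parse of requirements.txt for install_requires
    install_requires = [
        line.strip()
        for line in open("requirements.txt")
        if line.strip() and not line.lstrip().startswith("#")
    ]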
setup.cfg  (22 lines changed)
@@ -13,6 +13,8 @@ cover-package=nilmdb
 cover-erase=1
 # this works, puts html output in cover/ dir:
 # cover-html=1
+# need nose 1.1.3 for this:
+# cover-branches=1
 #debug=nose
 #debug-log=nose.log
 stop=1
@@ -37,23 +39,3 @@ tests=tests
 #with-profile=1
 #profile-sort=time
 ##profile-restrict=10 # doesn't work right, treated as string or something
-
-[versioneer]
-VCS=git
-style=pep440
-versionfile_source=nilmdb/_version.py
-versionfile_build=nilmdb/_version.py
-tag_prefix=nilmdb-
-parentdir_prefix=nilmdb-
-
-[flake8]
-exclude=_version.py
-extend-ignore=E731
-per-file-ignores=__init__.py:F401,E402 \
-    serializer.py:E722 \
-    mustclose.py:E722 \
-    fsck.py:E266
-
-[pylint]
-ignore=_version.py
-disable=C0103,C0111,R0913,R0914
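The [versioneer] stanza removed here is master's declarative replacement for the programmatic `versioneer.versionfile_source = ...` assignments that the python2 branch keeps in setup.py (next diff). As a quick sanity check on master, the section can be read back with stock configparser; a sketch, assuming master's setup.cfg parses cleanly and using the key names from the diff above:

    import configparser

    cfg = configparser.ConfigParser()
    cfg.read("setup.cfg")
    for key in ("VCS", "style", "versionfile_source", "tag_prefix"):
        print(key, "=", cfg.get("versioneer", key))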
setup.py  (104 lines changed)
@@ -1,51 +1,119 @@
-#!/usr/bin/env python3
+#!/usr/bin/python

 # To release a new version, tag it:
 #   git tag -a nilmdb-1.1 -m "Version 1.1"
 #   git push --tags
 # Then just package it up:
-#   python3 setup.py sdist
+#   python setup.py sdist

+import traceback
 import sys
 import os
-from setuptools import setup
-from distutils.extension import Extension
+try:
+    from setuptools import setup, find_packages
+    from distutils.extension import Extension
+    import distutils.version
+except ImportError:
+    traceback.print_exc()
+    print "Please install the prerequisites listed in README.txt"
+    sys.exit(1)

 # Versioneer manages version numbers from git tags.
 # https://github.com/warner/python-versioneer
 import versioneer
+versioneer.versionfile_source = 'nilmdb/_version.py'
+versioneer.versionfile_build = 'nilmdb/_version.py'
+versioneer.tag_prefix = 'nilmdb-'
+versioneer.parentdir_prefix = 'nilmdb-'
+
+# Hack to workaround logging/multiprocessing issue:
+# https://groups.google.com/d/msg/nose-users/fnJ-kAUbYHQ/_UsLN786ygcJ
+try: import multiprocessing
+except Exception: pass
+
+# Use Cython if it's new enough, otherwise use preexisting C files.
+cython_modules = [ 'nilmdb.server.interval',
+                   'nilmdb.server.rbtree' ]
+try:
+    import Cython
+    from Cython.Build import cythonize
+    if (distutils.version.LooseVersion(Cython.__version__) <
+        distutils.version.LooseVersion("0.16")):
+        print "Cython version", Cython.__version__, "is too old; not using it."
+        raise ImportError()
+    use_cython = True
+except ImportError:
+    use_cython = False

-# External modules that need to be built
 ext_modules = [ Extension('nilmdb.server.rocket', ['nilmdb/server/rocket.c' ]) ]

-# Use Cython.
-cython_modules = [ 'nilmdb.server.interval', 'nilmdb.server.rbtree' ]
-import Cython
-from Cython.Build import cythonize
 for modulename in cython_modules:
     filename = modulename.replace('.','/')
-    ext_modules.extend(cythonize(filename + ".pyx"))
+    if use_cython:
+        ext_modules.extend(cythonize(filename + ".pyx"))
+    else:
+        cfile = filename + ".c"
+        if not os.path.exists(cfile):
+            raise Exception("Missing source file " + cfile + ". "
+                            "Try installing cython >= 0.16.")
+        ext_modules.append(Extension(modulename, [ cfile ]))

-# Get list of requirements to use in `install_requires` below.  Note
-# that we don't make a distinction between things that are actually
-# required for end-users vs developers (or use `test_requires` or
-# anything else) -- just install everything for simplicity.
-install_requires = open('requirements.txt').readlines()
+# We need a MANIFEST.in.  Generate it here rather than polluting the
+# repository with yet another setup-related file.
+with open("MANIFEST.in", "w") as m:
+    m.write("""
+# Root
+include README.txt
+include setup.cfg
+include setup.py
+include versioneer.py
+include Makefile
+include .coveragerc
+include .pylintrc
+
+# Cython files -- include source.
+recursive-include nilmdb/server *.pyx *.pyxdep *.pxd
+
+# Tests
+recursive-include tests *.py
+recursive-include tests/data *
+include tests/test.order
+
+# Docs
+recursive-include docs Makefile *.md
+
+# Extras
+recursive-include extras *
+""")

 # Run setup
 setup(name='nilmdb',
       version = versioneer.get_version(),
       cmdclass = versioneer.get_cmdclass(),
-      url = 'https://git.jim.sh/nilm/nilmdb.git',
+      url = 'https://git.jim.sh/jim/lees/nilmdb.git',
       author = 'Jim Paris',
       description = "NILM Database",
       long_description = "NILM Database",
       license = "Proprietary",
       author_email = 'jim@jtan.com',
-      setup_requires = [ 'setuptools' ],
-      install_requires = install_requires,
+      tests_require = [ 'nose',
+                        'coverage',
+                        'numpy',
+                        ],
+      setup_requires = [ 'setuptools',
+                         ],
+      install_requires = [ 'decorator',
+                           'cherrypy >= 3.2',
+                           'simplejson',
+                           'python-dateutil',
+                           'pytz',
+                           'psutil >= 0.3.0',
+                           'requests >= 1.1.0',
+                           'progressbar >= 2.2',
+                           ],
       packages = [ 'nilmdb',
                    'nilmdb.utils',
+                   'nilmdb.utils.datetime_tz',
                    'nilmdb.server',
                    'nilmdb.client',
                    'nilmdb.cmdline',
(remainder of the setup.py hunk not shown in the page extract)
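The build-time logic the python2 branch adds is easier to follow consolidated. Here it is as a self-contained sketch (identifiers as in the diff; this mirrors the branch's approach rather than restating its exact file, and drops the Python 2 print statements so it runs on either interpreter): try Cython if it is at least 0.16, otherwise fall back to the pregenerated C files shipped in the tree.

    import os
    import distutils.version
    from distutils.extension import Extension

    # Use Cython if it's new enough; otherwise build from shipped C files.
    cython_modules = ['nilmdb.server.interval', 'nilmdb.server.rbtree']
    try:
        import Cython
        from Cython.Build import cythonize
        if (distutils.version.LooseVersion(Cython.__version__) <
                distutils.version.LooseVersion("0.16")):
            raise ImportError("Cython too old")
        use_cython = True
    except ImportError:
        use_cython = False

    ext_modules = []
    for modulename in cython_modules:
        filename = modulename.replace('.', '/')
        if use_cython:
            ext_modules.extend(cythonize(filename + ".pyx"))
        else:
            cfile = filename + ".c"
            if not os.path.exists(cfile):
                raise Exception("Missing source file " + cfile + ". "
                                "Try installing cython >= 0.16.")
            ext_modules.append(Extension(modulename, [cfile]))

This keeps end-user installs working without a Cython dependency, at the cost of committing the generated .c files (master instead requires Cython outright).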
tests data file  (name not shown in this view)
@@ -1,4 +1,4 @@
-# comments are cool? what if they contain →UNICODE← or invalid utf-8 like Ã(
+# comments are cool?
 2.66568e+05 2.24029e+05 5.16140e+03 2.52517e+03 8.35084e+03 3.72470e+03 1.35534e+03 2.03900e+03
 2.57914e+05 2.27183e+05 4.30368e+03 4.13080e+03 7.25535e+03 4.89047e+03 1.63859e+03 1.93496e+03
 2.51717e+05 2.26047e+05 5.99445e+03 3.49363e+03 8.07250e+03 5.08267e+03 2.26917e+03 2.86231e+03
(Several other test data files differ between the branches: four one-line text files containing "hi" were deleted, each showing
@@ -1 +0,0 @@
-hi
and roughly thirty binary test data files also changed; the viewer does not render binary diffs or list their names here, and further files were omitted because the diff is too large.)