# -*- coding: utf-8 -*-

import nilmdb.server
import nilmdb.client

from nilmdb.utils.printf import *
from nilmdb.utils import timestamper
from nilmdb.client import ClientError, ServerError
from nilmdb.utils import datetime_tz

from nose.plugins.skip import SkipTest
from nose.tools import *
from nose.tools import assert_raises
import itertools
import distutils.version
import os
import sys
import threading
import cStringIO
import simplejson as json
import unittest
import warnings
import resource
import time
import re
import struct

from testutil.helpers import *

testdb = "tests/client-testdb"
testurl = "http://localhost:32180/"

def setup_module():
    global test_server, test_db

    # Clear out DB
    recursive_unlink(testdb)

    # Start web app on a custom port
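    # (serializer_proxy, as used here, appears to funnel all NilmDB
    # method calls through a single worker thread, so the multithreaded
    # HTTP server can safely share one database object.)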
    test_db = nilmdb.utils.serializer_proxy(nilmdb.server.NilmDB)(testdb)
    test_server = nilmdb.server.Server(test_db, host = "127.0.0.1",
                                       port = 32180, stoppable = False,
                                       fast_shutdown = True,
                                       force_traceback = True)
    test_server.start(blocking = False)

def teardown_module():
    global test_server, test_db

    # Close web app
    test_server.stop()
    test_db.close()

class TestClient(object):

    def test_client_01_basic(self):
        # Test a fake host
        client = nilmdb.client.Client(url = "http://localhost:1/")
        with assert_raises(nilmdb.client.ServerError):
            client.version()
        client.close()

        # Then a fake URL on a real host
        client = nilmdb.client.Client(url = "http://localhost:32180/fake/")
        with assert_raises(nilmdb.client.ClientError):
            client.version()
        client.close()

        # Now a real URL with no http:// prefix
        client = nilmdb.client.Client(url = "localhost:32180")
        version = client.version()
        client.close()

        # Now use the real URL
        client = nilmdb.client.Client(url = testurl)
        version = client.version()
        eq_(distutils.version.LooseVersion(version),
            distutils.version.LooseVersion(test_server.version))

        # Bad URLs should give 404, not 500
        with assert_raises(ClientError):
            client.http.get("/stream/create")
        client.close()

    def test_client_02_createlist(self):
        # Basic stream tests, like those in test_nilmdb:test_stream
        client = nilmdb.client.Client(url = testurl)

        # Database starts empty
        eq_(client.stream_list(), [])

        # Bad path
        with assert_raises(ClientError):
            client.stream_create("foo/bar/baz", "float32_8")
        with assert_raises(ClientError):
            client.stream_create("/foo", "float32_8")
        # Bad layout type
        with assert_raises(ClientError):
            client.stream_create("/newton/prep", "NoSuchLayout")

        # Bad method types
        with assert_raises(ClientError):
            client.http.put("/stream/list", "")
        # Try a bunch of times to make sure the request body is getting consumed
        for x in range(10):
            with assert_raises(ClientError):
                client.http.post("/stream/list")
        client = nilmdb.client.Client(url = testurl)

        # Create four streams
        client.stream_create("/newton/prep", "float32_8")
        client.stream_create("/newton/raw", "uint16_6")
        client.stream_create("/newton/zzz/rawnotch2", "uint16_9")
        client.stream_create("/newton/zzz/rawnotch11", "uint16_9")

        # Verify we got all four streams, in natural sort order
        # ("rawnotch2" sorts before "rawnotch11")
        eq_(client.stream_list(), [ ["/newton/prep", "float32_8"],
                                    ["/newton/raw", "uint16_6"],
                                    ["/newton/zzz/rawnotch2", "uint16_9"],
                                    ["/newton/zzz/rawnotch11", "uint16_9"]
                                    ])

        # Match just one type or one path
        eq_(client.stream_list(layout="uint16_6"),
            [ ["/newton/raw", "uint16_6"] ])
        eq_(client.stream_list(path="/newton/raw"),
            [ ["/newton/raw", "uint16_6"] ])

        # Try messing with resource limits to trigger errors and get
        # more coverage.  Here, make it so we can only create files 1
        # byte in size, which will trigger an IOError in the server when
        # we create a table.
        limit = resource.getrlimit(resource.RLIMIT_FSIZE)
        resource.setrlimit(resource.RLIMIT_FSIZE, (1, limit[1]))
        with assert_raises(ServerError) as e:
            client.stream_create("/newton/hello", "uint16_6")
        resource.setrlimit(resource.RLIMIT_FSIZE, limit)

        client.close()

    def test_client_03_metadata(self):
        client = nilmdb.client.Client(url = testurl)

        # Set / get metadata
        eq_(client.stream_get_metadata("/newton/prep"), {})
        eq_(client.stream_get_metadata("/newton/raw"), {})
        meta1 = { "description": "The Data",
                  "v_scale": "1.234" }
        meta2 = { "description": "The Data" }
        meta3 = { "v_scale": "1.234" }
        client.stream_set_metadata("/newton/prep", meta1)
        client.stream_update_metadata("/newton/prep", {})
        client.stream_update_metadata("/newton/raw", meta2)
        client.stream_update_metadata("/newton/raw", meta3)
        eq_(client.stream_get_metadata("/newton/prep"), meta1)
        # meta2 followed by meta3 merges into the same keys as meta1
        eq_(client.stream_get_metadata("/newton/raw"), meta1)
        eq_(client.stream_get_metadata("/newton/raw",
                                       [ "description" ]), meta2)
        eq_(client.stream_get_metadata("/newton/raw",
                                       [ "description", "v_scale" ]), meta1)

        # A missing key comes back with a None value
        eq_(client.stream_get_metadata("/newton/raw", "descr"),
            { "descr": None })
        eq_(client.stream_get_metadata("/newton/raw", [ "descr" ]),
            { "descr": None })

        # Test wrong types (list instead of dict)
        with assert_raises(ClientError):
            client.stream_set_metadata("/newton/prep", [1,2,3])
        with assert_raises(ClientError):
            client.stream_update_metadata("/newton/prep", [1,2,3])

        # Test wrong types (dict of non-strings).
        # Numbers are OK; they'll get converted to strings.
        client.stream_set_metadata("/newton/prep", { "hello": 1234 })
        # Anything else is not.
        with assert_raises(ClientError):
            client.stream_set_metadata("/newton/prep", { "world": { 1: 2 } })
        with assert_raises(ClientError):
            client.stream_set_metadata("/newton/prep", { "world": [ 1, 2 ] })

        client.close()

    def test_client_04_insert(self):
        client = nilmdb.client.Client(url = testurl)

        # Limit _max_data to 1 MB, since our test file is 1.5 MB
        old_max_data = nilmdb.client.client.StreamInserter._max_data
        nilmdb.client.client.StreamInserter._max_data = 1 * 1024 * 1024
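        # (With the file larger than _max_data, the inserts below get
        # split across multiple HTTP requests, exercising the client's
        # chunking path.)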

        datetime_tz.localtz_set("America/New_York")

        testfile = "tests/data/prep-20120323T1000"
        start = nilmdb.utils.time.parse_time("20120323T1000")
        rate = 120
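        # (TimestamperRate prepends a timestamp to each line of the raw
        # file, starting at "start" and advancing at "rate" lines per
        # second.)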

        # First try a nonexistent path
        data = timestamper.TimestamperRate(testfile, start, rate)
        with assert_raises(ClientError) as e:
            result = client.stream_insert("/newton/no-such-path", data)
        in_("404 Not Found", str(e.exception))

        # Now try reversed timestamps
        data = timestamper.TimestamperRate(testfile, start, rate)
        data = reversed(list(data))
        with assert_raises(ClientError) as e:
            result = client.stream_insert("/newton/prep", data)
        in_("400 Bad Request", str(e.exception))
        in2_("timestamp is not monotonically increasing",
             "start must precede end", str(e.exception))

        # Now try empty data (no server request made)
        empty = cStringIO.StringIO("")
        data = timestamper.TimestamperRate(empty, start, rate)
        result = client.stream_insert("/newton/prep", data)
        eq_(result, None)

        # It's OK to insert an empty interval
        client.http.put("stream/insert", "", { "path": "/newton/prep",
                                               "start": 1, "end": 2 })
        eq_(list(client.stream_intervals("/newton/prep")), [[1, 2]])
        client.stream_remove("/newton/prep")
        eq_(list(client.stream_intervals("/newton/prep")), [])

        # Timestamps can be negative too
        client.http.put("stream/insert", "", { "path": "/newton/prep",
                                               "start": -2, "end": -1 })
        eq_(list(client.stream_intervals("/newton/prep")), [[-2, -1]])
        client.stream_remove("/newton/prep")
        eq_(list(client.stream_intervals("/newton/prep")), [])

        # Intervals that end at zero shouldn't be any different
        client.http.put("stream/insert", "", { "path": "/newton/prep",
                                               "start": -1, "end": 0 })
        eq_(list(client.stream_intervals("/newton/prep")), [[-1, 0]])
        client.stream_remove("/newton/prep")
        eq_(list(client.stream_intervals("/newton/prep")), [])

        # Try forcing a server request with equal start and end
        with assert_raises(ClientError) as e:
            client.http.put("stream/insert", "", { "path": "/newton/prep",
                                                   "start": 0, "end": 0 })
        in_("400 Bad Request", str(e.exception))
        in_("start must precede end", str(e.exception))

        # Invalid times in HTTP request
        with assert_raises(ClientError) as e:
            client.http.put("stream/insert", "", { "path": "/newton/prep",
                                                   "start": "asdf", "end": 0 })
        in_("400 Bad Request", str(e.exception))
        in_("invalid start", str(e.exception))

        with assert_raises(ClientError) as e:
            client.http.put("stream/insert", "", { "path": "/newton/prep",
                                                   "start": 0, "end": "asdf" })
        in_("400 Bad Request", str(e.exception))
        in_("invalid end", str(e.exception))

        # Good content type; the request fails later, on the bad path
        with assert_raises(ClientError) as e:
            client.http.put("stream/insert", "",
                            { "path": "xxxx", "start": 0, "end": 1,
                              "binary": 1 },
                            binary = True)
        in_("No such stream", str(e.exception))

        # Bad content type; the request fails up front
        with assert_raises(ClientError) as e:
            client.http.put("stream/insert", "",
                            { "path": "xxxx", "start": 0, "end": 1,
                              "binary": 1 },
                            binary = False)
        in_("Content type must be application/octet-stream", str(e.exception))

        # Specify start/end (starts too late)
        data = timestamper.TimestamperRate(testfile, start, rate)
        with assert_raises(ClientError) as e:
            result = client.stream_insert("/newton/prep", data,
                                          start + 5000000, start + 120000000)
        in_("400 Bad Request", str(e.exception))
        in_("Data timestamp 1332511200000000 < start time 1332511205000000",
            str(e.exception))

        # Specify start/end (ends too early)
        data = timestamper.TimestamperRate(testfile, start, rate)
        with assert_raises(ClientError) as e:
            result = client.stream_insert("/newton/prep", data,
                                          start, start + 1000000)
        in_("400 Bad Request", str(e.exception))
        # Client chunks the input, so the exact timestamp here might change
        # if the chunk positions change.
        assert(re.search("Data timestamp 13325[0-9]+ "
                         ">= end time 1332511201000000", str(e.exception))
               is not None)

        # Now do the real load
        data = timestamper.TimestamperRate(testfile, start, rate)
        result = client.stream_insert("/newton/prep", data,
                                      start, start + 119999777)

        # Verify the intervals.  Should be just one, even if the data
        # was inserted in chunks, due to nilmdb interval concatenation.
        intervals = list(client.stream_intervals("/newton/prep"))
        eq_(intervals, [[start, start + 119999777]])

        # Try some overlapping data -- just insert it again
        data = timestamper.TimestamperRate(testfile, start, rate)
        with assert_raises(ClientError) as e:
            result = client.stream_insert("/newton/prep", data)
        in_("400 Bad Request", str(e.exception))
        in_("verlap", str(e.exception))  # matches "Overlap" or "overlap"

        nilmdb.client.client.StreamInserter._max_data = old_max_data
        client.close()

    def test_client_05_extractremove(self):
        # Misc tests for extract and remove.  Most of them are in test_cmdline.
        client = nilmdb.client.Client(url = testurl)

        # A nonexistent range of time should return no data
        for x in client.stream_extract("/newton/prep",
                                       999123000000, 999124000000):
            raise AssertionError("shouldn't be any data for this request")

        # Removal with start > end is an error
        with assert_raises(ClientError) as e:
            client.stream_remove("/newton/prep", 123000000, 120000000)

        # Test count
        eq_(client.stream_count("/newton/prep"), 14400)

        # Test binary output; markup and count are incompatible with it
        with assert_raises(ClientError) as e:
            list(client.stream_extract("/newton/prep",
                                       markup = True, binary = True))
        with assert_raises(ClientError) as e:
            list(client.stream_extract("/newton/prep",
                                       count = True, binary = True))
        data = "".join(client.stream_extract("/newton/prep", binary = True))
        # Quick check using struct
        unpacker = struct.Struct("<qffffffff")
        out = []
        for i in range(14400):
            out.append(unpacker.unpack_from(data, i * unpacker.size))
        eq_(out[0], (1332511200000000, 266568.0, 224029.0, 5161.39990234375,
                     2525.169921875, 8350.83984375, 3724.699951171875,
                     1355.3399658203125, 2039.0))

        # Just get some coverage
        with assert_raises(ClientError) as e:
            client.http.post("/stream/remove", { "path": "/none" })

        client.close()

    def test_client_06_generators(self):
        # A lot of the client functionality is already tested by
        # test_cmdline, but this gets a bit more coverage of things
        # that cmdline misses.
        client = nilmdb.client.Client(url = testurl)

        # Trigger a client error in a generator
        start = nilmdb.utils.time.parse_time("20120323T2000")
        end = nilmdb.utils.time.parse_time("20120323T1000")
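        # (These calls return generators; no request is sent until the
        # first .next(), which is why the errors below are triggered by
        # consuming one element.)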
        for function in [ client.stream_intervals, client.stream_extract ]:
            with assert_raises(ClientError) as e:
                function("/newton/prep", start, end).next()
            in_("400 Bad Request", str(e.exception))
            in_("start must precede end", str(e.exception))

        # Trigger a low-level connection error in a generator
        with assert_raises(ServerError) as e:
            client.http.get_gen("http://nosuchurl.example.com./").next()

        # Check 404 for missing streams
        for function in [ client.stream_intervals, client.stream_extract ]:
            with assert_raises(ClientError) as e:
                function("/no/such/stream").next()
            in_("404 Not Found", str(e.exception))
            in_("No such stream", str(e.exception))

        client.close()

    def test_client_07_headers(self):
        # Make sure that /stream/intervals and /stream/extract properly
        # return streaming, chunked responses with the expected content
        # types.  Pokes around in client.http internals a bit to look
        # at the response headers.

        client = nilmdb.client.Client(url = testurl)
        http = client.http

        # Use a warning rather than returning a test failure for the
        # transfer-encoding, so that we can still disable chunked
        # responses for debugging.

        def headers():
            h = ""
            for (k, v) in http._last_response.headers.items():
                h += k + ": " + v + "\n"
            return h.lower()

        # Intervals
        x = http.get("stream/intervals", { "path": "/newton/prep" })
        if "transfer-encoding: chunked" not in headers():
            warnings.warn("Non-chunked HTTP response for /stream/intervals")
        if "content-type: application/x-json-stream" not in headers():
            raise AssertionError("/stream/intervals content type "
                                 "is not application/x-json-stream:\n" +
                                 headers())

        # Extract
        x = http.get("stream/extract", { "path": "/newton/prep",
                                         "start": "123", "end": "124" })
        if "transfer-encoding: chunked" not in headers():
            warnings.warn("Non-chunked HTTP response for /stream/extract")
        if "content-type: text/plain;charset=utf-8" not in headers():
            raise AssertionError("/stream/extract is not text/plain:\n" +
                                 headers())

        # Binary extract
        x = http.get("stream/extract", { "path": "/newton/prep",
                                         "start": "123", "end": "124",
                                         "binary": "1" })
        if "transfer-encoding: chunked" not in headers():
            warnings.warn("Non-chunked HTTP response for /stream/extract")
        if "content-type: application/octet-stream" not in headers():
            raise AssertionError("/stream/extract is not binary:\n" +
                                 headers())

        # Make sure a "binary" parameter of "0" really disables binary
        x = http.get("stream/extract", { "path": "/newton/prep",
                                         "start": "123", "end": "124",
                                         "binary": "0" })
        if "content-type: application/octet-stream" in headers():
            raise AssertionError("/stream/extract is not text:\n" +
                                 headers())

        # Invalid parameters
        with assert_raises(ClientError) as e:
            x = http.get("stream/extract", { "path": "/newton/prep",
                                             "start": "123", "end": "124",
                                             "binary": "asdfasfd" })
        in_("can't parse parameter", str(e.exception))

        client.close()

    def test_client_08_unicode(self):
        # Try both with and without posting JSON
        for post_json in (False, True):
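            # (post_json switches the client between form-encoded and
            # JSON-encoded request bodies; behavior should be identical
            # either way.)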
            # Basic Unicode tests
            client = nilmdb.client.Client(url = testurl, post_json = post_json)

            # Delete streams that exist
            for stream in client.stream_list():
                client.stream_remove(stream[0])
                client.stream_destroy(stream[0])

            # Database is empty
            eq_(client.stream_list(), [])

            # Create Unicode streams, match them
            raw = [ u"/düsseldorf/raw", u"uint16_6" ]
            prep = [ u"/düsseldorf/prep", u"uint16_6" ]
            client.stream_create(*raw)
            eq_(client.stream_list(), [raw])
            eq_(client.stream_list(layout=raw[1]), [raw])
            eq_(client.stream_list(path=raw[0]), [raw])
            client.stream_create(*prep)
            eq_(client.stream_list(), [prep, raw])

            # Set / get metadata with Unicode keys and values
            eq_(client.stream_get_metadata(raw[0]), {})
            eq_(client.stream_get_metadata(prep[0]), {})
            meta1 = { u"alpha": u"α",
                      u"β": u"beta" }
            meta2 = { u"alpha": u"α" }
            meta3 = { u"β": u"beta" }
            client.stream_set_metadata(prep[0], meta1)
            client.stream_update_metadata(prep[0], {})
            client.stream_update_metadata(raw[0], meta2)
            client.stream_update_metadata(raw[0], meta3)
            eq_(client.stream_get_metadata(prep[0]), meta1)
            eq_(client.stream_get_metadata(raw[0]), meta1)
            eq_(client.stream_get_metadata(raw[0], [ "alpha" ]), meta2)
            eq_(client.stream_get_metadata(raw[0], [ "alpha", "β" ]), meta1)

            client.close()

    def test_client_09_closing(self):
        # Make sure we actually close sockets correctly.  New
        # connections will block for a while if they're not closed,
        # since the server will stop accepting new connections.
        for test in [1, 2]:
            start = time.time()
            for i in range(50):
                if time.time() - start > 15:
                    raise AssertionError("Connections seem to be blocking... "
                                         "probably not closing properly.")
                if test == 1:
                    # explicit close
                    client = nilmdb.client.Client(url = testurl)
                    with assert_raises(ClientError) as e:
                        client.stream_remove("/newton/prep", 123, 120)
                    client.close()  # remove this to see the failure
                elif test == 2:
                    # use the context manager
                    with nilmdb.client.Client(url = testurl) as c:
                        with assert_raises(ClientError) as e:
                            c.stream_remove("/newton/prep", 123, 120)

    def test_client_10_context(self):
        # Test using the client's stream insertion context manager to
        # insert data.
        client = nilmdb.client.Client(testurl)

        client.stream_create("/context/test", "uint16_1")
        with client.stream_insert_context("/context/test") as ctx:
            # override _max_data to trigger frequent server updates
            ctx._max_data = 15

            ctx.insert("1000 1\n")

            ctx.insert("1010 ")
            ctx.insert("1\n1020 1")
            ctx.insert("")
            ctx.insert("\n1030 1\n")

            ctx.insert("1040 1\n")
            ctx.insert("# hello\n")
            ctx.insert(" # hello\n")
            ctx.insert(" 1050 1\n")
            ctx.finalize()

            ctx.insert("1070 1\n")
            ctx.update_end(1080)
            ctx.finalize()

            ctx.update_start(1090)
            ctx.insert("1100 1\n")
            ctx.insert("1110 1\n")
            ctx.send()
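            # (send() flushes any buffered rows to the server right away
            # without ending the current interval.)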
            ctx.insert("1120 1\n")
            ctx.insert("1130 1\n")
            ctx.insert("1140 1\n")
            ctx.update_end(1160)
            ctx.insert("1150 1\n")
            ctx.update_end(1170)
            ctx.insert("1160 1\n")
            ctx.update_end(1180)
            ctx.insert("1170 1" +
                       " # this is super long" * 100 +
                       "\n")
            ctx.finalize()
            ctx.insert("# this is super long" * 100)

        # Out-of-bounds and bogus inserts should fail
        with assert_raises(ClientError):
            with client.stream_insert_context("/context/test",
                                              1000, 2000) as ctx:
                ctx.insert("1180 1\n")

        with assert_raises(ClientError):
            with client.stream_insert_context("/context/test",
                                              2000, 3000) as ctx:
                ctx.insert("1180 1\n")

        with assert_raises(ClientError):
            with client.stream_insert_context("/context/test") as ctx:
                ctx.insert("bogus data\n")

        with client.stream_insert_context("/context/test", 2000, 3000) as ctx:
            # make sure our override wasn't permanent
            ne_(ctx._max_data, 15)
            ctx.insert("2250 1\n")
            ctx.finalize()

        with assert_raises(ClientError):
            with client.stream_insert_context("/context/test",
                                              3000, 4000) as ctx:
                ctx.insert("3010 1\n")
                ctx.insert("3020 2\n")
                ctx.insert("3030 3\n")
                ctx.insert("3040 4\n")
                ctx.insert("3040 4\n")  # non-monotonic after a few lines
                ctx.finalize()

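        # With no explicit end, each finalized interval ends just past
        # its last timestamp (e.g. "1050 1" gives an end of 1051).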
eq_(list(client.stream_intervals("/context/test")),
|
2013-04-08 17:15:06 -04:00
|
|
|
|
[ [ 1000, 1051 ],
|
|
|
|
|
[ 1070, 1080 ],
|
|
|
|
|
[ 1090, 1180 ],
|
|
|
|
|
[ 2000, 3000 ] ])
|
2013-02-19 17:19:45 -05:00
|
|
|
|
|
2013-03-18 19:39:03 -04:00
|
|
|
|
# destroy stream (try without removing data first)
|
|
|
|
|
with assert_raises(ClientError):
|
|
|
|
|
client.stream_destroy("/context/test")
|
|
|
|
|
client.stream_remove("/context/test")
|
2013-02-19 17:19:45 -05:00
|
|
|
|
client.stream_destroy("/context/test")
|
|
|
|
|
client.close()
|
2013-02-21 14:07:35 -05:00
|
|
|
|
|
|
|
|
|
    def test_client_11_emptyintervals(self):
        # Empty intervals are OK!  If we're recording detection events
        # by inserting rows into the database, we want to be able to
        # have an interval where no events occurred.  Test them here.
        client = nilmdb.client.Client(testurl)
        client.stream_create("/empty/test", "uint16_1")
        def info():
            result = []
            for interval in list(client.stream_intervals("/empty/test")):
                result.append((client.stream_count("/empty/test", *interval),
                               interval))
            return result

        eq_(info(), [])

        # Insert a region with just a few points
        with client.stream_insert_context("/empty/test") as ctx:
            ctx.update_start(100)
            ctx.insert("140 1\n")
            ctx.insert("150 1\n")
            ctx.insert("160 1\n")
            ctx.update_end(200)
            ctx.finalize()

        eq_(info(), [(3, [100, 200])])

        # Delete a chunk, which will leave one data point and two intervals
        client.stream_remove("/empty/test", 145, 175)
        eq_(info(), [(1, [100, 145]),
                     (0, [175, 200])])

        # Try also creating a completely empty interval from scratch,
        # in a few different ways.
        client.stream_insert("/empty/test", "", 300, 350)
        client.stream_insert("/empty/test", [], 400, 450)
        with client.stream_insert_context("/empty/test", 500, 550):
            pass

        # If enough timestamps aren't provided, empty intervals won't
        # be created.
        client.stream_insert("/empty/test", [])
        with client.stream_insert_context("/empty/test"):
            pass
        client.stream_insert("/empty/test", [], start = 600)
        with client.stream_insert_context("/empty/test", start = 700):
            pass
        client.stream_insert("/empty/test", [], end = 850)
        with client.stream_insert_context("/empty/test", end = 950):
            pass

        # Equal start and end is OK as long as there's no data
        with client.stream_insert_context("/empty/test", start=9, end=9):
            pass

        # Try various things that might cause problems
        with client.stream_insert_context("/empty/test", 1000, 1050) as ctx:
            ctx.finalize()  # inserts [1000, 1050]
            ctx.finalize()  # nothing
            ctx.finalize()  # nothing
            ctx.insert("1100 1\n")
            ctx.finalize()  # inserts [1100, 1101]
            ctx.update_start(1199)
            ctx.insert("1200 1\n")
            ctx.update_end(1250)
            ctx.finalize()  # inserts [1199, 1250]
            ctx.update_start(1299)
            ctx.finalize()  # nothing
            ctx.update_end(1350)
            ctx.finalize()  # nothing
            ctx.update_start(1400)
            ctx.insert("# nothing!\n")
            ctx.update_end(1450)
            ctx.finalize()  # comment-only data still inserts [1400, 1450]
            ctx.update_start(1500)
            ctx.insert("# nothing!")
            ctx.update_end(1550)
            ctx.finalize()  # inserts [1500, 1550]
            ctx.insert("# nothing!\n" * 10)
            ctx.finalize()  # nothing; no start, end, or timestamps given
        # the implicit finalize on context exit has nothing left to insert
        # Check everything
        eq_(info(), [(1, [100, 145]),
                     (0, [175, 200]),
                     (0, [300, 350]),
                     (0, [400, 450]),
                     (0, [500, 550]),
                     (0, [1000, 1050]),
                     (1, [1100, 1101]),
                     (1, [1199, 1250]),
                     (0, [1400, 1450]),
                     (0, [1500, 1550]),
                     ])

        # Clean up
        client.stream_remove("/empty/test")
        client.stream_destroy("/empty/test")
        client.close()

    def test_client_12_persistent(self):
        # Check that connections are NOT persistent.  Rather than trying
        # to verify this at the TCP level, just make sure that the
        # response contained a "Connection: close" header.
        #
        # (Background: the client avoids HTTP/1.1 keep-alive entirely.
        # Requests may be separated by an arbitrary delay, and reusing a
        # connection after a delay close to the server's keep-alive
        # timeout races with the server closing it, which shows up as
        # "httplib.BadStatusLine" errors.  Sending every request on a
        # fresh connection avoids the race, at a hopefully small cost.)
        with nilmdb.client.Client(url = testurl) as c:
            c.stream_create("/persist/test", "uint16_1")
            eq_(c.http._last_response.headers["Connection"], "close")

            c.stream_destroy("/persist/test")
            eq_(c.http._last_response.headers["Connection"], "close")

    def test_client_13_timestamp_rounding(self):
        # Test potentially bad timestamps (due to floating point
        # roundoff etc).  The server will round floating point values
        # to the nearest int.
        client = nilmdb.client.Client(testurl)

        client.stream_create("/rounding/test", "uint16_1")
        with client.stream_insert_context("/rounding/test",
                                          100000000, 200000000.1) as ctx:
            ctx.insert("100000000.1 1\n")
            ctx.insert("150000000.00003 1\n")
            ctx.insert("199999999.4 1\n")
        eq_(list(client.stream_intervals("/rounding/test")),
            [ [ 100000000, 200000000 ] ])

        with assert_raises(ClientError):
            with client.stream_insert_context("/rounding/test",
                                              200000000, 300000000) as ctx:
                ctx.insert("200000000 1\n")
                ctx.insert("250000000 1\n")
                # Server will round this up to 300000000, which falls
                # outside the interval, and give an error on finalize()
                ctx.insert("299999999.99 1\n")

        client.stream_remove("/rounding/test")
        client.stream_destroy("/rounding/test")
        client.close()