Parsed output to generate keyvals for specific netperf tests. Additionally,
made this test, as well as the iperf test, use the new scheme for storing
three-dimensional data in the keyval files: multiple iterations combined with
attributes.
Signed-off-by: Bryce Boe <bboe@google.com>
git-svn-id: http://test.kernel.org/svn/autotest/trunk@2113 592f7852-d20e-0410-864c-8624ca9c26a4
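For context, a minimal standalone sketch of the keyval scheme the message
refers to: each stream count becomes its own iteration, with the stream count
recorded as an attribute and the measurements as regular keyvals. The stub
class and the sample numbers are illustrative only; only the
write_iteration_keyval(attr, keyval) call pattern mirrors the patch below.

    # Stand-in for the autotest test object; the real write_iteration_keyval
    # appends one iteration to the test's keyval file.
    class FakeTest(object):
        def write_iteration_keyval(self, attr, keyval):
            print attr, keyval

    test = FakeTest()
    # (stream_count, cpu%, throughput) triples -- illustrative numbers only.
    for streams, cpu, throughput in [(1, 12.5, 941.28), (2, 21.0, 938.10)]:
        test.write_iteration_keyval({'stream_count': streams},
                                    {'CPU': cpu, 'Throughput': throughput})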
diff --git a/client/tests/netperf2/netperf2.py b/client/tests/netperf2/netperf2.py
index f1a78eb..3faba17b 100755
--- a/client/tests/netperf2/netperf2.py
+++ b/client/tests/netperf2/netperf2.py
@@ -26,7 +26,7 @@
'src/netperf -H')
self.valid_tests = ['TCP_STREAM', 'TCP_RR', 'TCP_CRR',
- 'UDP_STREAM', 'UDP_RR', 'UDP_CRR']
+ 'UDP_STREAM', 'UDP_RR']
self.results = []
@@ -35,25 +35,33 @@
if test not in self.valid_tests:
raise error.TestError('invalid test specified')
self.role = role
+ self.test = test
+ self.stream_list = stream_list
server_tag = server_ip + '#netperf-server'
client_tag = client_ip + '#netperf-client'
all = [server_tag, client_tag]
-
for num_streams in stream_list:
if role == 'server':
self.server_start()
try:
- self.job.barrier(server_tag, 'start', 120).rendevous(*all)
- self.job.barrier(server_tag, 'stop', 5400).rendevous(*all)
+ # Wait up to five minutes for the client to reach this
+ # point.
+ self.job.barrier(server_tag, 'start', 300).rendevous(*all)
+ # Wait up to test_time + 1 minute for the test to
+ # complete
+ self.job.barrier(server_tag, 'stop',
+ test_time+60).rendevous(*all)
finally:
self.server_stop()
elif role == 'client':
- self.job.barrier(client_tag, 'start', 120).rendevous(*all)
+ # Wait up to five minutes for the server to start
+ self.job.barrier(client_tag, 'start', 300).rendevous(*all)
self.client(server_ip, test, test_time, num_streams)
- self.job.barrier(client_tag, 'stop', 30).rendevous(*all)
+ # Wait up to 1 minute for the server to also reach this point
+ self.job.barrier(client_tag, 'stop', 60).rendevous(*all)
else:
raise error.TestError('invalid role specified')
@@ -74,9 +82,8 @@
try:
self.results.append(utils.get_cpu_percentage(
- utils.system_output_parallel,
- [cmd]*num_streams,
- retain_output=True))
+ utils.system_output_parallel, [cmd]*num_streams,
+ timeout=test_time+60, retain_output=True))
except error.CmdError, e:
""" Catch errors due to timeout, but raise others
The actual error string is:
@@ -86,15 +93,110 @@
Looking for 'within' is probably not the best way to do this but
works for now"""
- if 'within' in e.additional_text:
+ if ('within' in e.additional_text
+ or 'non-zero' in e.additional_text):
print e.additional_text
- self.results.append(None)
+ # Results are cpu%, outputs
+ self.results.append((0, None))
else:
raise
def postprocess(self):
- print "Post Processing"
- print self.role
- print self.results
- print "End Post Processing"
+ if self.role == 'client':
+ if len(self.stream_list) != len(self.results):
+ raise error.TestError('Mismatched number of results')
+
+ function = None
+ keys = None
+
+ # Each of the functions return tuples in which the keys define
+ # what that item in the tuple represents
+ if self.test == 'TCP_STREAM':
+ function = self.process_tcp_stream
+ keys = ('Throughput',)
+ elif self.test == 'UDP_STREAM':
+ function = self.process_udp_stream
+ keys = ('Throughput', 'Errors')
+ elif self.test in ['TCP_RR', 'TCP_CRR', 'UDP_RR']:
+ function = self.process_request_response
+ keys = ('Transfer_Rate',)
+ else:
+ raise error.TestError('Unhandled test')
+
+ # self.results is a list of tuples. The first element in each
+ # tuple is the cpu utilization for that run, and the second
+ # element is a list containing the output for each stream in that
+ # run.
+ for i, streams in enumerate(self.stream_list):
+ attr = {'stream_count':streams}
+ keyval = {}
+ temp_vals = []
+ keyval['CPU'], outputs = self.results[i]
+
+ # Short circuit to handle errors due to client timeouts
+ if not outputs:
+ for key in keys:
+ keyval[key] = 0
+ self.write_iteration_keyval(attr, keyval)
+ continue
+
+ for result in outputs:
+ temp_vals.append(function(result))
+
+ # Compute the average of elements returned from function which
+ # represent the string contained in keys
+ for j, key in enumerate(keys):
+ vals = [x[j] for x in temp_vals]
+ keyval[key] = sum(vals) / len(vals)
+
+ self.write_iteration_keyval(attr, keyval)
+
+
+ def process_tcp_stream(self, output):
+ """Parses the following and returns a singleton containing throughput.
+
+ TCP STREAM TEST from 0.0.0.0 (0.0.0.0) port 0 AF_INET to kcqz13.prod.google.com (10.75.222.13) port 0 AF_INET
+ Recv Send Send
+ Socket Socket Message Elapsed
+ Size Size Size Time Throughput
+ bytes bytes bytes secs. 10^6bits/sec
+
+ 87380 16384 16384 2.00 941.28
+ """
+
+ return float(output.splitlines()[6].split()[4]),
+
+
+ def process_udp_stream(self, output):
+ """Parses the following and returns a touple containing throughput
+ and the number of errors.
+
+ UDP UNIDIRECTIONAL SEND TEST from 0.0.0.0 (0.0.0.0) port 0 AF_INET to kcqz13.prod.google.com (10.75.222.13) port 0 AF_INET
+ Socket Message Elapsed Messages
+ Size Size Time Okay Errors Throughput
+ bytes bytes secs # # 10^6bits/sec
+
+ 129024 65507 2.00 3673 0 961.87
+ 131072 2.00 3673 961.87
+ """
+
+ line_tokens = output.splitlines()[5].split()
+ return float(line_tokens[5]), int(line_tokens[4])
+
+
+ def process_request_response(self, output):
+ """Parses the following which works for both rr (TCP and UDP) and crr
+ tests and returns a singleton containing transfer rate.
+
+ TCP REQUEST/RESPONSE TEST from 0.0.0.0 (0.0.0.0) port 0 AF_INET to kcqz13.prod.google.com (10.75.222.13) port 0 AF_INET
+ Local /Remote
+ Socket Size Request Resp. Elapsed Trans.
+ Send Recv Size Size Time Rate
+ bytes Bytes bytes bytes secs. per sec
+
+ 16384 87380 1 1 2.00 14118.53
+ 16384 87380
+ """
+
+ return float(output.splitlines()[6].split()[5]),
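As a usage note, each parsing helper indexes a fixed line and column of the
netperf summary. A minimal standalone sketch of that indexing, run against the
UDP_STREAM sample quoted in the process_udp_stream docstring (the sample text
is reproduced here purely for illustration):

    # Line 5 (0-indexed) is the local-side summary row; column 5 is the
    # throughput in 10^6 bits/sec and column 4 is the error count.
    SAMPLE = (
        'UDP UNIDIRECTIONAL SEND TEST from 0.0.0.0 (0.0.0.0) port 0 AF_INET\n'
        'Socket  Message  Elapsed      Messages\n'
        'Size    Size     Time         Okay Errors   Throughput\n'
        'bytes   bytes    secs            #      #   10^6bits/sec\n'
        '\n'
        '129024   65507   2.00        3673      0      961.87\n'
        '131072           2.00        3673             961.87\n')

    tokens = SAMPLE.splitlines()[5].split()
    print float(tokens[5]), int(tokens[4])   # -> 961.87 0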