aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorTreeHugger Robot <treehugger-gerrit@google.com>2017-12-15 17:03:59 +0000
committerAndroid (Google) Code Review <android-gerrit@google.com>2017-12-15 17:03:59 +0000
commitf581f6c7c25c8356b49499e41ab4df13c609a2b0 (patch)
tree88848d8806187ab98e995b2405d1d67cd3da7ade
parentbde4b008fab6f1fb25e141c04f183ac40809df92 (diff)
parentc5fad389cc870d6e8e5dfd568f498e9fa41ddddd (diff)
downloadlisa-f581f6c7c25c8356b49499e41ab4df13c609a2b0.tar.gz
Merge changes from topic "cpu_energy_modeling"
* changes: power_profile: update the cpu freq script to support experiment experiments/power: add active and cluster costs experiments/power: Refactor run_cpu_freq experiments/power: Continue previous experiment experiments/power: Taskset dhrystone experiments/power: Sleep until dhrystone dies experiments/power: Correctly update cgroups in cpu freq experiments/power: Hold wakelock during cpu freq experiments/power: Disable thermal throttling
-rwxr-xr-xexperiments/power/eas/run_cpu_frequency.py224
-rwxr-xr-xtools/scripts/power/cpu_frequency_power_average.py309
-rwxr-xr-xtools/scripts/power/generate_power_profile.py153
3 files changed, 433 insertions, 253 deletions
diff --git a/experiments/power/eas/run_cpu_frequency.py b/experiments/power/eas/run_cpu_frequency.py
index 011b942..9780ec7 100755
--- a/experiments/power/eas/run_cpu_frequency.py
+++ b/experiments/power/eas/run_cpu_frequency.py
@@ -39,6 +39,10 @@ parser.add_argument('--out_prefix', dest='out_prefix', action='store',
default='default',
help='prefix for out directory')
+parser.add_argument('--continue', dest='cont', action='store',
+ default=False, type=bool,
+ help='continue previous experiment with same prefix')
+
parser.add_argument('--duration', dest='duration_s', action='store',
default=30, type=int,
help='Duration of test (default 30s)')
@@ -52,10 +56,10 @@ CRITICAL_TASKS = [
"/system/bin/sh", "adbd", "/init"
]
-def outfiles(on_cpus, freq):
- cpu_str = ''.join('{}-'.format(c) for c in on_cpus)
- samples = 'cpus{}freq{}-samples.csv'.format(cpu_str, freq)
- energy = 'cpus{}freq{}-energy.json'.format(cpu_str, freq)
+def outfiles(cluster, cpus, freq):
+ prefix = 'cluster{}-cores{}freq{}_'.format(str(cluster), ''.join('{}-'.format(cpu) for cpu in cpus), freq)
+ samples = '{}samples.csv'.format(prefix)
+ energy = '{}energy.json'.format(prefix)
return energy, samples
def update_cpus(target, on_cpus, off_cpus):
@@ -65,49 +69,29 @@ def update_cpus(target, on_cpus, off_cpus):
for cpu in off_cpus:
target.hotplug.offline(cpu)
-def experiment():
- # Check if the dhyrstone binary is on the device
- dhrystone = os.path.join(target.executables_directory, 'dhrystone')
- if not target.file_exists(dhrystone):
- raise RuntimeError('dhrystone could not be located here: {}'.format(
- dhrystone))
-
- # Create results directory
- outdir=te.res_dir + '_' + args.out_prefix
- try:
- shutil.rmtree(outdir)
- except:
- print "couldn't remove " + outdir
- pass
- os.makedirs(outdir)
-
- # Get clusters and cpus
- clusters = te.topology.get_level('cluster')
- cpus = [cpu for cluster in clusters for cpu in cluster]
-
- # Prevent screen from dozing
- Screen.set_doze_always_on(target, on=False)
+def run_dhrystone(target, dhrystone, outdir, energy, samples, on_cpus):
+ # Run dhrystone benchmark for longer than the requested time so
+ # we have extra time to set up the measuring device
+ for on_cpu in on_cpus:
+ target.execute('nohup taskset {:x} {} -t {} -r {} 2>/dev/null 1>/dev/null &'.format(1 << (on_cpu), dhrystone, 1, args.duration_s+30))
- # Turn on airplane mode
- System.set_airplane_mode(target, on=True)
+ # Start measuring
+ te.emeter.reset()
- # Turn off screen
- Screen.set_screen(target, on=False)
-
- # Store governors so they can be restored later
- governors = [ target.cpufreq.get_governor(cpu) for cpu in cpus]
+ # Sleep for the required time
+ sleep(args.duration_s)
- # Set the governer to userspace so the cpu frequencies can be set
- target.hotplug.online_all()
- target.cpufreq.set_all_governors('userspace')
+ # Stop measuring
+ te.emeter.report(outdir, out_energy=energy, out_samples=samples)
- # Freeze all non critical tasks
- target.cgroups.freeze(exclude=CRITICAL_TASKS)
+ # Since we are using nohup, the benchmark doesn't show up in
+ # process list. Instead sleep until we can be sure the benchmark
+ # is dead.
+ sleep(30)
+def single_cluster(cpus, sandbox_cg, isolated_cg, dhrystone, outdir):
# For each cluster
- for cluster in clusters:
- # Remove all userspace tasks from the cluster
- target_cg, _ = target.cgroups.isolate(cluster)
+ for i, cluster in enumerate(CLUSTERS):
# For each frequency on the cluster
for freq in target.cpufreq.list_frequencies(cluster[0]):
@@ -123,39 +107,149 @@ def experiment():
on_cpus.append(cpu)
off_cpus.remove(cpu)
- # Bring the on_cpus online and take the off_cpus offline
+ # Switch the output file so the previous samples are not overwritten
+ energy, samples = outfiles(i, on_cpus, freq)
+
+ # If we are continuing from a previous experiment and this set has
+ # already been run, skip it
+ if args.cont and os.path.isfile(os.path.join(outdir, energy)) and os.path.isfile(os.path.join(outdir, samples)):
+ continue
+
+        # Bring the on_cpus online and take the off_cpus offline
update_cpus(target, on_cpus, off_cpus)
+ target.cpufreq.set_frequency(cpu, freq)
- # Update the target cgroup in case hotplugging has introduced
- # any errors
- target_cg.set(cpus=on_cpus)
+ # Update sandbox and isolated cgroups
+ sandbox_cg.set(cpus=on_cpus)
+ isolated_cg.set(cpus=off_cpus)
- # Switch the output file so the previous samples are not overwritten
- energy, samples = outfiles(on_cpus, freq)
+ # Run the benchmark
+ run_dhrystone(target, dhrystone, outdir, energy, samples, on_cpus)
- # Set cpu frequency for the newly add cpu
- target.cpufreq.set_frequency(cpu, freq)
+ # Restore all the cpus
+ target.hotplug.online_all()
+
+
+def multiple_clusters(cpus, sandbox_cg, isolated_cg, dhrystone, outdir):
+ # Keep track of offline and online cpus
+ off_cpus = cpus[:]
+ on_cpus = []
+ prefix = ''
+
+ if len(CLUSTERS) != 2:
+ print 'Only 2 clusters is supported.'
+ return
+
+ # For each cluster
+ for i, cluster in enumerate(CLUSTERS):
+ # A cpu in each cluster
+ cpu = cluster[0]
+
+ freq = target.cpufreq.list_frequencies(cpu)[0]
+
+ # Set frequency to min
+ target.cpufreq.set_frequency(cpu, freq)
+
+ # Keep cpu on
+ on_cpus.append(cpu)
+ off_cpus.remove(cpu)
+
+ prefix = '{}cluster{}-cores{}-freq{}_'.format(prefix, i, cpu, freq)
- # Run dhrystone benchmark for longer than the requested time so
- # we have extra time to set up the measuring device
- target.execute('nohup {} -t {} -r {} 2>/dev/null 1>/dev/null'
- ' &'.format(dhrystone, len(on_cpus), args.duration_s+60))
+ # Update cgroups to reflect on_cpus and off_cpus
+ sandbox_cg.set(cpus=on_cpus)
+ isolated_cg.set(cpus=off_cpus)
- # Start measuring
- te.emeter.reset()
+    # Bring the on_cpus online and take the off_cpus offline
+ update_cpus(target, on_cpus, off_cpus)
- # Sleep for the required time
- sleep(args.duration_s)
+ # For one cpu in each cluster
+ for i, cpu in enumerate(on_cpus):
- # Stop measuring
- te.emeter.report(outdir, out_energy=energy, out_samples=samples)
+ # For each frequency on the cluster
+ for freq in target.cpufreq.list_frequencies(cpu):
+
+ # Switch the output file so the previous samples are not overwritten
+ curr_prefix = prefix.replace('cores{}-freq{}'.format(cpu,
+ target.cpufreq.list_frequencies(cpu)[0]),
+ 'cores{}-freq{}'.format(cpu, freq))
+ samples = '{}samples.csv'.format(curr_prefix)
+ energy = '{}energy.json'.format(curr_prefix)
+
+ # If we are continuing from a previous experiment and this set has
+ # already been run, skip it
+ if args.cont and os.path.isfile(os.path.join(outdir, energy)) and os.path.isfile(os.path.join(outdir, samples)):
+ continue
- # Kill dhrystone so it does not affect the next measurement
- pids = target.killall('dhyrstone')
+ # Set frequency
+ target.cpufreq.set_frequency(cpu, freq)
+
+ # Run the benchmark
+ run_dhrystone(target, dhrystone, outdir, energy, samples, on_cpus)
+
+ # Reset frequency to min
+ target.cpufreq.set_frequency(cpu, target.cpufreq.list_frequencies(cpu)[0])
# Restore all the cpus
target.hotplug.online_all()
+
+def experiment():
+    # Check if the dhrystone binary is on the device
+ dhrystone = os.path.join(target.executables_directory, 'dhrystone')
+ if not target.file_exists(dhrystone):
+ raise RuntimeError('dhrystone could not be located here: {}'.format(
+ dhrystone))
+
+ # Create results directory
+ outdir=te.res_dir + '_' + args.out_prefix
+ if not args.cont:
+ try:
+ shutil.rmtree(outdir)
+ except:
+ print "couldn't remove " + outdir
+ pass
+ if not os.path.exists(outdir):
+ os.makedirs(outdir)
+
+ # Get clusters and cpus
+ cpus = [cpu for cluster in CLUSTERS for cpu in cluster]
+
+ # Prevent screen from dozing
+ Screen.set_doze_always_on(target, on=False)
+
+ # Turn on airplane mode
+ System.set_airplane_mode(target, on=True)
+
+ # Turn off screen
+ Screen.set_screen(target, on=False)
+
+ # Stop thermal engine and perfd
+ target.execute("stop thermal-engine")
+ target.execute("stop perfd")
+
+ # Take a wakelock
+ System.wakelock(target, take=True)
+
+ # Store governors so they can be restored later
+ governors = [ target.cpufreq.get_governor(cpu) for cpu in cpus]
+
+ # Set the governer to userspace so the cpu frequencies can be set
+ target.hotplug.online_all()
+ target.cpufreq.set_all_governors('userspace')
+
+ # Freeze all non critical tasks
+ target.cgroups.freeze(exclude=CRITICAL_TASKS)
+
+ # Remove all userspace tasks from the cluster
+ sandbox_cg, isolated_cg = target.cgroups.isolate([])
+
+ # Run measurements on single cluster
+ single_cluster(cpus, sandbox_cg, isolated_cg, dhrystone, outdir)
+
+ # Run measurements on multiple clusters
+ multiple_clusters(cpus, sandbox_cg, isolated_cg, dhrystone, outdir)
+
# Restore all governors
for i, governor in enumerate(governors):
target.cpufreq.set_governor(cpus[i], governor)
@@ -163,6 +257,13 @@ def experiment():
# Restore non critical tasks
target.cgroups.freeze(thaw=True)
+ # Release wakelock
+ System.wakelock(target, take=False)
+
+ # Stop thermal engine and perfd
+ target.execute("start thermal-engine")
+ target.execute("start perfd")
+
# Dump platform
te.platform_dump(outdir)
@@ -211,5 +312,6 @@ if args.serial:
# Initialize a test environment using:
te = TestEnv(my_conf, wipe=False)
target = te.target
+CLUSTERS = te.topology.get_level('cluster')
results = experiment()
diff --git a/tools/scripts/power/cpu_frequency_power_average.py b/tools/scripts/power/cpu_frequency_power_average.py
index eb03250..3d0a612 100755
--- a/tools/scripts/power/cpu_frequency_power_average.py
+++ b/tools/scripts/power/cpu_frequency_power_average.py
@@ -20,6 +20,7 @@ from __future__ import division
import os
import re
import json
+import glob
import argparse
import pandas as pd
import numpy as np
@@ -33,153 +34,213 @@ from power_average import PowerAverage
def average(values):
return sum(values) / len(values)
-class Cluster:
- def __init__(self, cpus, freqs):
- # Cpus in the cluster
- self.cpus = cpus
- # Frequencies supported by the cluster
- self.freqs = freqs
- # The average cluster cost of this cluster
- self.cluster_cost = 0.0
- # The average cpu costs by frequency
- self.cpu_costs = {}
- # Samples is a dict whose keys are tuples of cpus. Its values are dicts
- # whose keys are frequencies and values are sample averages.
- # For example: to access the sample averages for cpus 0, 1, 2 at frequency
- # 595200. avg = samples[(0, 1, 2)][595200]
- self.samples = {}
-
- def contains(self, cpus):
- return set(self.cpus).issuperset(set(cpus))
-
- def add_sample(self, cpus, freq, cost):
- # Convert the cpus to a tuple because mutable lists cannot be used as
- # keys to dicts.
- cpus_tuple = tuple(cpus)
-
- if cpus_tuple not in self.samples:
- self.samples[cpus_tuple] = {}
-
- self.samples[cpus_tuple][freq] = cost
-
- def get_sample(self, cpus, freq):
- return self.samples[tuple(cpus)][freq]
+class SampleReader:
+ def __init__(self, results_dir, column):
+ self.results_dir = results_dir
+ self.column = column
+
+ def get(self, filename):
+ files = glob.glob(os.path.join(self.results_dir, filename))
+ if len(files) != 1:
+ raise ValueError('Multiple files match pattern')
+ return PowerAverage.get(files[0], self.column)
+
+class Cpu:
+ def __init__(self, platform_file, sample_reader):
+ self.platform_file = platform_file
+ self.sample_reader = sample_reader
+        # This is the additional cost when any cluster is on. It is separate from
+ # the cluster cost because it is not duplicated when a second cluster
+ # turns on.
+ self.active_cost = -1.0
+
+        # Read in the cluster and frequency information from the platform.json
+ with open(platform_file, 'r') as f:
+ platform = json.load(f)
+ self.clusters = {i : Cluster(self.sample_reader, i, platform["clusters"][i],
+ platform["freqs"][i]) for i in sorted(platform["clusters"])}
+
+ if len(self.clusters) != 2:
+ raise ValueError('Only cpus with 2 clusters are supported')
+
+ self.compute_costs()
def compute_costs(self):
- # At any given frequency, the total power usage of the cluster is
- # total_power = cluster_cost + cpu_cost * n_cpus
- #
- # Given this formula we can compute the cluster cost and cpu cost at
- # each frequency.
+ # Compute initial core costs by freq. These are necessary for computing the
+ # cluster and active costs. However, since the cluster and active costs are computed
+ # using averages across all cores and frequencies, we will need to adjust the
+ # core cost at the end.
#
- # While the computed cluster_cost can vary based on frequency, we need
- # to get one cluster_cost. To do this, we will
- # take the average cluster_cost.
- #
- # Once we have an average cluster_cost, we can go back and compute the
- # cost of an additional cpu at each frequency relative to the average
- # cluster_cost.
+ # For example: The total cpu cost of core 0 on cluster 0 running at
+        # a given frequency is 25. We initially compute the core cost as 10.
+ # However the active and cluster averages end up as 9 and 3. 10 + 9 + 3 is
+        # 22 not 25. We can adjust the core cost to 13 to cover this error.
+ for cluster in self.clusters:
+ self.clusters[cluster].compute_initial_core_costs()
+
+ # Compute the cluster costs
+ cluster0 = self.clusters.values()[0]
+ cluster1 = self.clusters.values()[1]
+ cluster0.compute_cluster_cost(cluster1)
+ cluster1.compute_cluster_cost(cluster0)
+
+ # Compute the active cost as an average of computed active costs by cluster
+ self.active_cost = average([self.clusters[cluster].compute_active_cost() for cluster in self.clusters])
+
+ # Compute final core costs. This will help correct for any errors introduced
+ # by the averaging of the cluster and active costs.
+ for cluster in self.clusters:
+ self.clusters[cluster].compute_final_core_costs(self.active_cost)
+
+ def get_clusters(self):
+ with open(self.platform_file, 'r') as f:
+ platform = json.load(f)
+ return platform["clusters"]
- # Compute cluster cost
- cluster_costs = []
+ def get_active_cost(self):
+ return self.active_cost
+
+ def get_cluster_cost(self, cluster):
+ return self.clusters[cluster].get_cluster_cost()
- for freq in self.freqs:
- for n in range(1, len(self.cpus)):
- # cluster_cost + cpu_cost * n
- n_cost = self.get_sample(self.cpus[:n], freq)
- # cluster_cost + cpu_cost * (n + 1)
- n_plus_one_cost = self.get_sample(self.cpus[:n+1], freq)
+ def get_cores(self, cluster):
+ return self.clusters[cluster].get_cores()
- # (cluster_cost + cpu_cost * (n + 1)) - (cluster_cost + cpu_cost * n)
- cpu_cost = n_plus_one_cost - n_cost
+ def get_core_freqs(self, cluster):
+ return self.clusters[cluster].get_freqs()
- # cpu_cost * n
- n_cpu_cost = cpu_cost * n
+ def get_core_cost(self, cluster, freq):
+ return self.clusters[cluster].get_core_cost(freq)
- # (cluster_cost + cpu_cost * n) - (cpu_cost * n)
- cluster_costs.append(n_cost - n_cpu_cost)
+ def dump(self):
+ print 'Active cost: {}'.format(self.active_cost)
+ for cluster in self.clusters:
+ self.clusters[cluster].dump()
+class Cluster:
+ def __init__(self, sample_reader, handle, cores, freqs):
+ self.sample_reader = sample_reader
+ self.handle = handle
+ self.cores = cores
+ self.cluster_cost = -1.0
+ self.core_costs = {freq:-1.0 for freq in freqs}
+
+ def compute_initial_core_costs(self):
+ # For every frequency, freq
+ for freq, _ in self.core_costs.iteritems():
+ total_costs = []
+ core_costs = []
+
+ # Store the total cost for turning on 1 to len(cores) on the
+ # cluster at freq
+ for cnt in range(1, len(self.cores)+1):
+ total_costs.append(self.get_sample_avg(cnt, freq))
+
+ # Compute the additional power cost of turning on another core at freq.
+ for i in range(len(total_costs)-1):
+ core_costs.append(total_costs[i+1] - total_costs[i])
+
+ # The initial core cost is the average of the additional power to add
+ # a core at freq
+ self.core_costs[freq] = average(core_costs)
+
+ def compute_final_core_costs(self, active_cost):
+ # For every frequency, freq
+ for freq, _ in self.core_costs.iteritems():
+ total_costs = []
+ core_costs = []
+
+ # Store the total cost for turning on 1 to len(cores) on the
+ # cluster at freq
+ for core_cnt in range(1, len(self.cores)+1):
+ total_costs.append(self.get_sample_avg(core_cnt, freq))
+
+ # Recompute the core cost as the sample average minus the cluster and
+ # active costs divided by the number of cores on. This will help
+ # correct for any error introduced by averaging the cluster and
+ # active costs.
+ for i, total_cost in enumerate(total_costs):
+ core_cnt = i + 1
+ core_costs.append((total_cost - self.cluster_cost - active_cost) / (core_cnt))
+
+ # The final core cost is the average of the core costs at freq
+ self.core_costs[freq] = average(core_costs)
+
+ def compute_cluster_cost(self, other_cluster=None):
+ # Create a template for the file name. For each frequency we will be able
+ # to easily substitute it into the file name.
+ template = '{}_samples.csv'.format('_'.join(sorted(
+ ['cluster{}-cores?-freq{{}}'.format(self.handle),
+ 'cluster{}-cores?-freq{}'.format(other_cluster.get_handle(),
+ other_cluster.get_min_freq())])))
+
+ # Get the cost of running a single cpu at min frequency on the other cluster
+ cluster_costs = []
+ other_cluster_total_cost = other_cluster.get_sample_avg(1, other_cluster.get_min_freq())
+
+ # For every frequency
+ for freq, core_cost in self.core_costs.iteritems():
+ # Get the cost of running a single core on this cluster at freq and
+ # a single core on the other cluster at min frequency
+ total_cost = self.sample_reader.get(template.format(freq))
+ # Get the cluster cost by subtracting all the other costs from the
+ # total cost so that the only cost that remains is the cluster cost
+ # of this cluster
+ cluster_costs.append(total_cost - core_cost - other_cluster_total_cost)
+
+ # Return the average calculated cluster cost
self.cluster_cost = average(cluster_costs)
- # Compute cpu costs
- for freq in self.freqs:
- cpu_costs = []
+ def compute_active_cost(self):
+ active_costs = []
- for n in range(1, len(self.cpus) + 1):
- # cluster_cost + cpu_cost * n
- total_cost = self.get_sample(self.cpus[:n], freq)
+ # For every frequency
+ for freq, core_cost in self.core_costs.iteritems():
+ # For every core
+ for i, core in enumerate(self.cores):
+ core_cnt = i + 1
+ # Subtract the core and cluster costs from each total cost.
+ # The remaining cost is the active cost
+ active_costs.append(self.get_sample_avg(core_cnt, freq)
+ - core_cost*core_cnt - self.cluster_cost)
- # ((cluster_cost + cpu_cost * n) - cluster_cost) / n
- cpu_costs.append((total_cost - self.cluster_cost) / n)
+ # Return the average active cost
+ return average(active_costs)
- self.cpu_costs[freq] = average(cpu_costs)
+ def get_handle(self):
+ return self.handle
- def get_cpus(self):
- return self.cpus
+ def get_min_freq(self):
+ return min(self.core_costs, key=self.core_costs.get)
+
+ def get_sample_avg(self, core_cnt, freq):
+ core_str = ''.join('{}-'.format(self.cores[i]) for i in range(core_cnt))
+ filename = 'cluster{}-cores{}freq{}_samples.csv'.format(self.handle, core_str, freq)
+ return self.sample_reader.get(filename)
def get_cluster_cost(self):
return self.cluster_cost
- def get_cpu_freqs(self):
- return sorted(list(self.cpu_costs.keys()))
-
- def get_cpu_cost(self, freq):
- return self.cpu_costs[freq]
+ def get_cores(self):
+ return self.cores
- def __str__(self):
- cpu_cost_str = "".join("\tfreq: %s \tcost: %s\n" % (f, self.cpu_costs[f])
- for f in sorted(self.cpu_costs))
- return "Cluster: {}\nCluster cost: {}\nCpu cost:\n{}".format(self.cpus,
- self.cluster_cost, cpu_cost_str)
+ def get_freqs(self):
+ return self.core_costs.keys()
- __repr__ = __str__
+ def get_core_cost(self, freq):
+ return self.core_costs[freq]
+ def dump(self):
+ print 'Cluster {} cost: {}'.format(self.handle, self.cluster_cost)
+ for freq in sorted(self.core_costs):
+ print '\tfreq {} cost: {}'.format(freq, self.core_costs[freq])
class CpuFrequencyPowerAverage:
@staticmethod
def get(results_dir, platform_file, column):
- clusters = []
-
- CpuFrequencyPowerAverage._populate_clusters(clusters, platform_file)
- CpuFrequencyPowerAverage._parse_samples(clusters, results_dir, column)
- CpuFrequencyPowerAverage._compute_costs(clusters)
-
- return clusters
-
- @staticmethod
- def _populate_clusters(clusters, platform_file):
- with open(platform_file, 'r') as f:
- platform = json.load(f)
-
- for i in sorted(platform["clusters"]):
- clusters.append(Cluster(platform["clusters"][i], platform["freqs"][i]))
-
- @staticmethod
- def _parse_samples(clusters, results_dir, column):
- for filename in os.listdir(results_dir):
- if filename.endswith(".csv"):
-
- # Extract the cpu and frequency information from the file name
- m = re.match('cpus(?P<cpus>(\d-)+)freq(?P<freq>\d*)-samples.csv',
- filename)
-
- # Get the cpus running during the sample in an int tuple
- cpus = tuple(map(int, m.group('cpus')[:-1].split('-')))
- freq = int(m.group('freq'))
-
- # Add the cost to the correct cluster
- cost = PowerAverage.get(os.path.join(results_dir, filename),
- column)
-
- for cluster in clusters:
- if cluster.contains(cpus):
- cluster.add_sample(cpus, freq, cost)
- break;
-
- @staticmethod
- def _compute_costs(clusters):
- for cluster in clusters:
- cluster.compute_costs()
+ sample_reader = SampleReader(results_dir, column)
+ cpu = Cpu(platform_file, sample_reader)
+ return cpu
parser = argparse.ArgumentParser(
@@ -187,7 +248,7 @@ parser = argparse.ArgumentParser(
" specify a time interval over which to calculate the sample.")
parser.add_argument("--column", "-c", type=str, required=True,
- help="The name of the column in the sample.csv's that"
+ help="The name of the column in the samples.csv's that"
" contain the power values to average.")
parser.add_argument("--results_dir", "-d", type=str,
@@ -205,5 +266,5 @@ parser.add_argument("--platform_file", "-p", type=str,
if __name__ == "__main__":
args = parser.parse_args()
- print CpuFrequencyPowerAverage.get(args.results_dir, args.platform_file,
- args.column)
+ cpu = CpuFrequencyPowerAverage.get(args.results_dir, args.platform_file, args.column)
+ cpu.dump()
diff --git a/tools/scripts/power/generate_power_profile.py b/tools/scripts/power/generate_power_profile.py
index b7a550b..24632c0 100755
--- a/tools/scripts/power/generate_power_profile.py
+++ b/tools/scripts/power/generate_power_profile.py
@@ -34,8 +34,8 @@ class PowerProfile:
'battery.capacity' : 'This is the battery capacity in mAh',
- 'cpu.idle' : 'Power consumption when CPU is suspended',
- 'cpu.awake' : 'Additional power consumption when CPU is in a kernel'
+ 'cpu.suspend' : 'Power consumption when CPU is suspended',
+ 'cpu.idle' : 'Additional power consumption when CPU is in a kernel'
' idle loop',
'cpu.clusters.cores' : 'Number of cores each CPU cluster contains',
@@ -122,7 +122,7 @@ class PowerProfileGenerator:
self.emeter = emeter
self.datasheet = datasheet
self.power_profile = PowerProfile()
- self.clusters = None
+ self.cpu = None
def get(self):
self._compute_measurements()
@@ -148,22 +148,22 @@ class PowerProfileGenerator:
self._run_experiment(os.path.join('power', 'eas',
'run_cpu_frequency.py'), duration, 'cpu_freq')
- self.clusters = CpuFrequencyPowerAverage.get(
+ self.cpu= CpuFrequencyPowerAverage.get(
os.path.join(os.environ['LISA_HOME'], 'results',
'CpuFrequency_cpu_freq'), os.path.join(os.environ['LISA_HOME'],
'results', 'CpuFrequency', 'platform.json'),
self.emeter['power_column'])
- def _remove_cpu_idle(self, power):
- cpu_idle_power = self.power_profile.get_item('cpu.idle')
- if cpu_idle_power is None:
- self._measure_cpu_idle()
- cpu_idle_power = self.power_profile.get_item('cpu.idle')
+ def _remove_cpu_suspend(self, power):
+ cpu_suspend_power = self.power_profile.get_item('cpu.suspend')
+ if cpu_suspend_power is None:
+ self._measure_cpu_suspend()
+ cpu_suspend_power = self.power_profile.get_item('cpu.suspend')
- return power - cpu_idle_power
+ return power - cpu_suspend_power
- def _remove_cpu_active(self, power, duration, results_dir):
- if self.clusters is None:
+ def _remove_cpu_power(self, power, duration, results_dir):
+ if self.cpu is None:
self._cpu_freq_power_average()
cfile = os.path.join(os.environ['LISA_HOME'], 'results', results_dir,
@@ -175,18 +175,17 @@ class PowerProfileGenerator:
for cl in sorted(time_in_state_json['clusters']):
time_in_state_cpus = set(int(c) for c in time_in_state_json['clusters'][cl])
- for cluster in self.clusters:
- if time_in_state_cpus == set(cluster.get_cpus()):
- cpu_cnt = len(cluster.get_cpus())
+ for cluster in self.cpu.get_clusters():
+ if time_in_state_cpus == set(self.cpu.get_cores(cluster)):
+ cpu_cnt = len(self.cpu.get_cores(cluster))
for freq, time_cs in time_in_state_json['time_delta'][cl].iteritems():
time_s = time_cs * 0.01
- energy += time_s * cluster.get_cpu_cost(int(freq))
+ energy += time_s * self.cpu.get_core_cost(cluster, int(freq))
# TODO remove cpu cluster cost and addtional base cost
- # This will require updating the cpu_frequency script
- # to calcualte the base cost and a kernel patch to
- # keep track of cluster time
+ # This will require a kernel patch to keep track of cluster
+ # time
return power - energy / duration * 1000
@@ -198,34 +197,32 @@ class PowerProfileGenerator:
args='--collect=energy,time_in_state --brightness 100 --image={}'.format(image))
display_plus_cpu_power = self._power_average(results_dir)
- display_power = self._remove_cpu_active(display_plus_cpu_power,
+ display_power = self._remove_cpu_power(display_plus_cpu_power,
duration, results_dir)
return power - display_power
- # The power profile defines cpu.idle as a suspended cpu
- def _measure_cpu_idle(self):
+ def _measure_cpu_suspend(self):
duration = 120
- self._run_experiment('run_suspend_resume.py', duration, 'cpu_idle',
+ self._run_experiment('run_suspend_resume.py', duration, 'cpu_suspend',
args='--collect energy')
- power = self._power_average('SuspendResume_cpu_idle',
+ power = self._power_average('SuspendResume_cpu_suspend',
start=duration*0.25, remove_outliers=True)
- self.power_profile.add_item('cpu.idle', power)
+ self.power_profile.add_item('cpu.suspend', power)
- # The power profile defines cpu.awake as an idle cpu
- def _measure_cpu_awake(self):
+ def _measure_cpu_idle(self):
duration = 120
- self._run_experiment('run_idle_resume.py', duration, 'cpu_awake',
+ self._run_experiment('run_idle_resume.py', duration, 'cpu_idle',
args='--collect energy')
- power = self._power_average('IdleResume_cpu_awake', start=duration*0.25,
+ power = self._power_average('IdleResume_cpu_idle', start=duration*0.25,
remove_outliers=True)
- power = self._remove_cpu_idle(power)
+ power = self._remove_cpu_suspend(power)
- self.power_profile.add_item('cpu.awake', power)
+ self.power_profile.add_item('cpu.idle', power)
def _measure_screen_on(self):
duration = 120
@@ -235,7 +232,7 @@ class PowerProfileGenerator:
args='--collect=energy,time_in_state --brightness 0')
power = self._power_average(results_dir)
- power = self._remove_cpu_active(power, duration, results_dir)
+ power = self._remove_cpu_power(power, duration, results_dir)
self.power_profile.add_item('screen.on', power)
@@ -247,53 +244,72 @@ class PowerProfileGenerator:
args='--collect=energy,time_in_state --brightness 100')
power = self._power_average(results_dir)
- power = self._remove_cpu_active(power, duration, results_dir)
+ power = self._remove_cpu_power(power, duration, results_dir)
self.power_profile.add_item('screen.full', power)
def _measure_cpu_cluster_cores(self):
- if self.clusters is None:
+ if self.cpu is None:
self._cpu_freq_power_average()
- cpu_cluster_cores = [ len(cluster.get_cpus()) for cluster in self.clusters ]
- self.power_profile.add_array('cpu.clusters.cores', cpu_cluster_cores)
+ self.power_profile.add_array('cpu.clusters.cores', self.cpu.get_clusters())
- def _measure_cpu_base_cluster(self):
- if self.clusters is None:
+ def _measure_cpu_active_power(self):
+ if self.cpu is None:
self._cpu_freq_power_average()
- for i, cluster in enumerate(self.clusters):
+ comment = 'Additional power used when any cpu core is turned on'\
+ ' in any cluster. Does not include the power used by the cpu'\
+ ' cluster(s) or core(s).'
+ self.power_profile.add_item('cpu.active', self.cpu.get_active_cost()*1000,
+ comment)
+
+ def _measure_cpu_cluster_power(self):
+ if self.cpu is None:
+ self._cpu_freq_power_average()
+
+ clusters = self.cpu.get_clusters()
+
+ for cluster in clusters:
+ cluster_power = self.cpu.get_cluster_cost(cluster)
+
comment = 'Additional power used when any cpu core is turned on'\
' in cluster{}. Does not include the power used by the cpu'\
- ' core(s).'.format(i)
- self.power_profile.add_item('cpu.base.cluster{}'.format(i),
- cluster.get_cluster_cost()*1000, comment)
+ ' core(s).'.format(cluster)
+ self.power_profile.add_item('cpu.cluster_power.cluster{}'.format(cluster),
+ cluster_power*1000, comment)
- def _measure_cpu_speeds_cluster(self):
- if self.clusters is None:
+ def _measure_cpu_core_speeds(self):
+ if self.cpu is None:
self._cpu_freq_power_average()
- for i, cluster in enumerate(self.clusters):
+ clusters = self.cpu.get_clusters()
+
+ for cluster in clusters:
+ core_speeds = self.cpu.get_core_freqs(cluster)
+
comment = 'Different CPU speeds as reported in /sys/devices/system/'\
- 'cpu/cpu{}/cpufreq/scaling_available_frequencies'.format(
- cluster.get_cpus()[0])
- freqs = cluster.get_cpu_freqs()
- self.power_profile.add_array('cpu.speeds.cluster{}'.format(i), freqs,
- comment)
-
- def _measure_cpu_active_cluster(self):
- if self.clusters is None:
+ 'cpu/cpuX/cpufreq/scaling_available_frequencies'
+ self.power_profile.add_array('cpu.core_speeds.cluster{}'.format(cluster),
+ core_speeds, comment)
+
+ def _measure_cpu_core_power(self):
+ if self.cpu is None:
self._cpu_freq_power_average()
- for i, cluster in enumerate(self.clusters):
- freqs = cluster.get_cpu_freqs()
- cpu_active = [ cluster.get_cpu_cost(freq)*1000 for freq in freqs ]
+ clusters = self.cpu.get_clusters()
+
+ for cluster in clusters:
+ core_speeds = self.cpu.get_core_freqs(cluster)
+
+ core_powers = [ self.cpu.get_core_cost(cluster, core_speed)*1000 for core_speed in core_speeds ]
comment = 'Additional power used by a CPU from cluster {} when'\
' running at different speeds. Currently this measurement'\
- ' also includes cluster cost.'.format(i)
- subcomments = [ '{} MHz CPU speed'.format(freq*0.001) for freq in freqs ]
- self.power_profile.add_array('cpu.active.cluster{}'.format(i),
- cpu_active, comment, subcomments)
+ ' also includes cluster cost.'.format(cluster)
+ subcomments = [ '{} MHz CPU speed'.format(core_speed*0.001) for core_speed in core_speeds ]
+
+ self.power_profile.add_array('cpu.core_power.cluster{}'.format(cluster),
+ core_powers, comment, subcomments)
def _measure_camera_flashlight(self):
duration = 120
@@ -306,7 +322,7 @@ class PowerProfileGenerator:
power = self._remove_screen_full(power, duration,
'power_profile_camera_flashlight.png')
- power = self._remove_cpu_active(power, duration, results_dir)
+ power = self._remove_cpu_power(power, duration, results_dir)
self.power_profile.add_item('camera.flashlight', power)
@@ -321,7 +337,7 @@ class PowerProfileGenerator:
power = self._remove_screen_full(power, duration,
'power_profile_camera_avg.png')
- power = self._remove_cpu_active(power, duration, results_dir)
+ power = self._remove_cpu_power(power, duration, results_dir)
self.power_profile.add_item('camera.avg', power)
@@ -335,17 +351,18 @@ class PowerProfileGenerator:
power = self._remove_screen_full(power, duration,
'power_profile_gps_on.png')
- power = self._remove_cpu_active(power, duration, results_dir)
+ power = self._remove_cpu_power(power, duration, results_dir)
self.power_profile.add_item('gps.on', power)
def _compute_measurements(self):
- self._measure_cpu_idle()
- self._measure_cpu_awake()
+ #self._measure_cpu_suspend()
+ #self._measure_cpu_idle()
self._measure_cpu_cluster_cores()
- self._measure_cpu_base_cluster()
- self._measure_cpu_speeds_cluster()
- self._measure_cpu_active_cluster()
+ self._measure_cpu_active_power()
+ self._measure_cpu_cluster_power()
+ self._measure_cpu_core_speeds()
+ self._measure_cpu_core_power()
self._measure_screen_on()
self._measure_screen_full()
self._measure_camera_flashlight()