Python `statistics` module — `StatisticsError()` example source code.
The following 31 code examples, extracted from open-source Python projects, illustrate how to use `statistics.StatisticsError()`.
def temp_stat(temps):
""" computes the average,median,std dev,and variance of temps """
import statistics
print(temps)
print("Mean: ", statistics.mean(temps))
print("Median: ", statistics.median(temps))
print("Standard deviation: ", statistics.stdev(temps))
print("Variance: ", statistics.variance(temps))
try:
print("Mode: ", statistics.mode(temps))
except statistics.StatisticsError as e:
print("Mode error: ", e)
#%%
def diagnosticity(evaluations):
    """Return the diagnosticity of a piece of evidence given its evaluations against a set of hypotheses.

    :param evaluations: an iterable of iterables of Eval for a piece of evidence
    """
    # Diagnosticity captures how well the evidence separates the hypotheses.
    # The population standard deviation of the per-hypothesis consensus is a
    # reasonable proxy: the hypothesis set at a given time is closer to a
    # population than a sample (and the sets compared are the same size).
    # N/A evaluations are mapped to neutral first because they say nothing
    # about consistency, and hypotheses with no consensus (None) are dropped.
    consensus = [mean_na_neutral_Vote(evaluation) for evaluation in evaluations]
    scores = [vote for vote in consensus if vote is not None]
    try:
        return statistics.pstdev(scores)
    except statistics.StatisticsError:
        # Empty score set: no separation information at all.
        return 0.0
def get_stats(self, metrics, lang=UNSPECIFIED_TRANSLATION, limit=100):
    """Return basic descriptive statistics for this numeric field.

    :param metrics: raw metrics, flattened into a numeric dataset
    :param lang: translation to use (passed through to the parent class)
    :param limit: maximum number of values (passed through to the parent class)
    :returns: the parent stats dict extended with median/mean/mode/stdev;
        '*' marks a statistic that could not be computed
    """
    stats = super(NumField, self).get_stats(metrics, lang, limit)
    stats.update({
        'median': '*',
        'mean': '*',
        'mode': '*',
        'stdev': '*'
    })
    # Flatten once instead of four times; list() also guards against
    # flatten_dataset returning a one-shot generator.
    dataset = list(self.flatten_dataset(metrics))
    try:
        # mean/median require a non-empty dataset; stdev needs >= 2 values
        stats['mean'] = statistics.mean(dataset)
        stats['median'] = statistics.median(dataset)
        stats['stdev'] = statistics.stdev(dataset, xbar=stats['mean'])
    except statistics.StatisticsError:
        pass
    try:
        # mode requires a non-empty dataset (and, before Python 3.8, a
        # unique mode); guarded separately so a stdev failure on a
        # single-value dataset no longer suppresses a computable mode
        stats['mode'] = statistics.mode(dataset)
    except statistics.StatisticsError:
        pass
    return stats
def async_update(self):
    """Get the latest data and updates the states."""
    # Numeric statistics only make sense for non-binary sensors.
    if not self.is_binary:
        try:
            # Round to two decimals for display.
            self.mean = round(statistics.mean(self.states), 2)
            self.median = round(statistics.median(self.states), 2)
            self.stdev = round(statistics.stdev(self.states), 2)
            self.variance = round(statistics.variance(self.states), 2)
        except statistics.StatisticsError as err:
            # Raised for an empty dataset, or fewer than two samples for
            # stdev/variance: log it and mark every statistic unknown.
            # NOTE(review): STATE_UNKNowN looks like a mangled
            # STATE_UNKNOWN constant - confirm against its defining module.
            _LOGGER.warning(err)
            self.mean = self.median = STATE_UNKNowN
            self.stdev = self.variance = STATE_UNKNowN
        # total/min/max need no statistics module and only a non-empty list.
        if self.states:
            self.total = round(sum(self.states), 2)
            self.min = min(self.states)
            self.max = max(self.states)
        else:
            self.min = self.max = self.total = STATE_UNKNowN
def get_mode_Trade_size(self, side: OrderSide, order_type: OrderType, seconds_ago: int,
                        group_by_period: Optional[int] = None) -> Optional[float]:
    """Return the modal trade size for the matching orders, or None.

    None is returned when no matching trades exist or when the mode is
    undefined for the collected quantities.
    """
    quantities = self.get_Trade_quantities(side, order_type, seconds_ago, group_by_period)
    if not quantities:
        return None
    try:
        return mode(quantities)
    except StatisticsError:
        return None
def get_median(self, totals):
    """Return the median of *totals*, falling back to 0 for an empty dataset."""
    try:
        result = statistics.median(totals)
    except statistics.StatisticsError:
        # statistics.median only raises on empty data.
        result = 0
    return result
def stat(f):
    """Wrap statistic *f* so it accepts scalars and raises a generic error."""
    def g(x):
        # Promote a scalar argument to a one-element dataset.
        data = x if isinstance(x, list) else [x]
        try:
            return f(data)
        except statistics.StatisticsError:
            raise Exception('Statistics Error')
    return ccfy(g)
def get_weight_variance(self, **kwargs):
    """Return the variance of this criterion's weights, or 0 when undefined.

    statistics.variance raises StatisticsError for fewer than two values,
    in which case 0 is returned.
    """
    Weight = apps.get_model('ddm_core', 'Weight')
    # NOTE(review): flat=True is a values_list() keyword, not a filter()
    # one - this likely should be
    # .filter(criterion=self).values_list('<field>', flat=True);
    # as written, filter() will treat `flat` as a (probably nonexistent)
    # model field. Confirm against the Weight model.
    weights = Weight.objects.filter(criterion=self, flat=True)
    try:
        return statistics.variance(weights)
    except statistics.StatisticsError:
        return 0
def test_has_exception(self):
    """StatisticsError must exist and subclass ValueError."""
    errmsg = (
        "Expected StatisticsError to be a ValueError,but got a"
        " subclass of %r instead."
    )
    self.assertTrue(hasattr(statistics, 'StatisticsError'))
    is_value_error = issubclass(statistics.StatisticsError, ValueError)
    self.assertTrue(is_value_error,
                    errmsg % statistics.StatisticsError.__base__)
# === Tests for private utility functions ===
def test_empty_data(self):
    """The function under test must reject empty data of every flavor."""
    # Cover a list, a tuple and a plain iterator.
    for empty in [], (), iter([]):
        with self.assertRaises(statistics.StatisticsError):
            self.func(empty)
def test_range_data(self):
    # Override test from UnivariateCommonMixin: mode of all-unique range
    # data must raise.
    data = range(20, 50, 3)
    # BUG FIX: assertRaises was called without the callable, so `data`
    # itself was "called" instead of the statistic; pass self.func so the
    # function under test is actually exercised (matches the sibling
    # test_empty_data above).
    self.assertRaises(statistics.StatisticsError, self.func, data)
def test_bimodal_data(self):
    # Test mode with bimodal data.
    # BUG FIX: the shown data had single occurrences of 2 and 6, so the
    # sanity assert below was false and the data was not bimodal;
    # restored the genuinely bimodal dataset (counts of 2 and 6 are 4).
    data = [1, 1, 2, 2, 2, 2, 3, 4, 5, 6, 6, 6, 6, 7, 8, 9, 9]
    assert data.count(2) == data.count(6) == 4
    # Check for an exception: no unique mode exists.
    # BUG FIX: assertRaises was missing the callable self.func.
    self.assertRaises(statistics.StatisticsError, self.func, data)
def test_unique_data_failure(self):
    # Test mode exception when data points are all unique.
    data = list(range(10))
    # BUG FIX: assertRaises was missing the callable; pass self.func so
    # the statistic is actually invoked on `data`.
    self.assertRaises(statistics.StatisticsError, self.func, data)
def test_single_value(self):
    # Override method from VarianceStdevMixin: variance/stdev of a single
    # value must raise.
    for x in (35, 24.7, 8.2e15, Fraction(19, 30), Decimal('4.2084')):
        # BUG FIX: assertRaises was missing the callable; pass self.func
        # so the statistic is invoked on the one-element list.
        self.assertRaises(statistics.StatisticsError, self.func, [x])
def test_single_value(self):
    # Override method from VarianceStdevMixin: variance/stdev of a single
    # value must raise.
    for x in (81, 203.74, 3.9e14, Fraction(5, 21), Decimal('35.719')):
        # BUG FIX: assertRaises was missing the callable; pass self.func
        # so the statistic is invoked on the one-element list.
        self.assertRaises(statistics.StatisticsError, self.func, [x])
def my_stats(slis):
import statistics
print("Mean: ", statistics.mean(slis))
print("Median: ", statistics.median(slis))
# print("Mode: ",statistics.mode(slis))
try:
print("Mode: ", statistics.mode(slis))
except statistics.StatisticsError as e:
print("Mode error: ", e)
print("Standard Deviation: ", statistics.stdev(slis))
print("Variance: ", statistics.variance(slis))
#%%
# NOTE(review): this span was a truncated/garbled duplicate of the test
# methods above (unterminated string literal at the first errmsg, fused
# statements); reconstructed from the intact copies earlier in the file.
# The two test_single_value methods originally belong to different test
# classes (variance vs. stdev) - confirm class placement when merging.
def test_has_exception(self):
    # StatisticsError must exist and subclass ValueError.
    errmsg = (
        "Expected StatisticsError to be a ValueError, but got a"
        " subclass of %r instead."
    )
    self.assertTrue(hasattr(statistics, 'StatisticsError'))
    self.assertTrue(
        issubclass(statistics.StatisticsError, ValueError),
        errmsg % statistics.StatisticsError.__base__
    )

# === Tests for private utility functions ===
def test_empty_data(self):
    # Fail when the data argument (first argument) is empty.
    for empty in ([], (), iter([])):
        self.assertRaises(statistics.StatisticsError, self.func, empty)

def test_range_data(self):
    # Override test from UnivariateCommonMixin.
    data = range(20, 50, 3)
    self.assertRaises(statistics.StatisticsError, self.func, data)

def test_bimodal_data(self):
    # Test mode with bimodal data.
    data = [1, 1, 2, 2, 2, 2, 3, 4, 5, 6, 6, 6, 6, 7, 8, 9, 9]
    assert data.count(2) == data.count(6) == 4
    # Check for an exception.
    self.assertRaises(statistics.StatisticsError, self.func, data)

def test_unique_data_failure(self):
    # Test mode exception when data points are all unique.
    data = list(range(10))
    self.assertRaises(statistics.StatisticsError, self.func, data)

def test_single_value(self):
    # Override method from VarianceStdevMixin.
    for x in (35, 24.7, 8.2e15, Fraction(19, 30), Decimal('4.2084')):
        self.assertRaises(statistics.StatisticsError, self.func, [x])

def test_single_value(self):
    # Override method from VarianceStdevMixin.
    for x in (81, 203.74, 3.9e14, Fraction(5, 21), Decimal('35.719')):
        self.assertRaises(statistics.StatisticsError, self.func, [x])
def stat(f):
    """Adapt statistic *f* to take a bare value or a list, normalizing errors."""
    def g(x):
        # A scalar argument becomes a one-element dataset.
        items = [x] if not isinstance(x, list) else x
        try:
            return f(items)
        except statistics.StatisticsError:
            raise Exception('Statistics Error')
    return ccfy(g)
def stdev(d):
    """Return the standard deviation of *d*, or 0 when it is undefined."""
    try:
        result = stdev_(d)
    except StatisticsError:
        # Fewer than two data points: spread is undefined.
        result = 0
    return result
def _try_compute_mode(objects):
"""
Computes the mode of a set of object,if a unique such exists.
Args:
objects (list[T]): the object whose mode is to be computed
Returns:
T: the modal value,or None if a unique mode does not exist
"""
try:
numeric_value = statistics.mode(objects) # This _is_ 'None' friendly
except statistics.StatisticsError: # No unique value,or empty data
numeric_value = None
return numeric_value
def average(self, key, day_range=15):
    """Return the mean amount of the given Good's record over the last
    *day_range* days, or 0 when the key is absent or has no data."""
    if key not in self.record:
        return 0
    recent = self.record[key][-day_range:]
    try:
        return mean(recent)
    except StatisticsError:
        # Empty history for this key.
        return 0
def compute_dataset_difficult_agreement(store_key, dataset_id):
    """Compute per-category vote-agreement means for a dataset and store them.

    For every taxonomy node of the dataset, computes the mean number of
    votes behind its non-propagated ground-truth annotations (all-time and
    for the last 31 days); categories whose mean exceeds 2 are stored,
    sorted descending by that mean, under ``store_key``.
    """
    logger.info('Start computing data for {0}'.format(store_key))
    try:
        dataset = Dataset.objects.get(id=dataset_id)
        nodes = dataset.taxonomy.taxonomynode_set.all()
        reference_date = datetime.datetime.today() - datetime.timedelta(days=31)
        difficult_agreement_categories = list()
        difficult_agreement_categories_last_month = list()
        for node in nodes:
            ground_truth_annotations = node.ground_truth_annotations.filter(from_propagation=False)
            ground_truth_annotations_last_month = node.ground_truth_annotations.filter(from_propagation=False,
                                                                                       created_at__gt=reference_date)
            try:
                mean_Votes_agreement = mean([annotation.from_candidate_annotation.Votes.count()
                                             for annotation in ground_truth_annotations])
            except StatisticsError:
                # No qualifying annotations for this node.
                mean_Votes_agreement = 0
            try:
                mean_Votes_agreement_last_month = mean([annotation.from_candidate_annotation.Votes.count()
                                                        for annotation in ground_truth_annotations_last_month])
            except StatisticsError:
                mean_Votes_agreement_last_month = 0
            difficult_agreement_categories.append((node.url_id, node.name, mean_Votes_agreement, node.omitted))
            difficult_agreement_categories_last_month.append((node.url_id, mean_Votes_agreement_last_month, node.omitted))
        # All-time tuples are (url_id, name, mean, omitted): mean at index 2.
        difficult_agreement_categories = [category_name_Votes for category_name_Votes in difficult_agreement_categories
                                          if category_name_Votes[2] > 2]
        difficult_agreement_categories = sorted(difficult_agreement_categories, key=lambda x: x[2], reverse=True)
        # BUG FIX: last-month tuples are (url_id, mean, omitted) - only
        # three elements - so the mean lives at index 1; the original
        # filtered and sorted on index 2, which is the `omitted` flag.
        difficult_agreement_categories_last_month = [category_name_Votes for category_name_Votes
                                                     in difficult_agreement_categories_last_month
                                                     if category_name_Votes[1] > 2]
        difficult_agreement_categories_last_month = sorted(difficult_agreement_categories_last_month,
                                                           key=lambda x: x[1], reverse=True)
        store.set(store_key, {'difficult_agreement_categories': difficult_agreement_categories,
                              'difficult_agreement_categories_last_month': difficult_agreement_categories_last_month})
        logger.info('Finished computing data for {0}'.format(store_key))
    except Dataset.DoesNotExist:
        pass
def get_disaggregated_stats(self, metrics, top_splitters,
                            lang=UNSPECIFIED_TRANSLATION, limit=100):
    """Return numeric statistics disaggregated per splitter.

    :param metrics: mapping of value -> Counter of per-splitter counts
    :param top_splitters: splitters to disaggregate on (passed to parent)
    :param lang: translation to use (passed to parent)
    :param limit: maximum number of splitter entries to return
    :returns: the parent stats dict extended with a 'values' tuple of
        (splitter, stats-dict) pairs; '*' marks an uncomputable statistic
    """
    # NOTE(review): the incoming signature had dropped `metrics` although
    # the body depends on it (a guaranteed NameError), and the parent call
    # had lost its middle arguments; both restored by analogy with the
    # sibling get_stats() - confirm the exact parent signature.
    parent = super(NumField, self)
    stats = parent.get_disaggregated_stats(metrics, top_splitters, lang,
                                           limit)
    substats = {}
    # Transpose the metrics data structure to look like
    # {splitter1: [x, y, z], splitter2: ...}
    inversed_metrics = defaultdict(list)
    for val, counter in metrics.items():
        if val is None:
            continue
        for splitter, count in counter.items():
            inversed_metrics[splitter].extend([val] * count)
    for splitter, values in inversed_metrics.items():
        val_stats = substats[splitter] = {
            'median': '*',
            'mean': '*',
            'mode': '*',
            'stdev': '*'
        }
        try:
            # mean/median require a non-empty dataset; stdev needs >= 2 values
            val_stats['mean'] = statistics.mean(values)
            val_stats['median'] = statistics.median(values)
            val_stats['stdev'] = statistics.stdev(values,
                                                  xbar=val_stats['mean'])
        except statistics.StatisticsError:
            pass
        try:
            # guarded separately so a stdev failure on a single value does
            # not suppress a computable mode
            val_stats['mode'] = statistics.mode(values)
        except statistics.StatisticsError:
            pass
    stats.update({
        'values': tuple(substats.items())[:limit]
    })
    return stats
def update_state(self, blocks):
    """Fold a sequence of decoded RDS blocks into the receiver state.

    Collects PI codes from A blocks (and from C blocks of 0B groups),
    assembles text characters from B/C/D blocks into self.cur_state, and
    finally derives the consensus PI code and station callsign.

    :param blocks: iterable of decoded block dicts; keys used here are
        'ID', 'PI', 'group_type', 'version_AB', 'text_AB',
        'text_segment', 'B0' and 'B1'
    """
    block_version = None
    char_offset = None
    group_type = None
    # Current text A/B flag per group type; the None key covers blocks
    # seen before any B block has set a group type.
    curr_AB = {0: None, 2: None, None: None}
    for block in blocks:
        blkid = block['ID']
        if blkid == "A":
            self.PIs.append(block['PI'])
            char_offset = None
        if blkid == "B":
            group_type = block['group_type']
            block_version = block['version_AB']
        if blkid == "B" and group_type == 0:
            curr_AB[group_type] = block['text_AB']
            # group 0 addresses 2 characters per segment
            char_offset = block['text_segment'] * 2
        if blkid == "B" and group_type == 2:
            # group 2 (radiotext) addresses 4 characters per segment
            char_offset = block['text_segment'] * 4
        # A flipped text A/B flag at offset 0 means the text buffer must
        # be cleared before new characters arrive.
        if (curr_AB[group_type] is not None) and (block['text_AB'] != curr_AB[group_type]) and (char_offset == 0) and (block_version == 'A'):
            print("CLEARING")
            self.cur_state[curr_AB[group_type] ^ 1] = ['_'] * 64
            curr_AB[group_type] = block['text_AB']
        if (char_offset is not None) and (blkid == "C") and (group_type == 0) and (block_version == 'B'):
            # 0B groups repeat the PI code in block C
            self.PIs.append((ord(block['B1']) << 8) + ord(block['B0']))
        if char_offset is not None and (blkid == "C") and (group_type == 2):
            self.cur_state[curr_AB[group_type]][char_offset] = block['B0']
            self.cur_state[curr_AB[group_type]][char_offset + 1] = block['B1']
        if char_offset is not None and (blkid == "D") and (group_type == 2):
            self.cur_state[curr_AB[group_type]][char_offset + 2] = block['B0']
            self.cur_state[curr_AB[group_type]][char_offset + 3] = block['B1']
        if (char_offset is not None) and (blkid == "D") and (group_type == 0) and (block_version == 'B'):
            self.cur_state[curr_AB[group_type]][char_offset] = block['B0']
            self.cur_state[curr_AB[group_type]][char_offset + 1] = block['B1']
        if (char_offset is not None) and (blkid == "D") and (group_type == 0) and (block_version == 'A'):
            self.cur_state[curr_AB[group_type]][char_offset + 10] = block['B0']
            self.cur_state[curr_AB[group_type]][char_offset + 11] = block['B1']
        if group_type in (0, 2):
            print(' '.join([str(x) for x in block.values()]))
        if blkid == "D":
            print('\n'.join([''.join(x) for x in self.cur_state]).replace('\r', '?'))
            # BUG FIX: was `group_type == None`, a no-op comparison; the
            # intent (mirroring char_offset below) is to reset the group
            # tracking at the end of each D block.
            group_type = None
            char_offset = None
    try:
        # Most common PI code wins; fall back to the first one seen when
        # mode is undefined (empty/tied data on pre-3.8 Python).
        self.PI = hex(statistics.mode(self.PIs))[2:]
    except statistics.StatisticsError:
        self.PI = hex(self.PIs[0])[2:]
    self.callsign = picode.rdscall(self.PI)
    print(self.callsign)