summaryrefslogtreecommitdiff
path: root/f1elo/elo.py
diff options
context:
space:
mode:
author: emkael <emkael@tlen.pl> 2016-11-30 14:26:35 +0100
committer: emkael <emkael@tlen.pl> 2016-11-30 14:26:35 +0100
commit: 1b8f84941c3ca28fc6590ccc907228b8afded8c6 (patch)
tree: 4758f87c29ba6076a7f2f3831e7dffa46d8aa48d /f1elo/elo.py
parent: cd1810a441230188b58d834e68cd15975a58cdb4 (diff)
* coding standards and formatting
Diffstat (limited to 'f1elo/elo.py')
-rw-r--r-- f1elo/elo.py 85
1 file changed, 55 insertions, 30 deletions
diff --git a/f1elo/elo.py b/f1elo/elo.py
index d864e6b..588cc9d 100644
--- a/f1elo/elo.py
+++ b/f1elo/elo.py
@@ -1,18 +1,26 @@
-import json, dateutil
-from sqlalchemy import func
+import json
from itertools import combinations
from os import path
import __main__
+import dateutil
+from sqlalchemy import func
from f1elo.model import *
-class Elo:
+class Elo(object):
def __init__(self, session):
self.session = session
self.config = json.load(
- open(path.join(path.dirname(__main__.__file__), 'config', 'elo.json')))
+ open(
+ path.join(
+ path.dirname(__main__.__file__),
+ 'config',
+ 'elo.json'
+ )
+ )
+ )
def get_ranking(self, driver, rank_date=None):
rank = driver.get_ranking(rank_date)
@@ -21,12 +29,15 @@ class Elo:
return self.config['initial_ranking']
def get_entry_ranking(self, entry, date=None):
- return sum([self.get_ranking(d, date) for d in entry.drivers]) / len(entry.drivers)
+ return sum(
+ [self.get_ranking(d, date) for d in entry.drivers]
+ ) / len(entry.drivers)
def get_race_disparity(self, race):
race_disparity = self.config['disparity']['base_disparity']
if self.config['disparity']['adjust']:
- recent_date = race.date - dateutil.relativedelta.relativedelta(months=3)
+ recent_date = race.date - dateutil.relativedelta.relativedelta(
+ months=3)
recent_ratings = self.session.query(
func.min(Ranking.ranking).label('min'),
func.max(Ranking.ranking).label('max')
@@ -37,14 +48,26 @@ class Elo:
)
changes_query = self.session.query(
func.avg(
- recent_ratings.subquery().columns.max - recent_ratings.subquery().columns.min
+ recent_ratings.subquery().columns.max
+ - recent_ratings.subquery().columns.min
)
)
recent_rank_change = changes_query.scalar()
if not recent_rank_change:
recent_rank_change = 0
- recent_rank_change = min(self.config['disparity']['base_rating_change'], recent_rank_change)
- race_disparity *= (2.5 + (self.config['disparity']['base_rating_change']/(recent_rank_change - 2.0 * self.config['disparity']['base_rating_change']))) * 0.5
+ recent_rank_change = min(
+ self.config['disparity']['base_rating_change'],
+ recent_rank_change)
+ race_disparity *= (
+ 2.5
+ + (
+ self.config['disparity']['base_rating_change']
+ / (
+ recent_rank_change
+ - 2.0 * self.config['disparity']['base_rating_change']
+ )
+ )
+ ) * 0.5
return race_disparity
def rank_race(self, race):
@@ -53,22 +76,22 @@ class Elo:
entries_to_compare = []
rankings = {}
new_rankings = {}
- for e in entries:
- rankings[e] = self.get_entry_ranking(e, race.date)
- new_rankings[e] = 0.0
- if e.result_group:
- entries_to_compare.append(e)
- for c in combinations(entries_to_compare, 2):
- score = self.get_score(
- rankings[c[0]] - rankings[c[1]],
- self.get_outcome(c),
+ for entry in entries:
+ rankings[entry] = self.get_entry_ranking(entry, race.date)
+ new_rankings[entry] = 0.0
+ if entry.result_group:
+ entries_to_compare.append(entry)
+ for combo in combinations(entries_to_compare, 2):
+ score = get_score(
+ rankings[combo[0]] - rankings[combo[1]],
+ get_outcome(combo),
self.get_importance(race,
- [rankings[c[0]],
- rankings[c[1]]]),
+ [rankings[combo[0]],
+ rankings[combo[1]]]),
race_disparity
)
- new_rankings[c[0]] += score
- new_rankings[c[1]] -= score
+ new_rankings[combo[0]] += score
+ new_rankings[combo[1]] -= score
return new_rankings
def get_importance(self, race, rankings):
@@ -80,12 +103,14 @@ class Elo:
return base_importance * 0.75
return base_importance / 2
- def get_outcome(self, entries):
- if entries[0].result_group < entries[1].result_group:
- return 1
- elif entries[0].result_group > entries[1].result_group:
- return 0
- return 0.5
- def get_score(self, difference, outcome, importance, disparity):
- return importance * (outcome - 1 / (1 + (10 ** (-difference / disparity))))
+def get_outcome(entries):
+ if entries[0].result_group < entries[1].result_group:
+ return 1
+ elif entries[0].result_group > entries[1].result_group:
+ return 0
+ return 0.5
+
+
+def get_score(difference, outcome, importance, disparity):
+ return importance * (outcome - 1 / (1 + (10 ** (-difference / disparity))))