Skip to content

Commit

Permalink
Re-implement rank filters for queryset standings
Browse files Browse the repository at this point in the history
This commit makes the ranked annotations NULL, for the purposes of the
ranking annotator, when using queryset-based standings. To simplify the
task, rank filters are now a two-tuple (count field, minimum count),
which is passed to the StandingsGenerator either to create the
appropriate filter function or to hand to the ranking annotator; the
annotator in turn passes it to the QuerySet metric generator, which
creates a new annotation from the given field and value to wrap in the
conditional (NULL if the minimum is not met).

This commit also deduplicates the previous get_rank_filter methods to
use class attributes for preference and field, and removes an extraneous
default parameter.
  • Loading branch information
tienne-B committed Dec 4, 2020
1 parent c0e070d commit f9d4cfc
Show file tree
Hide file tree
Showing 5 changed files with 57 additions and 49 deletions.
13 changes: 7 additions & 6 deletions tabbycat/standings/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -231,9 +231,6 @@ def sort_from_rankings(self, tiebreak_func=None):

self._standings.sort(key=lambda r: tuple(r.rankings[key] for key in self.ranking_keys))

if self.rank_filter:
self._standings.sort(key=self.rank_filter, reverse=True)

self.ranked = True

def sort(self, precedence, tiebreak_func=None):
Expand Down Expand Up @@ -274,7 +271,7 @@ class BaseStandingsGenerator:

DEFAULT_OPTIONS = {
"tiebreak": "random",
"rank_filter": None,
"rank_filter": (None, None), # (Field name, Min value)
"include_filter": None, # not currently used by other code,
}

Expand Down Expand Up @@ -312,6 +309,9 @@ def _annotate_metrics(self, queryset, standings, round):
if self.options["include_filter"]:
standings.filter(self.options["include_filter"])

def get_rank_filter(self):
return lambda info: info.metrics[self.options["rank_filter"][0]] >= self.options["rank_filter"][1]

def generate(self, queryset, round=None):
"""Generates standings for the objects in queryset. Returns a
Standings object.
Expand All @@ -322,7 +322,8 @@ def generate(self, queryset, round=None):
(That is, rounds after `round` are excluded from the standings.)
"""

standings = Standings(queryset, rank_filter=self.options["rank_filter"])
rank_filter = self.get_rank_filter() if self.options["rank_filter"][0] is not None else None
standings = Standings(queryset, rank_filter=rank_filter)

# The original queryset might have filtered out information relevant to
# calculating the metrics (e.g., if it filters teams by participation in
Expand Down Expand Up @@ -355,7 +356,7 @@ def generate_from_queryset(self, queryset, standings, round):
aggregations present from the queryset (no repeated metrics)"""

for annotator in self.ranking_annotators:
queryset = annotator.get_annotated_queryset(queryset, self.queryset_metric_annotators)
queryset = annotator.get_annotated_queryset(queryset, self.queryset_metric_annotators, *self.options["rank_filter"])

self._annotate_metrics(queryset, standings, round)

Expand Down
7 changes: 7 additions & 0 deletions tabbycat/standings/metrics.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,8 @@

import logging

from django.db.models import Case, F, When

logger = logging.getLogger(__name__)


Expand Down Expand Up @@ -104,6 +106,11 @@ def get_annotated_queryset(self, queryset, round=None):
self.queryset_annotated = True
return queryset.annotate(**{self.key: annotation})

def get_ranking_annotation(self, min_field, min_rounds):
if min_rounds is None:
return F(self.key)
return Case(When(**{min_field + "__gte": min_rounds, "then": F(self.key)}))

def annotate_with_queryset(self, queryset, standings):
"""Annotates items with the given QuerySet."""
for item in queryset:
Expand Down
51 changes: 28 additions & 23 deletions tabbycat/standings/ranking.py
Original file line number Diff line number Diff line change
Expand Up @@ -56,30 +56,32 @@ def annotate(self, standings):
"""
raise NotImplementedError("BaseRankAnnotator subclasses must implement annotate()")

def _get_ordering(self, annotators):
def _get_ordering(self, annotators, min_field, min_rounds):
ordering = []
annotations = {a.key: a for a in annotators}
for key in self.metrics:
if annotations[key].ascending:
ordering.append(F(key).asc(nulls_last=True))
annotation = annotations[key]
if annotation.ascending:
ordering.append(annotation.get_ranking_annotation(min_field, min_rounds).asc(nulls_last=True))
else:
ordering.append(F(key).desc(nulls_last=True))
ordering.append(annotation.get_ranking_annotation(min_field, min_rounds).desc(nulls_last=True))
return ordering

def get_annotated_queryset(self, queryset, annotators):
def get_annotated_queryset(self, queryset, annotators, min_field, min_rounds):
self.queryset_annotated = True
return queryset.annotate(**{
self.key : self.get_annotation(annotators),
self.key + '_tied': self.get_tied_annotation(),
self.key : self.get_annotation(annotators, min_field, min_rounds),
self.key + '_tied': self.get_tied_annotation(annotators, min_field, min_rounds),
})

def get_annotation(self):
def get_annotation(self, annotators, min_field, min_rounds):
raise NotImplementedError

def get_tied_annotation(self):
def get_tied_annotation(self, annotators, min_field, min_rounds):
annotations = {a.key: a for a in annotators}
return Window(
expression=Count('id'),
partition_by=[F(key) for key in self.metrics],
partition_by=[annotations[key].get_ranking_annotation(min_field, min_rounds) for key in self.metrics],
)

def annotate_with_queryset(self, queryset, standings):
Expand Down Expand Up @@ -112,10 +114,10 @@ def annotate(self, standings):
info.add_ranking("rank", (rank, len(group) > 1))
rank += len(group)

def get_annotation(self, annotators):
def get_annotation(self, annotators, min_field, min_rounds):
return Window(
expression=Rank(),
order_by=self._get_ordering(annotators),
order_by=self._get_ordering(annotators, min_field, min_rounds),
)


Expand Down Expand Up @@ -147,21 +149,23 @@ def __init__(self, metrics):
self.group_key = metricgetter(metrics[:1]) # don't crash if there are no metrics
self.rank_key = metricgetter(metrics[1:])

def _get_ordering(self, annotators):
def _get_ordering(self, annotators, min_field, min_rounds):
ordering = []
annotations = {a.key: a for a in annotators}
for key in self.metrics[1:]:
if annotations[key].ascending:
ordering.append(F(key).asc(nulls_last=True))
annotation = annotations[key]
if annotation.ascending:
ordering.append(annotation.get_ranking_annotation(min_field, min_rounds).asc(nulls_last=True))
else:
ordering.append(F(key).desc(nulls_last=True))
ordering.append(annotation.get_ranking_annotation(min_field, min_rounds).desc(nulls_last=True))
return ordering

def get_annotation(self, annotators):
def get_annotation(self, annotators, min_field, min_rounds):
annotations = {a.key: a for a in annotators}
return Window(
expression=Rank(),
order_by=self._get_ordering(annotators),
partition_by=[F(key) for key in self.metrics[:1]],
order_by=self._get_ordering(annotators, min_field, min_rounds),
partition_by=[annotations[key].get_ranking_annotation(min_field, min_rounds) for key in self.metrics[:1]],
)


Expand All @@ -179,15 +183,16 @@ def __init__(self, metrics):
def group_key(tsi):
return tsi.team.institution_id

def get_annotation(self, annotators):
def get_annotation(self, annotators, min_field, min_rounds):
return Window(
expression=Rank(),
order_by=self._get_ordering(annotators),
order_by=self._get_ordering(annotators, min_field, min_rounds),
partition_by=F('institution_id'),
)

def get_tied_annotation(self):
def get_tied_annotation(self, annotators, min_field, min_rounds):
annotations = {a.key: a for a in annotators}
return Window(
expression=Count('id'),
partition_by=[F('institution_id')] + [F(key) for key in self.metrics],
partition_by=[F('institution_id')] + [annotations[key].get_ranking_annotation(min_field, min_rounds) for key in self.metrics],
)
1 change: 0 additions & 1 deletion tabbycat/standings/speakers.py
Original file line number Diff line number Diff line change
Expand Up @@ -169,7 +169,6 @@ def get_annotation(self, round=None):
return Case(
When(speech_count__gt=2, then=(total - highest - lowest) / (F('speech_count') - 2)),
When(speech_count__gt=0, then=total / F('speech_count')),
default=None,
output_field=FloatField(),
)

Expand Down
34 changes: 15 additions & 19 deletions tabbycat/standings/views.py
Original file line number Diff line number Diff line change
Expand Up @@ -200,6 +200,8 @@ class BaseSpeakerStandingsView(BaseStandingsView):
"""Base class for views that display speaker standings."""

rankings = ('rank',)
missable_preference = None
missable_field = None

def get_standings(self):
if self.round is None:
Expand Down Expand Up @@ -261,7 +263,13 @@ def integer_score_columns(self, rounds):
return []

def get_rank_filter(self):
return None
missable = -1 if self.missable_preference is None else self.tournament.pref(self.missable_preference)
if missable < 0:
return (None, None) # no limit
total_prelim_rounds = self.tournament.round_set.filter(
stage=Round.STAGE_PRELIMINARY, seq__lte=self.round.seq).count()
minimum_needed = total_prelim_rounds - missable
return (self.missable_field, minimum_needed)

def populate_result_missing(self, standings):
for info in standings:
Expand All @@ -282,6 +290,9 @@ class BaseSubstantiveSpeakerStandingsView(BaseSpeakerStandingsView):
page_title = gettext_lazy("Speaker Standings")
page_emoji = '💯'

missable_preference = 'standings_missed_debates'
missable_field = 'count'

def get_speakers(self):
return Speaker.objects.filter(team__tournament=self.tournament)

Expand All @@ -302,15 +313,6 @@ def integer_score_columns(self, rounds):
else:
return []

def get_rank_filter(self):
missable_debates = self.tournament.pref('standings_missed_debates')
if missable_debates < 0:
return None # no limit
total_prelim_rounds = self.tournament.round_set.filter(
stage=Round.STAGE_PRELIMINARY, seq__lte=self.round.seq).count()
minimum_debates_needed = total_prelim_rounds - missable_debates
return lambda info: info.metrics["count"] >= minimum_debates_needed

def add_round_results(self, standings, rounds):
add_speaker_round_results(standings, rounds, self.tournament)
self.cast_round_results(standings, rounds, 'score_step')
Expand Down Expand Up @@ -370,6 +372,9 @@ class BaseReplyStandingsView(BaseSpeakerStandingsView):
page_title = gettext_lazy("Reply Speaker Standings")
page_emoji = '💁'

missable_preference = 'standings_missed_replies'
missable_field = 'replies_count'

def get_speakers(self):
if self.tournament.reply_position is None:
raise StandingsError(_("Reply speeches aren't enabled in this tournament."))
Expand All @@ -381,15 +386,6 @@ def get_speakers(self):
def get_metrics(self):
return ('replies_avg',), ('replies_stddev', 'replies_count')

def get_rank_filter(self):
missable_replies = self.tournament.pref('standings_missed_replies')
if missable_replies < 0:
return None # no limit
total_prelim_rounds = self.tournament.round_set.filter(
stage=Round.STAGE_PRELIMINARY, seq__lte=self.round.seq).count()
minimum_replies_needed = total_prelim_rounds - missable_replies
return lambda info: info.metrics["replies_count"] >= minimum_replies_needed

def add_round_results(self, standings, rounds):
add_speaker_round_results(standings, rounds, self.tournament, replies=True)
self.cast_round_results(standings, rounds, 'reply_score_step')
Expand Down

0 comments on commit f9d4cfc

Please sign in to comment.