-
Notifications
You must be signed in to change notification settings - Fork 378
/
utils.py
1220 lines (1016 loc) · 47.3 KB
/
utils.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
# Copyright The IETF Trust 2011-2020, All Rights Reserved
# -*- coding: utf-8 -*-
import datetime
import hashlib
import io
import json
import math
import os
import re
import textwrap
from collections import defaultdict, namedtuple, Counter
from typing import Union
from zoneinfo import ZoneInfo
from django.conf import settings
from django.contrib import messages
from django.forms import ValidationError
from django.http import Http404
from django.template.loader import render_to_string
from django.utils import timezone
from django.utils.html import escape
from django.urls import reverse as urlreverse
from django_stubs_ext import QuerySetAny
import debug # pyflakes:ignore
from ietf.community.models import CommunityList
from ietf.community.utils import docs_tracked_by_community_list
from ietf.doc.models import Document, DocHistory, State, DocumentAuthor, DocHistoryAuthor
from ietf.doc.models import DocAlias, RelatedDocument, RelatedDocHistory, BallotType, DocReminder
from ietf.doc.models import DocEvent, ConsensusDocEvent, BallotDocEvent, IRSGBallotDocEvent, NewRevisionDocEvent, StateDocEvent
from ietf.doc.models import TelechatDocEvent, DocumentActionHolder, EditedAuthorsDocEvent
from ietf.name.models import DocReminderTypeName, DocRelationshipName
from ietf.group.models import Role, Group, GroupFeatures
from ietf.ietfauth.utils import has_role, is_authorized_in_doc_stream, is_individual_draft_author, is_bofreq_editor
from ietf.person.models import Person
from ietf.review.models import ReviewWish
from ietf.utils import draft, log
from ietf.utils.mail import send_mail
from ietf.mailtrigger.utils import gather_address_lists
from ietf.utils.timezone import date_today, datetime_from_date, datetime_today, DEADLINE_TZINFO
from ietf.utils.xmldraft import XMLDraft
def save_document_in_history(doc):
    """Save a snapshot of document and related objects in the database.

    Copies every non-pk field of doc into a new DocHistory, mirrors the
    auto-created many-to-many relations, and snapshots the related
    RelatedDocument / DocumentAuthor rows as their *History counterparts.

    Returns the newly created DocHistory instance.
    """
    def get_model_fields_as_dict(obj):
        return dict((field.name, getattr(obj, field.name))
                    for field in obj._meta.fields
                    if field is not obj._meta.pk)

    # copy fields
    fields = get_model_fields_as_dict(doc)
    fields["doc"] = doc
    fields["name"] = doc.canonical_name()

    dochist = DocHistory(**fields)
    dochist.save()

    # copy many to many
    for field in doc._meta.many_to_many:
        if field.remote_field.through and field.remote_field.through._meta.auto_created:
            hist_field = getattr(dochist, field.name)
            hist_field.clear()
            hist_field.set(getattr(doc, field.name).all())

    # copy remaining tricky many to many
    def transfer_fields(obj, HistModel):
        # Fix: read fields from the obj parameter instead of relying on
        # the loop variable "item" leaking in from the enclosing scope
        # (the old code only worked by accident of the call sites below).
        mfields = get_model_fields_as_dict(obj)
        # map doc -> dochist
        for k, v in mfields.items():
            if v == doc:
                mfields[k] = dochist
        HistModel.objects.create(**mfields)

    for item in RelatedDocument.objects.filter(source=doc):
        transfer_fields(item, RelatedDocHistory)

    for item in DocumentAuthor.objects.filter(document=doc):
        transfer_fields(item, DocHistoryAuthor)

    return dochist
def get_state_types(doc):
    """Return the list of state-machine type slugs applicable to doc.

    Every document gets its own type_id; drafts additionally get the
    stream-specific state machine (except "legacy") and the four
    IESG/IANA/RFC-Editor tracking state machines.
    """
    if not doc:
        return []

    state_types = [doc.type_id]
    if doc.type_id == "draft":
        if doc.stream_id and doc.stream_id != "legacy":
            state_types.append("draft-stream-%s" % doc.stream_id)
        state_types.extend([
            "draft-iesg",
            "draft-iana-review",
            "draft-iana-action",
            "draft-rfceditor",
        ])
    return state_types
def get_tags_for_stream_id(stream_id):
    """Return the tag slugs available for annotating documents in the given stream.

    Unknown (or None) streams have no tags.
    """
    tags_by_stream = {
        "ietf": ["w-expert", "w-extern", "w-merge", "need-aut", "w-refdoc",
                 "w-refing", "rev-wg", "rev-wglc", "rev-ad", "rev-iesg",
                 "sheph-u", "no-adopt", "other"],
        "iab": ["need-ed", "w-part", "w-review", "need-rev", "sh-f-up"],
        "irtf": ["need-ed", "need-sh", "w-dep", "need-rev", "iesg-com"],
        "ise": ["w-dep", "w-review", "need-rev", "iesg-com"],
    }
    return tags_by_stream.get(stream_id, [])
def can_adopt_draft(user, doc):
    """Answers whether a user can adopt a given draft into some stream/group.

    This does not answer, even by implication, which streams/groups the user
    has authority to adopt into.
    """
    if not user.is_authenticated:
        return False

    # The Secretariat can always adopt
    if has_role(user, "Secretariat"):
        return True

    #The IRTF chair can adopt a draft into any RG
    if has_role(user, "IRTF Chair"):
        return (doc.stream_id in (None, "irtf")
                and doc.group.type_id == "individ")

    # Otherwise, walk each adopting group type and the stream it may adopt
    # from; the user must hold one of that group type's document-management
    # roles (GroupFeatures.docman_roles) in an active group of that type.
    for type_id, allowed_stream in (
        ("wg", "ietf"),
        ("rg", "irtf"),
        ("ag", "ietf"),
        ("rag", "irtf"),
        ("edwg", "editorial"),
    ):
        # Draft must be unassigned to a stream, or already in the stream
        # this group type adopts from
        if doc.stream_id in (None, allowed_stream):
            # Draft must be individual, or already owned by a group of this type
            if doc.group.type_id in ("individ", type_id):
                if Role.objects.filter(
                    name__in=GroupFeatures.objects.get(type_id=type_id).docman_roles,
                    group__type_id = type_id,
                    group__state = "active",
                    person__user = user,
                ).exists():
                    return True

    return False
def can_unadopt_draft(user, doc):
    """Answer whether a user may remove doc from its current stream/group.

    The answer depends on the document's stream: group docman-style roles
    for irtf/ietf, the ISE chair for ise, the RSWG chair for editorial.
    """
    # TODO: This should use docman_roles, and this implementation probably returns wrong answers
    # For instance, should any WG chair be able to unadopt a group from any other WG
    if not user.is_authenticated:
        return False
    if has_role(user, "Secretariat"):
        return True
    if doc.stream_id == 'irtf':
        # IRTF chair may unadopt from any RG; otherwise need a
        # chair/delegate/secr role in the document's own group
        if has_role(user, "IRTF Chair"):
            return True
        return user.person.role_set.filter(name__in=('chair','delegate','secr'),group=doc.group).exists()
    elif doc.stream_id == 'ietf':
        return user.person.role_set.filter(name__in=('chair','delegate','secr'),group=doc.group).exists()
    elif doc.stream_id == 'ise':
        return user.person.role_set.filter(name='chair',group__acronym='ise').exists()
    elif doc.stream_id == 'iab':
        return False # Right now only the secretariat can add a document to the IAB stream, so we'll
                     # leave it where only the secretariat can take it out.
    elif doc.stream_id == 'editorial':
        return user.person.role_set.filter(name='chair', group__acronym='rswg').exists()
    else:
        return False
def can_edit_docextresources(user, doc):
    """Answer whether user may edit the document's external resources.

    Allowed for the Secretariat, Area Directors, anyone authorized in the
    document's stream, an individual draft's author, or a bofreq editor.
    """
    permission_checks = (
        lambda: has_role(user, ("Secretariat", "Area Director")),
        lambda: is_authorized_in_doc_stream(user, doc),
        lambda: is_individual_draft_author(user, doc),
        lambda: is_bofreq_editor(user, doc),
    )
    # any() over thunks short-circuits just like the original or-chain,
    # so later (potentially query-issuing) checks only run when needed.
    return any(check() for check in permission_checks)
def two_thirds_rule( recused=0 ):
    """Number of positions needed for a standards-track ballot to pass.

    For standards-track, need positions from 2/3 of the non-recused
    current (active-area) IESG, rounded up.
    """
    iesg_size = Role.objects.filter(
        name="ad", group__type="area", group__state="active"
    ).count()
    eligible = iesg_size - recused
    return int(math.ceil(eligible * 2.0 / 3.0))
def needed_ballot_positions(doc, active_positions):
    '''Returns text answering the question "what does this document
    need to pass?". The return value is only useful if the document
    is currently in IESG evaluation.'''
    # Bucket the positions we care about; skip None entries (ADs with
    # no recorded position)
    yes = [p for p in active_positions if p and p.pos_id == "yes"]
    noobj = [p for p in active_positions if p and p.pos_id == "noobj"]
    blocking = [p for p in active_positions if p and p.pos.blocking]
    recuse = [p for p in active_positions if p and p.pos_id == "recuse"]

    answer = []
    if len(yes) < 1:
        answer.append("Needs a YES.")
    if blocking:
        if len(blocking) == 1:
            answer.append("Has a %s." % blocking[0].pos.name.upper())
        else:
            # Pluralize the blocking position's name ("DISCUSS" -> "DISCUSSES")
            if blocking[0].pos.name.upper().endswith('S'):
                answer.append("Has %d %ses." % (len(blocking), blocking[0].pos.name.upper()))
            else:
                answer.append("Has %d %ss." % (len(blocking), blocking[0].pos.name.upper()))

    # Work out how many YES/NO OBJECTION positions are required
    needed = 1
    if doc.type_id == "draft" and doc.intended_std_level_id in ("bcp", "ps", "ds", "std"):
        # Standards-track drafts need 2/3 of the non-recused IESG
        needed = two_thirds_rule(recused=len(recuse))
    elif doc.type_id == "statchg":
        # Status changes that touch the standards track also use the 2/3 rule
        if isinstance(doc,Document):
            related_set = doc.relateddocument_set
        elif isinstance(doc,DocHistory):
            related_set = doc.relateddochistory_set
        else:
            related_set = RelatedDocHistory.objects.none()
        for rel in related_set.filter(relationship__slug__in=['tops', 'tois', 'tohist', 'toinf', 'tobcp', 'toexp']):
            if (rel.target.document.std_level_id in ['bcp','ps','ds','std']) or (rel.relationship_id in ['tops','tois','tobcp']):
                needed = two_thirds_rule(recused=len(recuse))
                break
    else:
        # Other document types only need a single YES; if there is none,
        # the "Needs a YES." text above is the whole answer
        if len(yes) < 1:
            return " ".join(answer)

    have = len(yes) + len(noobj)
    if have < needed:
        more = needed - have
        if more == 1:
            answer.append("Needs one more YES or NO OBJECTION position to pass.")
        else:
            answer.append("Needs %d more YES or NO OBJECTION positions to pass." % more)
    else:
        if blocking:
            answer.append("Has enough positions to pass once %s positions are resolved." % blocking[0].pos.name.upper())
        else:
            answer.append("Has enough positions to pass.")

    return " ".join(answer)
def irsg_needed_ballot_positions(doc, active_positions):
    '''Returns text answering the question "what does this document
    need to pass?". The return value is only useful if the document
    is currently in IRSG evaluation.'''
    REQUIRED_YES = 2

    # Ignore None entries (members with no recorded position)
    recorded = [p for p in active_positions if p]
    yes_count = sum(1 for p in recorded if p.pos_id == "yes")
    moretime_count = sum(1 for p in recorded if p.pos_id == "moretime")
    notready_count = sum(1 for p in recorded if p.pos_id == "notready")

    parts = []
    if notready_count > 0:
        parts.append("Has a Not Ready position.")

    shortfall = REQUIRED_YES - yes_count
    if shortfall > 0:
        if shortfall == 1:
            parts.append("Needs one more YES position to pass.")
        else:
            parts.append("Needs %d more YES positions to pass." % shortfall)
    else:
        parts.append("Has enough positions to pass.")

    if moretime_count > 0:
        parts.append("Has a Need More Time position.")

    return " ".join(parts)
def rsab_needed_ballot_positions(doc, active_positions):
    """Return text describing what an RSAB ballot still needs to pass.

    None entries in active_positions count as members who have not yet
    taken a position. Returns an empty string when nothing is needed.
    """
    count = Counter([p.pos_id if p else 'none' for p in active_positions])
    answer = []
    if count["concern"] > 0:
        answer.append("Has a Concern position.")
    # note RFC9280 section 3.2.2 item 12
    # the "vote" mentioned there is a separate thing from ballot position.
    if count["yes"] == 0:
        # This is _implied_ by 9280 - a document shouldn't be
        # approved if all RSAB members recuse
        answer.append("Needs a YES position.")
    if count["none"] > 0:
        # Fix: the message used to read "have have" (duplicated word)
        answer.append("Some members have not taken a position.")
    return " ".join(answer)
def create_ballot(request, doc, by, ballot_slug, time=None):
    """Create a new ballot of type ballot_slug on doc.

    Any ballots still open on the document are closed first, with a
    warning message flashed to the request for each one. The new
    "created_ballot" event is saved with the given time if provided.
    """
    # Close any still-open ballots before opening a new one
    closed = close_open_ballots(doc, by)
    for e in closed:
        messages.warning(request, "Closed earlier open ballot created %s on '%s' for %s" % (e.time.strftime('%Y-%m-%d %H:%M'), e.ballot_type, e.doc.name, ))
    if time:
        e = BallotDocEvent(type="created_ballot", by=by, doc=doc, rev=doc.rev, time=time)
    else:
        e = BallotDocEvent(type="created_ballot", by=by, doc=doc, rev=doc.rev)
    e.ballot_type = BallotType.objects.get(doc_type=doc.type, slug=ballot_slug)
    e.desc = 'Created "%s" ballot' % e.ballot_type.name
    e.save()
def create_ballot_if_not_open(request, doc, by, ballot_slug, time=None, duedate=None):
    """Create a ballot of type ballot_slug on doc unless one is already open.

    IRTF-stream documents get an IRSGBallotDocEvent carrying the duedate;
    everything else gets a plain BallotDocEvent. Returns the new event,
    or None (with a warning flashed if request is given) when a ballot
    of this type is already open.
    """
    ballot_type = BallotType.objects.get(doc_type=doc.type, slug=ballot_slug)
    if not doc.ballot_open(ballot_slug):
        kwargs = dict(type="created_ballot", by=by, doc=doc, rev=doc.rev)
        if time:
            kwargs['time'] = time
        if doc.stream_id == 'irtf':
            # IRSG ballots carry a due date
            kwargs['duedate'] = duedate
            e = IRSGBallotDocEvent(**kwargs)
        else:
            e = BallotDocEvent(**kwargs)
        e.ballot_type = ballot_type
        e.desc = 'Created "%s" ballot' % e.ballot_type.name
        e.save()
        return e
    else:
        if request:
            messages.warning(request, "There already exists an open '%s' ballot for %s. No new ballot created." % (ballot_type, doc.name))
        return None
def close_ballot(doc, by, ballot_slug):
    """Close the open ballot of type ballot_slug on doc, if there is one.

    Returns the open ballot event that was closed, or the falsy result of
    doc.ballot_open() when no such ballot was open.
    """
    b = doc.ballot_open(ballot_slug)
    if b:
        e = BallotDocEvent(type="closed_ballot", doc=doc, rev=doc.rev, by=by)
        e.ballot_type = BallotType.objects.get(doc_type=doc.type,slug=ballot_slug)
        e.desc = 'Closed "%s" ballot' % e.ballot_type.name
        e.save()
    return b
def close_open_ballots(doc, by):
    """Close every open ballot on doc.

    Tries each ballot type defined for the document's type and returns
    the list of ballots that were actually open and got closed.
    """
    results = [
        close_ballot(doc, by, ballot_type.slug)
        for ballot_type in BallotType.objects.filter(doc_type=doc.type_id)
    ]
    return [ballot for ballot in results if ballot]
def get_chartering_type(doc):
    """Classify an in-progress charter as "initial" or "rechartering".

    Returns "" when the charter is not being worked on (state "notrev" or
    "approved") or the group state doesn't indicate chartering.
    """
    if doc.get_state_slug() in ("notrev", "approved"):
        return ""

    group_state = doc.group.state_id
    if group_state in ("proposed", "bof"):
        return "initial"
    if group_state == "active":
        return "rechartering"
    return ""
def augment_events_with_revision(doc, events):
    """Take a set of events for doc and add a .rev attribute with the
    revision they refer to by checking NewRevisionDocEvents."""
    # Collect (id, rev, time) for every new-revision event, oldest first
    if isinstance(events, QuerySetAny):
        qs = events.filter(newrevisiondocevent__isnull=False)
    else:
        qs = NewRevisionDocEvent.objects.filter(doc=doc)
    event_revisions = list(qs.order_by('time', 'id').values('id', 'rev', 'time'))

    if doc.type_id == "draft" and doc.get_state_slug() == "rfc":
        # add fake "RFC" revision
        if isinstance(events, QuerySetAny):
            e = events.filter(type="published_rfc").order_by('time').last()
        else:
            e = doc.latest_event(type="published_rfc")
        if e:
            event_revisions.append(dict(id=e.id, time=e.time, rev="RFC"))
            event_revisions.sort(key=lambda x: (x["time"], x["id"]))

    # Walk the events newest-first, popping revision markers that are
    # newer than the current event so that the last remaining marker is
    # the revision in effect at that event's time.
    for e in sorted(events, key=lambda e: (e.time, e.id), reverse=True):
        while event_revisions and (e.time, e.id) < (event_revisions[-1]["time"], event_revisions[-1]["id"]):
            event_revisions.pop()

        # Check for all subtypes which have 'rev' fields:
        for sub in ['newrevisiondocevent', 'submissiondocevent', ]:
            if hasattr(e, sub):
                e = getattr(e, sub)
                break

        if not hasattr(e, 'rev'):
            # Events before the first revision marker default to "00"
            if event_revisions:
                cur_rev = event_revisions[-1]["rev"]
            else:
                cur_rev = "00"
            e.rev = cur_rev
def add_events_message_info(events):
    """Copy message details onto each "added_message" event.

    For every event of type "added_message", mirrors the related
    AddedMessageEvent's message, msgtype, and in_reply_to attributes
    directly onto the event object. Other events are untouched.
    """
    for event in events:
        if event.type != "added_message":
            continue
        details = event.addedmessageevent
        event.message = details.message
        event.msgtype = details.msgtype
        event.in_reply_to = details.in_reply_to
def get_unicode_document_content(key, filename, codec='utf-8', errors='ignore'):
    """Read filename and decode its bytes with codec.

    On a read failure, returns an error string instead of raising; the
    string includes the full path only when settings.DEBUG is on (the
    short key otherwise), to avoid leaking paths in production.
    """
    try:
        with io.open(filename, 'rb') as source:
            content = source.read().decode(codec, errors)
    except IOError:
        shown = filename if settings.DEBUG else key
        return "Error; cannot read (" + shown + ")"
    return content
def tags_suffix(tags):
    """Render tags as a "::tag1::tag2" suffix for a state name.

    Returns the empty string for a falsy/empty tags collection.
    """
    if not tags:
        return ""
    return "::" + "::".join(tag.name for tag in tags)
def add_state_change_event(doc, by, prev_state, new_state, prev_tags=None, new_tags=None, timestamp=None):
    """Add doc event to explain that state change just happened.

    Returns the saved StateDocEvent, or None when neither the state nor
    the tag set actually changed.
    """
    # A state change must stay within one state type
    if prev_state and new_state:
        assert prev_state.type_id == new_state.type_id

    # convert default args to empty lists
    prev_tags = prev_tags or []
    new_tags = new_tags or []

    if prev_state == new_state and set(prev_tags) == set(new_tags):
        return None

    e = StateDocEvent(doc=doc, rev=doc.rev, by=by)
    e.type = "changed_state"
    e.state_type = (prev_state or new_state).type
    e.state = new_state
    e.desc = "%s changed to <b>%s</b>" % (e.state_type.label, new_state.name + tags_suffix(new_tags))
    if prev_state:
        e.desc += " from %s" % (prev_state.name + tags_suffix(prev_tags))
    if timestamp:
        e.time = timestamp
    e.save()
    return e
def add_action_holder_change_event(doc, by, prev_set, reason=None):
    """Create a DocEvent recording a change to doc's action holders.

    prev_set is the list of Persons who were action holders before the
    change. Returns the created event, or None when the current action
    holder set is identical to prev_set.
    """
    set_changed = False
    if doc.documentactionholder_set.exclude(person__in=prev_set).exists():
        set_changed = True  # doc has an action holder not in the old set

    # If set_changed is still False, then all of the current action holders were in
    # prev_set. Either the sets are the same or the prev_set contains at least one
    # Person not in the current set, so just check length.
    if doc.documentactionholder_set.count() != len(prev_set):
        set_changed = True

    if not set_changed:
        return None

    if doc.action_holders.exists():
        ah_names = [person.plain_name() for person in doc.action_holders.all()]
        description = 'Changed action holders to %s' % ', '.join(ah_names)
    else:
        description = 'Removed all action holders'
    if reason:
        description += ' (%s)' % reason

    return DocEvent.objects.create(
        type='changed_action_holders',
        doc=doc,
        by=by,
        rev=doc.rev,
        desc=description,
    )
def update_action_holders(doc, prev_state=None, new_state=None, prev_tags=None, new_tags=None):
    """Update the action holders for doc based on state transition

    Returns an event describing the change which should be passed to doc.save_with_history()

    Only cares about draft-iesg state changes. Places where other state types are updated
    may not call this method. If you add rules for updating action holders on other state
    types, be sure this is called in the places that change that state.
    """
    # Should not call this with different state types
    if prev_state and new_state:
        assert prev_state.type_id == new_state.type_id

    # Convert tags to sets of slugs
    prev_tag_slugs = {t.slug for t in (prev_tags or [])}
    new_tag_slugs = {t.slug for t in (new_tags or [])}

    # Do nothing if state / tag have not changed
    if (prev_state == new_state) and (prev_tag_slugs == new_tag_slugs):
        return None

    # Remember original list of action holders to later check if it changed
    prev_set = list(doc.action_holders.all())

    # Only draft-iesg states are of interest (for now)
    # Fix: pass a default to getattr so a transition *to* None (e.g. a
    # state being cleared) does not raise AttributeError.
    if (prev_state != new_state) and (getattr(new_state, 'type_id', None) == 'draft-iesg'):
        # Clear the action_holders list on a state change. This will reset the age of any that get added back.
        doc.action_holders.clear()
        if doc.ad and new_state.slug not in DocumentActionHolder.CLEAR_ACTION_HOLDERS_STATES:
            # Default to responsible AD for states other than these
            doc.action_holders.add(doc.ad)

    if prev_tag_slugs != new_tag_slugs:
        # If we have added or removed the need-rev tag, add or remove authors as action holders
        if ('need-rev' in prev_tag_slugs) and ('need-rev' not in new_tag_slugs):
            # Removed the 'need-rev' tag - drop authors from the action holders list
            DocumentActionHolder.objects.filter(document=doc, person__in=doc.authors()).delete()
        elif ('need-rev' not in prev_tag_slugs) and ('need-rev' in new_tag_slugs):
            # Added the 'need-rev' tag - add authors to the action holders list
            for auth in doc.authors():
                if not doc.action_holders.filter(pk=auth.pk).exists():
                    doc.action_holders.add(auth)

    # Now create an event if we changed the set
    return add_action_holder_change_event(
        doc,
        Person.objects.get(name='(System)'),
        prev_set,
        reason='IESG state changed',
    )
def update_documentauthors(doc, new_docauthors, by=None, basis=None):
    """Update the list of authors for a document

    Returns an iterable of events describing the change. These must be saved by the caller if
    they are to be kept.

    The new_docauthors argument should be an iterable containing objects that
    have person, email, affiliation, and country attributes. An easy way to create
    these objects is to use DocumentAuthor(), but e.g., a named tuple could be
    used. These objects will not be saved, their attributes will be used to create new
    DocumentAuthor instances. (The document and order fields will be ignored.)
    """
    def _change_field_and_describe(auth, field, newval):
        """Set auth.<field> to newval and return a human-readable description
        of the change, or None if the value did not change."""
        # make the change
        oldval = getattr(auth, field)
        setattr(auth, field, newval)

        was_empty = oldval is None or len(str(oldval)) == 0
        now_empty = newval is None or len(str(newval)) == 0

        # describe the change
        if oldval == newval:
            return None
        else:
            if was_empty and not now_empty:
                return 'set {field} to "{new}"'.format(field=field, new=newval)
            elif now_empty and not was_empty:
                return 'cleared {field} (was "{old}")'.format(field=field, old=oldval)
            else:
                return 'changed {field} from "{old}" to "{new}"'.format(
                    field=field, old=oldval, new=newval
                )

    persons = []            # Persons that remain authors after the update
    changes = []            # list of change descriptions
    for order, docauthor in enumerate(new_docauthors):
        # If an existing DocumentAuthor matches, use that
        auth = doc.documentauthor_set.filter(person=docauthor.person).first()
        is_new_auth = auth is None
        if is_new_auth:
            # None exists, so create a new one (do not just use docauthor here because that
            # will modify the input and might cause side effects)
            auth = DocumentAuthor(document=doc, person=docauthor.person)
            changes.append('Added "{name}" as author'.format(name=auth.person.name))

        author_changes = []
        # Now fill in other author details
        author_changes.append(_change_field_and_describe(auth, 'email', docauthor.email))
        author_changes.append(_change_field_and_describe(auth, 'affiliation', docauthor.affiliation or ''))
        author_changes.append(_change_field_and_describe(auth, 'country', docauthor.country or ''))
        author_changes.append(_change_field_and_describe(auth, 'order', order + 1))
        auth.save()
        log.assertion('auth.email_id != "none"')
        persons.append(docauthor.person)
        if not is_new_auth:
            all_author_changes = ', '.join([ch for ch in author_changes if ch is not None])
            if len(all_author_changes) > 0:
                changes.append('Changed author "{name}": {changes}'.format(
                    name=auth.person.name, changes=all_author_changes
                ))

    # Finally, remove any authors no longer in the list
    removed_authors = doc.documentauthor_set.exclude(person__in=persons)
    changes.extend(['Removed "{name}" as author'.format(name=auth.person.name)
                    for auth in removed_authors])
    removed_authors.delete()

    # Create change events - one event per author added/changed/removed.
    # Caller must save these if they want them persisted.
    return [
        EditedAuthorsDocEvent(
            type='edited_authors', by=by, doc=doc, rev=doc.rev, desc=change, basis=basis
        ) for change in changes
    ]
def update_reminder(doc, reminder_type_slug, event, due_date):
    """Activate, update, or deactivate the reminder of the given type on doc.

    A truthy due_date creates or refreshes the reminder (attaching it to
    event); a falsy due_date deactivates any existing active reminder.
    """
    reminder_type = DocReminderTypeName.objects.get(slug=reminder_type_slug)

    try:
        reminder = DocReminder.objects.get(event__doc=doc, type=reminder_type, active=True)
    except DocReminder.DoesNotExist:
        reminder = None

    if due_date:
        # activate/update reminder
        if not reminder:
            reminder = DocReminder(type=reminder_type)

        reminder.event = event
        reminder.due = due_date
        reminder.active = True
        reminder.save()
    else:
        # deactivate reminder
        if reminder:
            reminder.active = False
            reminder.save()
def prettify_std_name(n, spacing=" "):
    """Format names like "rfc2119" as "RFC 2119".

    Names not matching the rfc/bcp/fyi/std-plus-digits pattern are
    returned unchanged. The separator is configurable via spacing.
    """
    prefix, rest = n[:3], n[3:]
    if re.match(r"(rfc|bcp|fyi|std)[0-9]+", n):
        return prefix.upper() + spacing + rest
    return n
def default_consensus(doc):
    """Return the consensus status for doc.

    If someone edited the consensus (a "changed_consensus" event exists),
    return that; otherwise default by stream: ietf -> True,
    irtf -> False, anything else (ise, iab, legacy) -> None (unknown).
    """
    e = doc.latest_event(ConsensusDocEvent, type="changed_consensus")
    if e:
        return e.consensus

    consensus = None
    if doc.stream_id == "ietf":
        consensus = True
    elif doc.stream_id == "irtf":
        consensus = False
    # Fix: always return the computed default; previously the ietf/irtf
    # branches assigned consensus but fell off the end of the function,
    # implicitly returning None.
    return consensus
def nice_consensus(consensus):
    """Map a consensus value (True/False/None) to its display string."""
    if consensus is None:
        return "Unknown"
    return "Yes" if consensus else "No"
def has_same_ballot(doc, date1, date2=None):
    """ Test if the most recent ballot created before the end of date1
        is the same as the most recent ballot created before the
        end of date 2. (date2 defaults to today.) """
    datetime1 = datetime_from_date(date1, DEADLINE_TZINFO)
    if date2 is None:
        datetime2 = datetime_today(DEADLINE_TZINFO)
    else:
        datetime2 = datetime_from_date(date2, DEADLINE_TZINFO)
    # "before the end of the day" == strictly before the next midnight
    ballot1 = doc.latest_event(
        BallotDocEvent,
        type='created_ballot',
        time__lt=datetime1 + datetime.timedelta(days=1),
    )
    ballot2 = doc.latest_event(
        BallotDocEvent,
        type='created_ballot',
        time__lt=datetime2 + datetime.timedelta(days=1),
    )
    return ballot1 == ballot2
def make_notify_changed_event(request, doc, by, new_notify, time=None):
    """Record a change to doc's notification list and return the saved event.

    The event desc shows both the new list and (if set) the old one.
    """
    # FOR REVIEW: This preserves the behavior from when
    # drafts and charters had separate edit_notify
    # functions. If it should be unified, there should
    # also be a migration function cause historic
    # events to match
    if doc.type.slug=='charter':
        event_type = 'changed_document'
    else:
        event_type = 'added_comment'

    e = DocEvent(type=event_type, doc=doc, rev=doc.rev, by=by)
    e.desc = "Notification list changed to %s" % (escape(new_notify) or "none")
    if doc.notify:
        e.desc += " from %s" % escape(doc.notify)
    if time:
        e.time = time
    e.save()

    return e
def update_telechat(request, doc, by, new_telechat_date, new_returning_item=None):
    """Schedule, reschedule, or unschedule doc for an IESG telechat.

    Creates and returns a TelechatDocEvent describing the change (or
    returns None implicitly when nothing changed). Sends an update email,
    with an extra warning when the new date is less than 13 days away.
    """
    on_agenda = bool(new_telechat_date)

    prev = doc.latest_event(TelechatDocEvent, type="scheduled_for_telechat")
    prev_returning = bool(prev and prev.returning_item)
    prev_telechat = prev.telechat_date if prev else None
    prev_agenda = bool(prev_telechat)

    if new_returning_item == None:
        returning = prev_returning
    else:
        returning = new_returning_item

    if returning == prev_returning and new_telechat_date == prev_telechat:
        # fully updated, nothing to do
        return

    # auto-set returning item _ONLY_ if the caller did not provide a value
    # NOTE(review): the comment above says "did not provide a value", but the
    # test below is `new_returning_item != None` -- verify which is intended.
    if ( new_returning_item != None
         and on_agenda
         and prev_agenda
         and new_telechat_date != prev_telechat
         and prev_telechat < date_today(DEADLINE_TZINFO)
         and has_same_ballot(doc,prev.telechat_date)
       ):
        returning = True

    e = TelechatDocEvent()
    e.type = "scheduled_for_telechat"
    e.by = by
    e.doc = doc
    e.rev = doc.rev
    e.returning_item = returning
    e.telechat_date = new_telechat_date

    # Build a description matching the kind of change made
    if on_agenda != prev_agenda:
        if on_agenda:
            e.desc = "Placed on agenda for telechat - %s" % (new_telechat_date)
        else:
            e.desc = "Removed from agenda for telechat"
    elif on_agenda and new_telechat_date != prev_telechat:
        e.desc = "Telechat date has been changed to <b>%s</b> from <b>%s</b>" % (
            new_telechat_date, prev_telechat)
    else:
        # we didn't reschedule but flipped returning item bit - let's
        # just explain that
        if returning:
            e.desc = "Set telechat returning item indication"
        else:
            e.desc = "Removed telechat returning item indication"

    e.save()

    # Warn when a draft lands on a telechat with little lead time for reviews
    has_short_fuse = doc.type_id=='draft' and new_telechat_date and (( new_telechat_date - date_today() ) < datetime.timedelta(days=13))

    from ietf.doc.mails import email_update_telechat
    if has_short_fuse:
        email_update_telechat(request, doc, e.desc+"\n\nWARNING: This may not leave enough time for directorate reviews!\n")
    else:
        email_update_telechat(request, doc, e.desc)

    return e
def rebuild_reference_relations(doc, filenames):
    """Rebuild reference relations for a document

    filenames should be a dict mapping file ext (i.e., type) to the full path of each file.

    Returns None for non-drafts, otherwise a dict that may contain
    'errors', 'warnings', and 'unfound' keys describing problems found
    while extracting references.
    """
    if doc.type.slug != 'draft':
        return None

    # try XML first
    if 'xml' in filenames:
        refs = XMLDraft(filenames['xml']).get_refs()
    elif 'txt' in filenames:
        filename = filenames['txt']
        try:
            refs = draft.PlaintextDraft.from_file(filename).get_refs()
        except IOError as e:
            return { 'errors': ["%s :%s" % (e.strerror, filename)] }
    else:
        return {'errors': ['No Internet-Draft text available for rebuilding reference relations. Need XML or plaintext.']}

    # Drop the existing extracted reference relations before re-adding
    doc.relateddocument_set.filter(relationship__slug__in=['refnorm','refinfo','refold','refunk']).delete()

    warnings = []
    errors = []
    unfound = set()
    for ( ref, refType ) in refs.items():
        refdoc = DocAlias.objects.filter(name=ref)
        if not refdoc and re.match(r"^draft-.*-\d{2}$", ref):
            # Reference to a specific revision of a draft: retry without
            # the trailing "-NN" revision
            refdoc = DocAlias.objects.filter(name=ref[:-3])
        count = refdoc.count()
        # As of Dec 2021, DocAlias has a unique constraint on the name field, so count > 1 should not occur
        if count == 0:
            unfound.add( "%s" % ref )
            continue
        elif count > 1:
            errors.append("Too many DocAlias objects found for %s"%ref)
        else:
            # Don't add references to ourself
            if doc != refdoc[0].document:
                RelatedDocument.objects.get_or_create( source=doc, target=refdoc[ 0 ], relationship=DocRelationshipName.objects.get( slug='ref%s' % refType ) )
    if unfound:
        warnings.append('There were %d references with no matching DocAlias'%len(unfound))

    ret = {}
    if errors:
        ret['errors']=errors
    if warnings:
        ret['warnings']=warnings
    if unfound:
        ret['unfound']=list(unfound)

    return ret
def set_replaces_for_document(request, doc, new_replaces, by, email_subject, comment=""):
    """Update the set of documents that doc replaces.

    Adds/removes "replaces" relations, adjusts draft states of documents
    that become (un)replaced, records events, and emails the affected
    addresses. Returns the list of created events.
    """
    addrs = gather_address_lists('doc_replacement_changed',doc=doc)
    to = set(addrs.to)
    cc = set(addrs.cc)

    relationship = DocRelationshipName.objects.get(slug='replaces')
    old_replaces = doc.related_that_doc("replaces")

    events = []

    e = DocEvent(doc=doc, rev=doc.rev, by=by, type='changed_document')
    new_replaces_names = ", ".join(d.name for d in new_replaces) or "None"
    old_replaces_names = ", ".join(d.name for d in old_replaces) or "None"
    e.desc = "This document now replaces <b>%s</b> instead of %s" % (new_replaces_names, old_replaces_names)
    e.save()

    events.append(e)

    if comment:
        events.append(DocEvent.objects.create(doc=doc, rev=doc.rev, by=by, type="added_comment", desc=comment))

    # Drop relations that are no longer wanted; un-replaced documents go
    # back to active/expired depending on their expiry time
    for d in old_replaces:
        if d not in new_replaces:
            other_addrs = gather_address_lists('doc_replacement_changed',doc=d.document)
            to.update(other_addrs.to)
            cc.update(other_addrs.cc)
            RelatedDocument.objects.filter(source=doc, target=d, relationship=relationship).delete()
            if not RelatedDocument.objects.filter(target=d, relationship=relationship):
                s = 'active' if d.document.expires > timezone.now() else 'expired'
                d.document.set_state(State.objects.get(type='draft', slug=s))

    # Add the new relations; newly replaced documents get the "repl"
    # state (also in their stream's state machine where applicable)
    for d in new_replaces:
        if d not in old_replaces:
            other_addrs = gather_address_lists('doc_replacement_changed',doc=d.document)
            to.update(other_addrs.to)
            cc.update(other_addrs.cc)
            RelatedDocument.objects.create(source=doc, target=d, relationship=relationship)
            d.document.set_state(State.objects.get(type='draft', slug='repl'))

            if d.document.stream_id in ('irtf','ise','iab'):
                repl_state = State.objects.get(type_id='draft-stream-%s'%d.document.stream_id, slug='repl')
                d.document.set_state(repl_state)
                events.append(StateDocEvent.objects.create(doc=d.document, rev=d.document.rev, by=by, type='changed_state', desc="Set stream state to Replaced",state_type=repl_state.type, state=repl_state))

    # make sure there are no lingering suggestions duplicating new replacements
    RelatedDocument.objects.filter(source=doc, target__in=new_replaces, relationship="possibly-replaces").delete()

    email_desc = e.desc.replace(", ", "\n    ")

    if comment:
        email_desc += "\n" + comment

    from ietf.doc.mails import html_to_text

    send_mail(request, list(to),
              "DraftTracker Mail System <[email protected]>",
              email_subject,
              "doc/mail/change_notice.txt",
              dict(text=html_to_text(email_desc),
                   doc=doc,
                   url=settings.IDTRACKER_BASE_URL + doc.get_absolute_url()),
              cc=list(cc))

    return events
def check_common_doc_name_rules(name):
    """Check common rules for document names for use in forms, throws
    ValidationError in case there's a problem.

    All problems found are collected so the user sees every issue at
    once rather than one per submission attempt.
    """
    errors = []
    # Only lowercase ASCII letters, digits and dashes are allowed.
    if re.search(r"[^a-z0-9-]", name):
        errors.append("The name may only contain digits, lowercase letters and dashes.")
    # A plain substring test is enough here; no regex needed.
    if "--" in name:
        errors.append("Please do not put more than one hyphen between any two words in the name.")
    # A trailing two-digit suffix would collide with the automatically
    # appended revision number.
    if re.search(r"-[0-9]{2}$", name):
        errors.append("This name looks like it ends in a version number. -00 will be added automatically. Please adjust the end of the name.")
    if errors:
        raise ValidationError(errors)
def get_initial_notify(doc, extra=None):
    """Return the initial value for a document's notify field.

    With the mailtrigger based changes, a document's notify should start
    empty; only addresses passed in via extra (a list of addresses or a
    ", "-separated string) are included.

    Duplicates are removed while preserving the order in which the
    addresses were given, so the result is deterministic (the previous
    set()-based dedup produced an arbitrary ordering).
    """
    receivers = []
    if extra:
        if isinstance(extra, str):
            extra = extra.split(', ')
        receivers.extend(extra)
    # dict.fromkeys dedupes while keeping insertion order.
    return ", ".join(dict.fromkeys(x.strip() for x in receivers))
def uppercase_std_abbreviated_name(name):
    """Upper-case names of the standards series ('rfc 2119', 'bcp14',
    'std 3', 'fyi 1', with or without a space before the number);
    anything else is returned unchanged."""
    is_series_name = re.match(r'(rfc|bcp|std|fyi) ?[0-9]+$', name)
    return name.upper() if is_series_name else name
def extract_complete_replaces_ancestor_mapping_for_docs(names):
    """Return dict mapping all replaced by relationships of the
    replacement ancestors to docs. So if x is directly replaced by y
    and y is in names or replaced by something in names, x in
    replaces[y]."""
    replaces = defaultdict(set)
    visited = set()
    # Breadth-first walk over the "replaces" relation, one DB query per level.
    frontier = names
    while frontier:
        relations = (
            RelatedDocument.objects.filter(source__name__in=frontier, relationship="replaces")
            .select_related("target")
            .values_list("source__name", "target__docs__name")
        )
        if not relations:
            break
        visited.update(frontier)
        next_frontier = []
        for source_name, target_name in relations:
            replaces[source_name].add(target_name)
            # Only descend into targets we have not already expanded.
            if target_name not in visited:
                next_frontier.append(target_name)
        frontier = next_frontier
    return replaces
def make_rev_history(doc):
    """Return document history data for inclusion in doc.json (used by timeline).

    Collects every 'new_revision' event across the whole replaces/replaced-by
    tree of *doc*, plus the publication/approval event, and returns a list of
    dicts (name, rev, published, url) sorted by publication time.
    """
    def get_predecessors(doc, predecessors=None):
        # Recursively collect documents that doc (transitively) replaces.
        if predecessors is None:
            predecessors = []
        if hasattr(doc, 'relateddocument_set'):
            for alias in doc.related_that_doc('replaces'):
                for document in alias.docs.all():
                    if document not in predecessors:
                        predecessors.append(document)
                        predecessors.extend(get_predecessors(document, predecessors))
        return predecessors
    def get_ancestors(doc, ancestors = None):
        # Recursively collect documents that (transitively) replace doc.
        if ancestors is None:
            ancestors = []
        if hasattr(doc, 'relateddocument_set'):
            for alias in doc.related_that('replaces'):
                for document in alias.docs.all():
                    if document not in ancestors:
                        ancestors.append(document)
                        ancestors.extend(get_ancestors(document, ancestors))
        return ancestors
    def get_replaces_tree(doc):
        # Both directions of the replacement relation, as one flat list.
        tree = get_predecessors(doc)
        tree.extend(get_ancestors(doc))
        return tree
    # Keyed by URL so repeated events for the same revision collapse.
    history = {}
    docs = get_replaces_tree(doc)
    if docs is not None:
        docs.append(doc)
        for d in docs:
            for e in d.docevent_set.filter(type='new_revision').distinct():
                if hasattr(e, 'newrevisiondocevent'):
                    url = urlreverse("ietf.doc.views_doc.document_main", kwargs=dict(name=d)) + e.newrevisiondocevent.rev + "/"
                    history[url] = {
                        'name': d.name,
                        'rev': e.newrevisiondocevent.rev,
                        'published': e.time.isoformat(),
                        'url': url,
                    }
                    # Page count is only available via the history snapshot
                    # for that revision, when one exists.
                    if d.history_set.filter(rev=e.newrevisiondocevent.rev).exists():
                        history[url]['pages'] = d.history_set.filter(rev=e.newrevisiondocevent.rev).first().pages
    if doc.type_id == "draft":
        # e.time.date() agrees with RPC publication date when shown in the RPC_TZINFO time zone
        e = doc.latest_event(type='published_rfc')
    else:
        e = doc.latest_event(type='iesg_approved')
    if e:
        url = urlreverse("ietf.doc.views_doc.document_main", kwargs=dict(name=e.doc))
        history[url] = {
            'name': e.doc.canonical_name(),
            'rev': e.doc.canonical_name(),
            'published': e.time.isoformat(),
            'url': url
        }
        # NOTE(review): 'pages' here reads doc.history_set (the argument),
        # not e.doc.history_set — presumably intentional; confirm.
        if hasattr(e, 'newrevisiondocevent') and doc.history_set.filter(rev=e.newrevisiondocevent.rev).exists():
            history[url]['pages'] = doc.history_set.filter(rev=e.newrevisiondocevent.rev).first().pages
    history = list(history.values())
    return sorted(history, key=lambda x: x['published'])
def get_search_cache_key(params):
    """Derive a stable cache key for a document search from its form params.

    Only real search fields are included ('sort' does not change the result
    set, just its order), and sort_keys=True makes the JSON — and thus the
    digest — independent of parameter ordering."""
    from ietf.doc.views_search import SearchForm
    relevant_fields = set(SearchForm.base_fields) - {'sort'}
    filtered = {k: v for k, v in params.items() if k in relevant_fields}
    digest = hashlib.sha512(json.dumps(filtered, sort_keys=True).encode('utf-8')).hexdigest()
    return "doc:document:search:" + digest