#!/usr/bin/env python3
# Scrape the Los Alamos meetings page to be alerted to what's on
# the agenda at upcoming meetings.
# Make it available via RSS.
# Suggestion: run this script via crontab:
# Use crontab -e to add a line like:
# 45 15 * * * python3 /path/to/htdocs/losalamosmtgs.py > /path/to/htdocs/los-alamos-meetings/LOG 2>&1
import requests
from bs4 import BeautifulSoup, NavigableString
import datetime
import time
from urllib.parse import urljoin
import io
import string
import subprocess
import tempfile
import json
import re
import os, sys
from lxml.html.diff import htmldiff
from urllib3.exceptions import ReadTimeoutError
from math import isclose
# Try to use PyMuPDF if available.
# For some inexplicable reason the package PyMuPDF is imported as "fitz".
# But it doesn't seem to work as well as pdftohtml anyway.
# try:
# import fitz
# except:
# print("No PyMuPDF installed, using pdftohtml")
########## CONFIGURATION ##############
# You can also pass in RSS_URL RSS_DIR as two optional arguments
# Where to start: the public legistar meeting list
MEETING_LIST_URL = "https://losalamos.legistar.com/Calendar.aspx"
# The place where the RSS will be hosted. Must end with a slash.
# The RSS file will be this/index.rss.
RSS_URL = "https://localhost/los-alamos-meetings/"
# Where to put the generated RSS file. Customize this for your website.
RSS_DIR = os.path.expanduser("~/web/los-alamos-meetings")
if not os.path.exists(RSS_DIR):
os.makedirs(RSS_DIR)
# Directory to store long-term records (CSV) of agenda items vs. dates.
# If None, long-term records will not be stored.
AGENDA_ITEM_STORE = os.path.join(RSS_DIR, "AgendaItems")
# Legal Notices published on the paper of record, the LA Daily Post
LEGALURL = 'https://ladailypost.com/legal-notices/'
######## END CONFIGURATION ############
RECORDS_FILEBASE = 'meeting-records.html'
LEGAL_JSON = 'legal-notices.json'
# Make a timezone-aware datetime for now:
now = datetime.datetime.now().astimezone()
localtz = now.tzinfo
today = now.date()
todaystr = today.strftime("%Y-%m-%d")
# and a UTC version
utcnow = datetime.datetime.now(tz=datetime.timezone.utc)
# Format for dates in RSS:
# This has to be GMT, not %Z, because datetime.strptime just
# throws away any %Z info anyway rather than parsing it.
# Better to get an error if we see any time that's not GMT.
RSS_DATE_FORMAT = "%a, %d %b %Y %H:%M GMT"
rssnow = utcnow.strftime(RSS_DATE_FORMAT)
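# e.g. rssnow might look like "Tue, 04 Mar 2025 21:45 GMT" (illustrative value)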
# Needed to deal with meetings that don't list a time:
NO_TIME = "NO TIME"
# Something guaranteed to be before any meetings.
# Can't use datetime.datetime.min because it needs to be tz-aware.
EARLY_DATETIME = datetime.datetime(1970, 1, 1).astimezone()
EARLY_DATE = EARLY_DATETIME.date()
Verbose = True
# and save the timezone
localtz = now.tzinfo
# Match the date format used in the tickler, e.g. Tue Feb 28
TICKLER_HDR_DATE_PAT = re.compile("[MTWFS][a-z]{2} [A-Z][a-z]{2} [0-9]{1,2}")
# Items are introduced with a "file number" which can help separate them
# in what otherwise looks like a sea of text.
FILENO_PAT = re.compile('[A-Z0-9]{2,}-[A-Z0-9]{2,}')
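# Hypothetical examples of strings this pattern would match:
# "17123-0456", "ORD-2023", "PR-145A"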
# What to search for when linkifying.
# Note: this doesn't always get the whole link, but that's not the fault
# of LINK_PAT; it's that sometimes the PDFs split the URL across
# several lines.
LINK_PAT = re.compile('^https://')
# A few other patterns that are used here and there
sectionnum_pat = re.compile(r'[0-9A-Z]{,2}\.')
pagediv_pat = re.compile('page[0-9]*-div')
header_pat = re.compile(r'([0-9]+)\.\s*([A-Z \(\)\/,]+)$', flags=re.DOTALL)
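# header_pat is meant to catch bold top-level agenda headers,
# e.g. (hypothetically) "7.  PUBLIC COMMENT" or "12.  COUNCIL BUSINESS"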
# Where temp files will be created. pdftohtml can only write to a file.
tempdir = tempfile.mkdtemp()
def protected(filename):
"""Is the filename something that should be protected from
directory cleanup?
"""
# Protected files: don't remove these
if filename.startswith("index") or filename.startswith("about"):
return True
if filename.startswith('records'):
return True
if filename.startswith('no-agenda'):
return True
# *-meeting-records.html will be managed by write_meeting_records_file()
# and only deleted when a new one is to be added.
if filename.endswith(RECORDS_FILEBASE):
return True
# Files for legal notices should only be removed by check_legal_notices:
if 'legal-notices' in filename:
return True
return False
#
# Meeting Records (Minutes, Video, Audio):
# Records are stored in a JSON file records.json
# Each record has the date it was first seen.
# It may also include a URL, but the URL is bogus:
# unfortunately Legistar doesn't offer externally-viable links,
# so the audio and video links they show on the calendar page
# just go back to the calendar page, while the links for minutes
# go to the agenda page instead.
#
# Sadly, the links in the Legistar audio/video are completely
# bogus and just go back to the Legistar calendar;
# there's no way to link directly to the Video or Audio.
# Even worse, you can't even link to the correct month
# where someone might find the transcript: they have to
# load the calendar and then figure out how to navigate to
# the correct month, which will then screw them up the next
# time they want to load the calendar.
# ARGHHH!
#
# Clients need easy access both by cleanname and by date.
# Dates are yyyy-mm-dd strings.
#
class MeetingRecords:
# Types of records of meetings (to match the legistar calendar page):
RECORD_TYPES = [ 'Minutes', 'Video', 'Audio', "Updated" ]
RECORDFILENAME = "records.json"
def __init__(self):
# record_dic is indexed by cleanname; each entry is a dictionary
# mapping record_type to (url, date), plus a 'name' and the
# date of the latest record seen ('latest-record').
self.record_dic = {}
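# Hypothetical sketch of one entry as it would appear in records.json
# (the key format comes from mtgdic_to_cleanname; values are made up):
# "CountyCouncil-2025-03-04": {
#     "name": "CountyCouncil-2025-03-04",
#     "Minutes": ["https://losalamos.legistar.com/...", "2025-03-10"],
#     "latest-record": "2025-03-10"
# }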
def add_meeting(self, mtg_dic):
# print("add_meeting from:", mtg_dic['cleanname'])
for rtype in self.RECORD_TYPES:
if rtype in mtg_dic and mtg_dic[rtype]:
self.add_record(mtg_dic['cleanname'], rtype, mtg_dic[rtype])
def add_record(self, cleanname, record_type, url):
"""Add a record of one type for one meeting.
If the indicated meeting already has that record type with the same URL,
don't update anything; otherwise store the URL with today's date.
"""
if cleanname not in self.record_dic or \
record_type not in self.record_dic[cleanname] or \
self.record_dic[cleanname][record_type][0] != url:
if cleanname not in self.record_dic:
self.record_dic[cleanname] = { 'name': cleanname }
self.record_dic[cleanname][record_type] = (url, todaystr)
self.record_dic[cleanname]['latest-record'] = todaystr
# Only need to add name if there's been at least one record
def new_records_today(self):
"""An iterator that returns dicts of meetings that had
at least one new record appear today.
"""
for name in self.record_dic:
if self.record_dic[name]['latest-record'] == todaystr:
yield self.record_dic[name]
# else:
# print(name, "has older date,",
# self.record_dic[name]['latest-record'])
def records_by_date(self):
"""An iterator yielding dicts, sorted by latest record date.
starting with the most recent.
"""
# The bydate list is a list of (cleanname, date) sorted by date,
# most recent first.
bydate_list = []
for name in self.record_dic:
bydate_list.append(( name, self.record_dic[name]['latest-record'] ))
bydate_list.sort(key=lambda t: t[1], reverse=True)
for name, lastmod in bydate_list:
yield self.record_dic[name]
def read_file(self):
try:
with open(os.path.join(RSS_DIR, self.RECORDFILENAME)) as fp:
self.record_dic = json.load(fp)
except:
if not self.record_dic and Verbose:
print("No records saved from last time")
def save_file(self):
"""Clean and save the records.json file.
"""
# Don't keep anything older than 15 days.
keys = list(self.record_dic.keys())
for key in keys:
recdate = datetime.datetime.strptime(
self.record_dic[key]["latest-record"], "%Y-%m-%d").date()
if (today - recdate).days > 15:
del self.record_dic[key]
# Now save it
with open(os.path.join(RSS_DIR, self.RECORDFILENAME), 'w') as fp:
json.dump(self.record_dic, fp, indent=4)
# XXX Would be nice to save them in sorted order
print("Saved records to", os.path.join(RSS_DIR, self.RECORDFILENAME))
mtg_records = MeetingRecords()
upcoming_meetings = []
def build_upcoming_meetings_list():
# Initialize MeetingRecords from the saved file.
mtg_records.read_file()
# By default, the calendar page only shows the current month,
# even when there are meetings scheduled for next month.
# To see anything from the upcoming month you have to set cookies
# in the HTTP request.
# If you do that manually, here are the cookies it sets:
# Setting-69-ASP.meetingdetail_aspx.gridMain.SortExpression=Sequence ASC;
# Setting-69-Calendar Options=info|; Setting-69-Calendar Year=Next Month;
# Setting-69-Calendar Body=All;
# Setting-69-ASP.calendar_aspx.gridCalendar.SortExpression=MeetingStartDate DESC;
# ASP.NET_SessionId=tmk5pfksowfid2t3nqjmpvac;
# BIGipServerprod_insite_443=874644234.47873.0000
# but with any luck, 'Next Month' is the only one that's actually needed.
# This has to be done before reading the default page,
# to match the decreasing date order of the meetings on each month's page.
if now.day > 20:
cookiedict = { 'Setting-69-Calendar Year': 'Next Month' }
r = requests.get(MEETING_LIST_URL, cookies=cookiedict)
parse_html_meeting_list(r.text)
# Get the meetings on the default (this month) page.
# These will be appended to the global list upcoming_meetings.
r = requests.get(MEETING_LIST_URL, timeout=30)
parse_html_meeting_list(r.text)
# Look at last month to get any new records that have been posted
cookiedict = { 'Setting-69-Calendar Year': 'Last Month' }
r = requests.get(MEETING_LIST_URL, cookies=cookiedict)
parse_html_meeting_list(r.text)
# Now that all relevant months have been read,
# it's safe to save the records file.
mtg_records.save_file()
# The meeting list is in date/time order, latest first.
# Better to list them in the other order, starting with
# meetings today, then meetings tomorrow, etc.
# That's why we couldn't just write meetings from the earlier loop.
# Could sort by keys, 'Meeting Date' and 'Meeting Time',
# but since it's already sorted, it's easier just to reverse.
upcoming_meetings.reverse()
def parse_html_meeting_list(page_html):
"""Parse the page listing meetings, which is HTML generated by pdftohtml.
Return a list of dictionaries for each meeting.
"""
soup = BeautifulSoup(page_html, 'lxml')
# Remove a bunch of spurious tags
for badtag in [ "font", "span", "div" ]:
badtags = soup.find_all(badtag)
for tag in badtags:
tag.replace_with_children()
caltbl = soup.find("table",
id="ctl00_ContentPlaceHolder1_gridCalendar_ctl00")
# The legend is in the thead
fieldnames = []
for i, field in enumerate(caltbl.thead.find_all("th")):
if field.text:
fieldnames.append(field.text.strip())
else:
fieldnames.append(str(i))
# Loop over meetings, rows in the table:
for row in caltbl.tbody.find_all("tr"):
mtg = {}
# Loop over columns describing this meeting:
for i, cell in enumerate(row.find_all("td")):
if fieldnames[i].startswith("Agenda"):
# If there's an Agenda URL, make it absolute.
a = cell.find("a")
href = a.get("href")
if href:
mtg[fieldnames[i]] = urljoin(MEETING_LIST_URL, href)
else:
mtg[fieldnames[i]] = None
elif fieldnames[i] == 'Meeting Location':
# The Location field has simple formatting
# such as <br>, so can't just take .text, alas.
mtg[fieldnames[i]] = ' '.join([str(c).strip()
for c in cell.contents]) \
.strip()
# The little calendar icon somehow comes out with a name of '2'.
# Skip it.
elif fieldnames[i] == '2' or not fieldnames[i]:
continue
# Minutes/Video/Audio: save the URL, if any, else None
# XXX Though the URL is actually meaningless and doesn't work,
# so consider changing this to not store it.
elif fieldnames[i] in MeetingRecords.RECORD_TYPES:
# print(mtg["Name"], "record type", fieldnames[i])
try:
# Legistar uses \xa0 instead of spaces
fieldtext = cell.text.replace('\u00a0', ' ').strip()
assert fieldtext != "Not available"
# print("Not not available")
# In case they start playing other games with characters,
# check this cell's link href too:
a = cell.find("a")
assert a is not None
href = a.get("href")
hclass = a.get("class")
# Real video links don't have a class, but
# "Not available" has fake links with a class.
if hclass:
assert 'NotAvailable' not in hclass
# But Legistar misspells it:
assert 'NotAvailble' not in hclass
# print("Real href")
mtg[fieldnames[i]] = urljoin(MEETING_LIST_URL, href)
# print(" Set link to", mtg[fieldnames[i]])
except AssertionError:
mtg[fieldnames[i]] = None
# Most cells are simple and won't have any formatting.
# They are full of nbsps '\u00a0', though.
else:
val = cell.text.replace('\u00a0', ' ').strip()
# if fieldnames[i] == "Name":
# print("--", val)
if val:
mtg[fieldnames[i]] = val
mtg['cleanname'] = mtgdic_to_cleanname(mtg)
mtg_records.add_meeting(mtg)
# Now we have a good mtg dictionary.
# If it's in the future, save it in upcoming_meetings;
# if the past, save it in past_records if it has records.
meetingdate = meeting_datetime(mtg).date()
if meetingdate >= today:
upcoming_meetings.append(mtg)
def meeting_datetime(mtg):
"""Parse the meeting date and time and return an aware local datetime.
If there's only a date and no time, set the time to 23:59.
"""
# The parsed time is in the local time and is unaware,
# because strptime can't create a timezone aware object
# even if the string it's parsing includes a timezone (see above).
if "Meeting Time" not in mtg or not mtg["Meeting Time"]:
mtg["Meeting Time"] = NO_TIME
try:
if mtg["Meeting Time"] != NO_TIME:
unaware = datetime.datetime.strptime(mtg["Meeting Date"] + " "
+ mtg["Meeting Time"],
'%m/%d/%Y %I:%M %p')
else: # No time, so list it at 23:59
unaware = datetime.datetime.strptime(mtg["Meeting Date"],
'%m/%d/%Y')
unaware = unaware.replace(hour=23, minute=59, second=0)
# Make it aware in localtime
localtime = unaware.astimezone(localtz)
return localtime
except ValueError:
print("ERROR: Can't parse date on meeting:", mtg)
return None
def html_head(title, rsspage=None):
if not rsspage:
rsspage = "index.rss"
return f"""<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html>
<head>
<title>{ title }</title>
<meta http-equiv="content-type" content="text/html; charset=utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1">
<link rel="alternate" type="application/rss+xml"
title="{ title } RSS Feed"
href="{RSS_URL}{rsspage}" />
<link rel="stylesheet" type="text/css" title="Style" href="meetingstyle.css"/>
</head>
<body>
<h1>{ title }</h1>"""
def rss_entry(title, desc, guid, url, lastmod):
"""Create an entry for one link in an RSS file.
title: title for the entry
desc: longer description for the entry
guid: unique identifier
url: link to follow (if any).
lastmod: date to be used for RSS, must be in RSS_DATE_FORMAT
"""
# XXX url should be made absolute
return f"""<item>
<title>{title}</title>
<guid isPermaLink="false">{guid}</guid>
<link>{url}</link>
<description><![CDATA[ {desc} ]]>
</description>
<pubDate>{lastmod}</pubDate>
</item>"""
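# A minimal usage sketch with made-up values:
# rss_entry("County Council 2025-03-04",
#           "The agenda has changed since it was first posted.",
#           "CountyCouncil-2025-03-04-agenda",
#           RSS_URL + "CountyCouncil-2025-03-04.html",
#           rssnow)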
def diffhtml(before_html, after_html, title=None):
"""Diffs the two files, and returns an html fragment that wraps
differences in <ins> or <del> tags, which you can style as desired.
Returns bytes, not str, because everything else works in bytes
due to using requests.
"""
if not title:
title = "Changed Agenda"
# lxml.html.htmldiff only accepts strings, not bytes, but these
# were read in as bytes because that's what comes from requests;
# so translate them.
if type(before_html) is bytes:
before_html = before_html.decode()
if type(after_html) is bytes:
after_html = after_html.decode()
# lxml.html.htmldiff returns fragments, not full documents.
# So add a header that includes a style for ins and del.
diff = html_head(title)
diff += htmldiff(before_html, after_html)
diff += "\n</body></html>\n"
# encode to return bytes.
return diff.encode()
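# Hypothetical usage sketch (old_html_bytes/new_html_bytes stand in for
# two saved versions of the same agenda):
# with open(os.path.join(RSS_DIR, "CountyCouncil-diff.html"), "wb") as fp:
#     fp.write(diffhtml(old_html_bytes, new_html_bytes, title="County Council"))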
def agenda_to_html(mtg, meetingtime, save_pdf_filename=None):
if save_pdf_filename:
prettyname = os.path.basename(save_pdf_filename)
else:
prettyname = mtg["Agenda"]
print("Converting agenda for", prettyname, file=sys.stderr)
if 'fitz' in sys.modules:
print("Using fitz")
return html_agenda_fitz(mtg, meetingtime, save_pdf_filename)
# print("No fitz, using pdftohtml")
return html_agenda_pdftohtml(mtg, meetingtime, save_pdf_filename)
def html_agenda_fitz(mtg, meetingtime, save_pdf_filename=None):
"""Use fitz (mupdf's engine) to convert PDF to HTML, returned as a string.
"""
agendaloc = mtg["Agenda"]
doc = fitz.open(agendaloc)
def find_indent_levels(pdfdoc):
indents = []
for page in pdfdoc.pages():
for block in page.get_text("blocks"):
indent = round(block[0])
if indent not in indents:
indents.append(indent)
indents.sort()
def group_clusters(lis, max_sep):
"""Reduce a list of numbers removing numbers that are
close to each other.
E.g. [1, 2, 3, 44, 46] -> [2, 45].
lis is a sorted list of numbers.
max_sep is the maximum separation allowed.
"""
# clusters is a list of [ (low, high) ]
clusters = []
def add_to_clusters(l):
for c in clusters:
if l >= c[0] and l <= c[1]:
# the value is already represented by this cluster
return
# Okay, l is outside this cluster. But is it close?
# On the low end?
if l <= c[1] and l >= c[1] - max_sep:
c[0] = l
return
# Or on the high end?
if l >= c[0] and l <= c[0] + max_sep:
c[1] = l
return
# It's outside all of the known ranges.
# Add a new range.
clusters.append([l, l])
for l in lis:
add_to_clusters(l)
# Now clusters is a list of ranges. Take the average of each.
return [ int((c[0] + c[1])/2.) for c in clusters ]
return group_clusters(indents, 10)
indent_levels = find_indent_levels(doc)
# print("Found indent levels:", indent_levels)
html = """<html>
<body>
<h3>%s</h3>
""" % (meetingtime.strftime("%a %y-%m-%d"))
for page in doc.pages():
# blocks are like paragraphs in a normal PDF. Here, ??
# block is supposedly a tuple,
# (x0, y0, x1, y1, "lines in block", block_type, block_no)
# according to https://pymupdf.readthedocs.io/en/latest/app2.html
# but I think the last two are reversed, it's really no, type.
# flags=0 disables images, but might also disable other
# desirable things, so watch out.
# https://pymupdf.readthedocs.io/en/latest/app2.html#text-extraction-flags
# values include TEXT_PRESERVE_IMAGES, TEXT_PRESERVE_LIGATURES,
# TEXT_PRESERVE_SPANS, TEXT_PRESERVE_WHITESPACE.
blocks = page.get_text("blocks", flags=0)
for b in blocks:
# This clause isn't needed if TEXT_PRESERVE_IMAGES isn't set.
# if b[4].startswith("<image:"):
# print("Skipping an image")
# continue
# Is the indent close to the minimum indent?
# Then it's a header.
# Decide which level of header to use based on content.
if isclose(b[0], indent_levels[0], abs_tol=10):
# print("HEADER:", b[4].replace('\n', ' '))
if re.match(r'[0-9]+\.\n', b[4]):
html += "<h1>%s</h1>\n<p>\n" % b[4]
elif re.match(r'[A-Z]+\.\n', b[4]):
html += "<h2>%s</h2>\n<p>\n" % b[4]
else:
html += "<p>%s</p>" % b[4]
elif isclose(b[0], indent_levels[1], abs_tol=10):
# print(" ", b[4].replace('\n', ' '))
html += "<br>\n%s\n" % b[4]
else:
# print(" OTHER INDENT:", b[4].replace('\n', ' '))
html += "<br><blockquote>\n%s</blockquote>\n" % b[4]
html += "</body></html>"
return html
def html_agenda_pdftohtml(mtg, meetingtime, save_pdf_filename=None, url=None):
"""Convert a PDF agenda to text and/or HTML using pdftohtml,
then return cleaned-up bytes (not str).
save_pdf_filename is for debugging:
if set, save the PDF there and don't delete it.
Returns the HTML source as bytes, not str.
"""
if url:
agendaloc = url
else:
agendaloc = mtg["Agenda"]
if not save_pdf_filename:
save_pdf_filename = "/tmp/tmpagenda.pdf"
if agendaloc.lower().startswith('http') and ':' in agendaloc:
r = requests.get(agendaloc, timeout=30)
with open(save_pdf_filename, "wb") as pdf_fp:
pdf_fp.write(r.content)
agendaloc = save_pdf_filename
elif ':' in agendaloc:
print("Don't understand location", agendaloc, file=sys.stderr)
return None
htmlfile = save_pdf_filename + ".html"
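# pdftohtml flags used here: -c = "complex" layout-preserving output,
# -s = a single HTML document covering all pages, -i = ignore images,
# -noframes = don't generate a frameset.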
args = [ "pdftohtml", "-c", "-s", "-i", "-noframes",
# "-enc", "utf-8",
agendaloc, htmlfile ]
print("Calling", ' '.join(args), file=sys.stderr)
subprocess.call(args)
return clean_up_htmlfile(htmlfile, mtg, meetingtime)
def highlight_filenumbers(soup):
"""Find agenda items, which match FILENO_PAT and contain a link
to related documents on Legistar.
Highlight them with h3.highlight,
and return a list of dicts with 'url' and 'itemdesc'
that can be saved to the item store.
"""
item_list = []
for para in soup.find_all("p"):
# If it matches the FILENO_PAT, change the p tag to an h3 highlight.
if FILENO_PAT.match(para.text.strip()):
# para.wrap(soup.new_tag("h3"))
para.name = "h3"
para["class"] = "highlight"
# Get the link, if any
try:
href = para.find('a').attrs['href']
except:
href = None
if para.name == 'h3':
# Look forward to the next paragraph, which is the description
# of the item.
nextpara = para.find_next('p')
if nextpara:
# Legistar likes to use \xa0, non-breaking spaces
# so sub them out first before replacing runs of whitespace
desctext = re.sub(r'\s{2,}', ' ',
nextpara.text.strip().replace('\xa0', ' '))
item_list.append({ 'url': href, 'itemdesc': desctext })
else:
print("Couldn't find next paragraph after h3", para)
head = soup.head
head.append(soup.new_tag('style', type='text/css'))
head.style.append('.highlight { width: 100%; background-color: #7fb; }')
return item_list
def add_stylesheet(soup):
# link to the stylesheet:
csslink = soup.new_tag("link")
csslink.attrs["rel"] = "stylesheet"
csslink.attrs["type"] = "text/css"
csslink.attrs["title"] = "Style"
csslink.attrs["href"] = "meetingstyle.css"
soup.head.insert(0, csslink)
def clean_up_htmlfile(htmlfile, mtg, meetingtime):
"""Clean up the scary HTML written by pdftohtml,
removing the idiotic dark grey background pdftohtml has hardcoded in,
the assortment of absolute-positioned styles,
the new-paragraph-for-each-line, etc.
Also, try to linkify links, identify sections, save agenda items, etc.
Returns bytes, not str.
"""
global AGENDA_ITEM_STORE
with open(htmlfile, 'rb') as htmlfp:
# The files produced by pdftohtml contain '\240' characters,
# which are ISO-8859-1 for nbsp.
# Adding "-enc", "utf-8" doesn't change that.
# If they aren't decoded, BeautifulSoup will freak out
# and won't see anything in the file at all.
html_bytes = htmlfp.read().decode('ISO-8859-1')
# Make some changes to make the HTML readable and parseable.
soup = BeautifulSoup(html_bytes, "lxml")
# The <style> tag just contains a long comment. No idea why it's there.
try:
soup.head.style.decompose()
except:
pass
body = soup.body
# Insert the meeting date at the beginning of the body
h_tag = soup.new_tag("h3")
soup.body.insert(0, h_tag)
datetext = NavigableString(meetingtime.strftime("%a %b %d"))
h_tag.append(datetext)
# Sometimes pdftohtml mysteriously doesn't work, and gives
# a basically empty HTML file: everything is using position:absolute
# and that makes it invisible to BeautifulSoup.
# This seems to be
# https://gitlab.freedesktop.org/poppler/poppler/-/issues/417
# Check for that.
# If all else fails, htmltotext works to extract the text,
# and might produce cleaner output anyway.
# Or there may be some way to get BS to find those
# <p style="position:absolute" tags that it isn't seeing.
bodylen = len(body.text.strip())
if bodylen == 0:
print("** Yikes! Empty HTML from pdftohtml", htmlfile)
return html_bytes.encode('utf-8')   # fall back to the raw pdftohtml output
elif bodylen < 10:
print(f"Short! Body text is: '{body.text}'")
del body["bgcolor"]
del body["vlink"]
del body["link"]
# Remove all the fixed pixel width styles
for tag in body.find_all('style'):
tag.decompose()
for tag in body.find_all('div'):
del tag["style"]
for tag in body.find_all('p'):
del tag["style"]
# Get rid of the pagination
divs = list(body.find_all(id=pagediv_pat))
for div in divs:
div.replace_with_children()
# There are also anchors like <a name="8">\n</a>
# but they're not really hurting anything.
# Remove hard line breaks. This is a tough decision:
# some line breaks help readability, some hurt it.
# for tag in body.find_all('br'):
# tag.decompose()
# pdftohtml renders each line as a separate paragraph,
# so joining paragraphs helps readability.
# Call join_consecutive_tags starting with outer tags and working inward.
# Do this while the p tags still have classes, so paragraphs of
# different classes don't get merged.
join_consecutive_tags(body, 'p')
join_consecutive_tags(body, 'i')
join_consecutive_tags(body, 'b')
# Now don't need the class tags any more, so delete them.
for tag in body.find_all('p'):
del tag["class"]
# Try to identify major headers, to highlight them better.
# This doesn't work. I don't know why. find_all(text=pat) works fine
# in simple test cases, but not on the real files.
# for b in soup.find_all('b', text=header_pat):
# Instead, loop over all b tags doing the match explicitly:
for bold in body.find_all('b'):
m = re.match(header_pat, bold.get_text().strip())
if not m:
continue
# Can't change text like this
# b.text = f"## {m.groups(1)}. {m.groups(2)}"
# but we can change the tag name:
bold.name = 'h2'
# Highlight all the file numbers, which helps separate items
# to make the HTML more readable.
item_list = highlight_filenumbers(soup)
# Write agenda items found by highlight_filenumbers to the item store,
# if there is one.
# XXX need a way to avoid duplicates when meetings are re-posted
# Create the agenda item store directory if not already there
if item_list and AGENDA_ITEM_STORE:
try:
os.mkdir(AGENDA_ITEM_STORE)
except FileExistsError:
pass
except Exception as e:
print("Couldn't mkdir", AGENDA_ITEM_STORE,
": not saving agenda items", file=sys.stderr)
print(e, file=sys.stderr)
AGENDA_ITEM_STORE = None
if item_list and AGENDA_ITEM_STORE:
try:
# To create the filename, remove spaces and anything following a dash,
# then add the year and month
bodyname = mtg["Name"].replace(' ', '').split('-')[0]
itemfilebase = os.path.join(AGENDA_ITEM_STORE,
bodyname + '-'
+ meetingtime.strftime('%Y-%m'))
itemfile = itemfilebase + '.jsonl'
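# Each agenda item is appended below as one JSON object per line,
# e.g. (hypothetical values):
# {"url": "https://losalamos.legistar.com/...",
#  "itemdesc": "Approval Of The Consent Agenda",
#  "mtgdate": "03/04/2025", "mtgname": "County Council"}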
with open(itemfile, 'a') as itemsfp:
for item in item_list:
# item['body'] = bodyname
item['mtgdate'] = mtg['Meeting Date']
item['mtgname'] = mtg["Name"]
json.dump(item, itemsfp)
# json.dump doesn't add a newline
print('', file=itemsfp)
except Exception as e:
print("Exception trying to save item store from", mtg)
print(e)
# linkify links, particularly the Zoom link
for link in soup.body.findAll(string=LINK_PAT):
if type(link) is not NavigableString:
continue
url = str(link)
print("linkifying", url, file=sys.stderr)
atag = soup.new_tag("a", href=url)
atag.string = url
link.replace_with(atag)
add_stylesheet(soup)
pretty_html_bytes = soup.prettify(encoding='utf-8')
# Testing: maybe the above changes removed the body contents?
# (I think this bug is long since fixed.)
if not body.text:
print("**Yikes! The changes to", save_pdf_file,
"made the HTML empty. Saving original instead.")
with open(os.path.join(RSS_DIR, save_pdf_filename + "_cleaned.html"),
"wb") as savfp:
savefp.write(pretty_html_bytes)
return html
return pretty_html_bytes
def join_consecutive_tags(soup, tagname, add_spaces=False):
"""Join consecutive tags of name tag if they have the same attributes.
E.g. in <p class="foo">some text</p><p class="foo">different text</p>
would produce <p class="foo">some text different text</p>
If add_spaces, will add spaces between tags.
"""
to_merge = []
tags = list(soup.find_all(tagname))
prev = None
for tag in tags:
prev = tag.find_previous_sibling()
# If the two tags have the same parent and the same class,
# they should be merged.
if prev and prev.attrs == tag.attrs:
# First merge in the list?
if not to_merge:
to_merge.append([prev, tag])
continue
else:
# Should these be merged with the last merge?
last_group = to_merge[-1]
last_tag_merged = last_group[-1]
prev_sib = prev.find_previous_sibling()
# SPECIAL CASE FOR LEGISTAR:
# Does it look like a section header?
# Don't merge a paragraph that looks like "2."
# with whatever was before it.
if tag.name == 'p' and re.match(sectionnum_pat, tag.text):
continue
elif (prev == last_tag_merged and
tag.attrs == last_tag_merged.attrs):
# Continue a group merge of three or more tags
last_group.append(tag)
else:
# New pair of mergers, make a new group
to_merge.append([prev, tag])
prev = tag
for group in to_merge:
first = group[0]
for tag in group[1:]:
# Iterating directly over tag.children gets crossed up
# when some of the children are moved to another tag.
children = list(tag.children)
for child in children:
first.append(child)
# All of tag's children have been moved to first.
# Delete tag.
tag.decompose()
def get_tickler(agenda_str, mtg, meetingtime, tickler_html_file):
"""Does an agenda include a tickler?
Input can be either a filename or HTML source as a string or bytes.
If there's a tickler, return full path to the written tickler html,
else None.
"""
if not agenda_str:
return None
soup = BeautifulSoup(agenda_str, "lxml")
if not soup:
print("get_tickler: No soup", file=sys.stderr)
return None
# Does it have a tickler?
tickler_url = None
for a in soup.find_all("a"):
if "tickler" in a.text.lower():
tickler_url = a.get("href")
break
else:
# print("No tickler")
return None
# The tickler is another PDF file, natch. Convert it:
tickler_html = html_agenda_pdftohtml(mtg, meetingtime,
url=tickler_url).decode()
soup = BeautifulSoup(tickler_html, "lxml")
# First give it a clear title. After html_agenda_pdftohtml,
# there's an h3 at the beginning with the date of the tickler.
firstheader = soup.find('h3')
if firstheader:
headerchild = next(firstheader.children)
headertext = headerchild.strip()
if TICKLER_HDR_DATE_PAT.match(headertext):
firstheader.name = 'h1'
headerchild.replace_with("Tickler, " + headertext)
else:
print("firstheader didn't match:", firstheader, file=sys.stderr)
else:
print("No h3 anywhere in tickler file", file=sys.stderr)
# The tickler html comes out with no structure at all; you can't even
# assume that each item has its own <b> tag.
# XXX Would be nice to separate the separate lines into separate
# <p>s and <b>s.
lines = []
for para in soup.find_all("p"):
lines = para.text.strip().splitlines()
if len(lines) > 1:
# Discussion of depth-first iteration:
# https://stackoverflow.com/q/4814317
# Get rid of some of the whitespace-only tags:
for p in list(para.recursiveChildGenerator()):
if type(p) is NavigableString:
if not p.strip():
p.extract()
highlight_filenumbers(soup)
add_stylesheet(soup)
pretty_html_bytes = soup.prettify(encoding='utf-8')
tickler_html_file = os.path.join(RSS_DIR, tickler_html_file)
with open(tickler_html_file, "wb") as fp:
fp.write(pretty_html_bytes)
# print("Wrote tickler to", tickler_html_file, file=sys.stderr)
os.system("ls -l " + tickler_html_file)
return tickler_html_file
NO_AGENDA = b"No agenda available."
def write_meeting_files(mtglist):
"""Take a list of meeting dictionaries and make RSS and HTML files.
"""
active_meetings = []
##############
# Generate index HTML and RSS file headers.
# Open both the RSS and HTML files:
outrssfilename = os.path.join(RSS_DIR, "index.rss")