-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathglenorchy.py
45 lines (42 loc) · 1.8 KB
/
glenorchy.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
import os
import scraperwiki
from bs4 import BeautifulSoup
from datetime import datetime
import logging
def getfield(lines_, key):
    """Return the stripped value of the first ``key: value`` line matching *key*.

    Parameters:
        lines_: iterable of strings, each expected in ``key: value`` form.
        key: the field name to look up (text before the first colon).

    Returns the text after the first colon, stripped of surrounding whitespace
    (empty string if the matching line has no colon at all).

    Raises IndexError when no line matches *key*.
    """
    # partition splits on the first colon only and never raises, unlike the
    # original double-split which crashed on a colon-less line equal to key.
    matches = [value for name, _sep, value in
               (line.partition(':') for line in lines_) if name == key]
    return matches[0].strip()
def councildas():
    """Scrape current development applications from Glenorchy City Council.

    Fetches the council's planning-applications page and parses each
    application card into a record dict with keys: council_reference,
    address, description, info_url, date_scraped, on_notice_to.

    Returns a list of such dicts (empty if no applications are listed).
    """
    applications_url = 'https://www.gcc.tas.gov.au/services/planning-and-building/planning-and-development/planning-applications/'
    html = scraperwiki.scrape(applications_url)
    date_scraped = datetime.now().isoformat()
    page = BeautifulSoup(html, 'html.parser')
    das = page.find_all('div', 'content-block__inner')
    records = []
    for da in das:
        # Link text looks like "<reference> - <ADDRESS>"; split off the
        # reference and discard the separator token.
        refandaddress = da.find('a').text.strip()
        council_reference, _, address = refandaddress.split(' ', 2)
        # Address is sometimes all-capitals; normalise to title case.
        address = address.title() + ', Tasmania, Australia'
        # Some cards have one link, some have two; the first is the detail page.
        info_url = da('a')[0]['href']
        description = da.find('div', 'content-block__description').text.strip()
        # Date line looks like "Closes: 12 May 2021" -> normalise to ISO.
        rawcloses = da.find('p', 'content-block__date').text.split(':')[1].strip()
        on_notice_to = datetime.strptime(rawcloses, '%d %B %Y').strftime('%Y-%m-%d')
        # append is O(1); the original `records = records + [record]`
        # rebuilt the whole list each iteration (quadratic).
        records.append({
            'council_reference': council_reference,
            'address': address,
            'description': description,
            'info_url': info_url,
            'date_scraped': date_scraped,
            'on_notice_to': on_notice_to,
        })
    return records
if __name__ == '__main__':
    # Scrape, then log each record at DEBUG level while persisting it to the
    # local sqlite store keyed on the council reference.
    logging.basicConfig(level=logging.DEBUG)
    os.environ["SCRAPERWIKI_DATABASE_NAME"] = "sqlite:///data.sqlite"
    for row in councildas():
        logging.debug(row)
        scraperwiki.sqlite.save(unique_keys=['council_reference'], data=row, table_name='data')