-
Notifications
You must be signed in to change notification settings - Fork 4
/
haripath_scarp.py
48 lines (37 loc) · 1.38 KB
/
haripath_scarp.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
import os
import urllib.request
from bs4 import BeautifulSoup
def get_html(url_link):
    """Fetch *url_link* over HTTP(S) and return the raw response body as bytes.

    Uses a context manager so the underlying connection is always closed
    (the original leaked the response object).
    """
    with urllib.request.urlopen(url_link) as response:
        return response.read()
def write_haripath(lis, directory):
    """Download each Haripath chapter linked from *lis* and write its cleaned
    text to ``<directory>/<author_name>.txt``.

    Parameters
    ----------
    lis : iterable of bs4 ``<li>`` tags, each containing an ``<a href=...>``
        pointing to a chapter page on mr.wikisource.org.
    directory : output directory path (assumed to already exist).

    Extra ``<li>`` entries beyond the five known authors are ignored
    (the original crashed with IndexError instead).
    """
    names = ['by_dnyandev_maharaj','by_namdev_maharaj','by_eknath_maharaj','by_tukaram_maharaj','by_nivritti_maharaj']
    # zip pairs each known author name with its list item and stops at the shorter.
    for name, li in zip(names, lis):
        html = get_html("https://mr.wikisource.org" + li.a['href'])
        # Explicit parser for consistency with get_haripath_links and to avoid
        # the "no parser specified" warning / parser-dependent output.
        soup = BeautifulSoup(html, "lxml")
        # Context manager guarantees the file is closed (original never closed it).
        with open(os.path.join(directory, name + ".txt"), 'wb') as f:
            for p in soup.find_all('p'):
                text = p.text
                # Strip wiki/markup artifacts that survive into the visible text.
                for token in ('{{{notes}}}', '<poem>', '<br>'):
                    text = text.replace(token, ' ')
                text = text.replace('{', '').replace('}', '')
                f.write(text.encode('utf-8') + b"\n")
def create_directory(directory):
    """Create *directory* (including parents) if it does not already exist.

    ``exist_ok=True`` makes this a race-free no-op when the directory is
    already present, replacing the check-then-create pattern.
    """
    os.makedirs(directory, exist_ok=True)
def get_haripath_links(directory="./datasets/haripath"):
    """Scrape the Haripath index page on mr.wikisource.org and write every
    linked chapter into *directory* as per-author text files.

    Parameters
    ----------
    directory : output directory (default kept for backward compatibility).

    Raises
    ------
    ValueError : if the index page contains no ``<ul>`` chapter list, instead
        of the opaque AttributeError the original raised on ``None``.
    """
    url = "https://mr.wikisource.org/wiki/%E0%A4%B9%E0%A4%B0%E0%A4%BF%E0%A4%AA%E0%A4%BE%E0%A4%A0"
    html = get_html(url)
    soup = BeautifulSoup(html, "lxml")
    ul = soup.find('ul')
    if ul is None:
        raise ValueError("no <ul> chapter list found at " + url)
    lis = ul.find_all('li')
    create_directory(directory)
    write_haripath(lis, directory)
if __name__ == "__main__":
get_haripath_links()