"""Streamlit "Changelog" page for the Scrape-ML repository.

Pulls merged pull requests, open assigned issues, and contributor counts
from the GitHub REST API and renders them as a changelog dashboard.
"""

import streamlit as st
import requests
from datetime import datetime
import pandas as pd

# GitHub REST API root for the repository
REPO_URL = "https://api.github.com/repos/recodehive/Scrape-ML"

# Seconds before an API call is abandoned — without this, a stalled
# connection to GitHub would hang the page indefinitely.
REQUEST_TIMEOUT = 10


def _paginate(url):
    """Yield every item from a paginated GitHub list endpoint.

    *url* must already contain a query string (it is extended with
    ``&per_page=...&page=...``). Iteration stops at the first non-200
    response, empty page, or network error — callers receive whatever
    was successfully retrieved (best effort; unauthenticated requests
    are rate-limited by GitHub to 60/hour).
    """
    page = 1
    while True:
        try:
            # per_page=100 (the API maximum) minimizes round-trips.
            response = requests.get(
                f"{url}&per_page=100&page={page}", timeout=REQUEST_TIMEOUT
            )
        except requests.RequestException:
            break
        if response.status_code != 200 or not response.json():
            break
        yield from response.json()
        page += 1


def fetch_repo_statistics():
    """Return headline repository stats as a dict of integer counts."""
    return {
        "total_prs": fetch_closed_pr_count(),
        "total_projects": fetch_total_projects(),
        "total_contributors": fetch_contributors_count(),
    }


def fetch_closed_pr_count():
    """Count all closed pull requests (merged or not)."""
    return sum(1 for _ in _paginate(f"{REPO_URL}/pulls?state=closed"))


def fetch_total_projects():
    # Placeholder: the repository exposes no project count via the API yet.
    return 0


def fetch_closed_prs():
    """Return merged PRs as display dicts (title/url/date/user/avatar_url)."""
    closed_prs = []
    for pr in _paginate(f"{REPO_URL}/pulls?state=closed"):
        if pr["merged_at"]:  # skip PRs that were closed without merging
            closed_prs.append(
                {
                    "title": pr["title"],
                    "url": pr["html_url"],
                    "date": pr["merged_at"],
                    "user": pr["user"]["login"],
                    "avatar_url": pr["user"]["avatar_url"],
                }
            )
    return closed_prs


def fetch_upcoming_issues():
    """Return open, assigned issues (first page only) as display dicts."""
    upcoming_issues = []
    try:
        response = requests.get(
            f"{REPO_URL}/issues?state=open", timeout=REQUEST_TIMEOUT
        )
    except requests.RequestException:
        return upcoming_issues
    if response.status_code == 200:
        for issue in response.json():
            # The /issues endpoint also returns pull requests; a PR is
            # distinguished by the presence of the "pull_request" key.
            if "pull_request" in issue:
                continue
            if issue.get("assignee"):
                upcoming_issues.append(
                    {
                        "title": issue["title"],
                        "url": issue["html_url"],
                        "date": issue["created_at"],
                        "assignee": issue["assignee"]["login"],
                        "avatar_url": issue["assignee"]["avatar_url"],
                    }
                )
    return upcoming_issues


def fetch_contributors_count():
    """Return the number of contributors (first page only — up to 100)."""
    try:
        response = requests.get(
            f"{REPO_URL}/contributors?per_page=100", timeout=REQUEST_TIMEOUT
        )
    except requests.RequestException:
        return 0
    if response.status_code == 200:
        return len(response.json())
    return 0


# Page chrome — st.set_page_config must run before any other Streamlit call.
st.set_page_config(
    page_title="Changelog - Scrape ML",
    page_icon="📝",
    layout="wide",
    initial_sidebar_state="expanded",
)

# Custom CSS for modern design.
# NOTE(review): the stylesheet content appears truncated/mangled in the
# source under review — the original markdown body is preserved as-is.
st.markdown(
    """
""",
    unsafe_allow_html=True,
)
{feature['desc']}
+Scrape ML is a robust web scraping tool designed to simplify the extraction of data from various online sources. + With its user-friendly interface and powerful features, Scrape ML allows users to collect, organize, + and analyze data seamlessly. Ideal for developers, data scientists, and anyone interested in leveraging + web data for their projects.
+Made with 💜 by the Scrape ML Team
+