Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[ENG-1322] I Reviewed 1,000s of Opinions on GitHub Copilot (Part 2) #308

Merged
merged 29 commits into from
Oct 25, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion generator/konfig-docs/blog/2023/08/21-serverless.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -55,7 +55,7 @@ only thought-provoking opinions.
</Figure>

Next, I transcribed these discussions onto a whiteboard, organizing them into
"Pro Serverless," "Anti Serverless," or "Neutral" categories, and then
"Pro Serverless," "Anti Serverless", or "Neutral" categories, and then
clustering them into distinct opinions. Each section in this post showcases
an opinion while referencing pertinent discussions.

Expand Down
386 changes: 386 additions & 0 deletions generator/konfig-docs/blog/2023/10/23-github-copilot.mdx

Large diffs are not rendered by default.

Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Original file line number Diff line number Diff line change
@@ -0,0 +1,102 @@
import os
import cv2
import numpy as np
from PIL import Image

def find_content_bounds(image_path, buffer_pixels=5):
    """Locate the bounding box of the main content in an image.

    Loads the image in grayscale, runs Canny edge detection, and takes the
    bounding rectangle of the largest external contour, expanded by
    ``buffer_pixels`` on every side and clamped to the image bounds.

    Returns a (left, upper, right, lower) tuple of pixel coordinates, or
    ``None`` when no contours are found.
    """
    gray = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)

    edge_map = cv2.Canny(gray, threshold1=100, threshold2=200)

    found, _ = cv2.findContours(edge_map, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    if not found:
        return None

    # Bound the biggest contour by area — assumed to be the page content.
    biggest = max(found, key=cv2.contourArea)
    left, top, width, height = cv2.boundingRect(biggest)

    # Grow the rectangle by the buffer on all sides...
    left -= buffer_pixels
    top -= buffer_pixels
    width += 2 * buffer_pixels
    height += 2 * buffer_pixels

    # ...then clamp so it never leaves the image.
    left = max(0, left)
    top = max(0, top)
    right = min(gray.shape[1], left + width)
    bottom = min(gray.shape[0], top + height)

    return left, top, right, bottom

def crop_and_pad_center(image_path, output_path, padding):
    """Crop an image to its detected content and save it with uniform padding.

    Uses find_content_bounds() to locate the content. When nothing is
    detected, prints a message and writes no output. The padded canvas is
    white for opaque images and fully transparent when the source has an
    alpha channel.
    """
    bounds = find_content_bounds(image_path)
    if bounds is None:
        print("No content detected in", image_path)
        return

    source = Image.open(image_path)
    transparent = source.mode == 'RGBA'

    # bounds is already (left, upper, right, lower), Pillow's crop format.
    content = source.crop(bounds)

    # Canvas = cropped content plus `padding` pixels on every side.
    canvas_size = (content.width + 2 * padding, content.height + 2 * padding)
    if transparent:
        canvas = Image.new('RGBA', canvas_size, (255, 255, 255, 0))
    else:
        canvas = Image.new('RGB', canvas_size, (255, 255, 255))

    canvas.paste(content, (padding, padding))
    canvas.save(output_path)


def process_images_in_folder(input_folder, output_folder, padding):
    """Crop-and-pad every image found (recursively) under input_folder.

    Mirrors the directory layout of input_folder inside output_folder.
    Hidden files are skipped, and only files with a common image extension
    are processed.
    """
    os.makedirs(output_folder, exist_ok=True)

    image_extensions = ('.png', '.jpg', '.jpeg', '.gif', '.bmp')

    # Collect paths relative to input_folder so the tree can be mirrored.
    relative_paths = []
    for root, _dirs, filenames in os.walk(input_folder):
        for name in filenames:
            if name.startswith('.'):
                continue  # skip hidden files
            full_path = os.path.join(root, name)
            relative_paths.append(os.path.relpath(full_path, input_folder))

    for rel_path in relative_paths:
        if not rel_path.lower().endswith(image_extensions):
            continue

        src = os.path.join(input_folder, rel_path)
        dst = os.path.join(output_folder, rel_path)

        # Create any missing subdirectories in the output tree.
        os.makedirs(os.path.dirname(dst), exist_ok=True)

        crop_and_pad_center(src, dst, padding)

if __name__ == "__main__":
    # Default locations and padding for a batch run of the tool.
    source_dir = "./before"
    destination_dir = "./after"
    pad_pixels = 20

    process_images_in_folder(source_dir, destination_dir, pad_pixels)
Original file line number Diff line number Diff line change
@@ -0,0 +1,59 @@
import os
import pyperclip
from urllib.parse import urlparse

def get_comment(file_path):
    """Fetch the Finder comment of a file on macOS via AppleScript.

    The previous implementation shelled out with
    ``os.popen(f"osascript -e '{{script}}'")``: the script was embedded in a
    single-quoted shell string, so any path containing a single quote broke
    the command and could inject arbitrary shell code. Passing the script to
    ``osascript`` through an argument list avoids the shell entirely.

    Returns the comment text with surrounding whitespace stripped; returns
    an empty string when osascript fails or the comment is empty (matching
    the old os.popen behavior of swallowing errors).
    """
    import subprocess

    # NOTE(review): file_path is still interpolated into the AppleScript
    # source, so a path containing a double quote would break the script —
    # acceptable for local tooling, but worth knowing.
    script = f'''
    tell application "Finder"
        set theItem to (POSIX file "{file_path}") as alias
        get comment of theItem
    end tell
    '''
    result = subprocess.run(
        ["osascript", "-e", script],
        capture_output=True,
        text=True,
        check=False,  # mimic os.popen: no exception on failure
    )
    return result.stdout.strip()

def extract_source_website(url):
    """Map a URL to a human-readable source-site name.

    Recognizes YouTube, Hacker News, Twitter, and Reddit by substring match
    on the URL's network location; any other URL yields its raw netloc
    (which is the empty string for scheme-less input).
    """
    host = urlparse(url).netloc

    known_sites = (
        ("youtube", "YouTube"),
        ("news.ycombinator", "Hacker News"),
        ("twitter", "Twitter"),
        ("reddit", "Reddit"),
    )
    for marker, label in known_sites:
        if marker in host:
            return label

    # Fall back to the bare domain when the site is not recognized.
    return host

def generate_string(folder_path):
    """Build a <Carousel.Wrapper> MDX snippet for every PNG in folder_path.

    Each PNG becomes a <Carousel.Slide> whose caption links to the URL
    stored in the file's Finder comment, labelled with the source site
    name. Returns the complete markup as one string.
    """
    pieces = ["<Carousel.Wrapper>"]

    # e.g. 'bad-result' for './before/bad-result'
    base_name = os.path.basename(os.path.normpath(folder_path))

    for entry in os.listdir(folder_path):
        if not entry.endswith('.png'):
            continue

        full_path = os.path.join(folder_path, entry)
        url = get_comment(full_path)
        site = extract_source_website(url)
        stem = os.path.splitext(entry)[0]

        pieces.append(f"""
<Carousel.Slide>
<Figure caption={{<a href="{url}">{site}</a>}}>
![{stem}](./github-copilot-assets/slides/after/{base_name}/{entry})
</Figure>
</Carousel.Slide>""")

    pieces.append("\n</Carousel.Wrapper>")
    return "".join(pieces)

# Prompt for the image folder, emit the MDX carousel markup, and copy it to
# the clipboard for pasting into the blog post.
target_folder = input("Enter the path of the folder containing the images (e.g. ./before/bad-result): ")
markup = generate_string(target_folder)
print(markup)

pyperclip.copy(markup)
print("The generated string has been copied to the clipboard!")
Loading
Loading