02 — FILE AUTOMATION
Organize, Rename, Backup
import os
import shutil
from pathlib import Path
def organise_downloads(download_path: str = str(Path.home() / "Downloads")):
    """Sort loose files in *download_path* into category subfolders.

    Each file is matched by extension (case-insensitive) against the
    ``extensions`` map and moved into the corresponding subfolder, which is
    created on demand. Directories and unmatched files are left untouched.

    Args:
        download_path: Folder to organise; defaults to the user's Downloads.
    """
    extensions = {
        "Images": [".jpg", ".jpeg", ".png", ".gif", ".webp"],
        "Documents": [".pdf", ".docx", ".xlsx", ".txt", ".csv"],
        "Archives": [".zip", ".rar", ".7z"],
        "Videos": [".mp4", ".mkv", ".avi"],
    }
    for file in os.listdir(download_path):
        file_path = os.path.join(download_path, file)
        if not os.path.isfile(file_path):
            continue  # skip subfolders, including ones created by earlier runs
        ext = Path(file).suffix.lower()
        for folder, exts in extensions.items():
            if ext in exts:
                target = os.path.join(download_path, folder)
                os.makedirs(target, exist_ok=True)
                shutil.move(file_path, os.path.join(target, file))
                # "→" restores the arrow garbled in the source text
                print(f"Moved {file} → {folder}")
                break
🗂️ Scans Downloads and moves files into subfolders by extension.
import os
from pathlib import Path
def sequential_rename(folder: str, prefix: str = "photo_", start: int = 1):
    """Rename every file in *folder* to ``<prefix><NNNN><ext>`` in sorted order.

    Args:
        folder: Directory whose files are renamed (subfolders are untouched).
        prefix: Text placed before the zero-padded counter.
        start: First counter value.

    Raises:
        FileExistsError: if a target name is already taken by a different
            file; the original code would silently overwrite it on POSIX.
    """
    path = Path(folder)
    files = sorted(f for f in path.iterdir() if f.is_file())
    for i, file_path in enumerate(files, start=start):
        new_name = f"{prefix}{i:04d}{file_path.suffix}"
        if file_path.name == new_name:
            continue  # already has the desired name
        target = path / new_name
        if target.exists():
            # refuse to clobber an unrelated file
            raise FileExistsError(f"{target} already exists")
        file_path.rename(target)
        # "→" restores the arrow garbled in the source text
        print(f"Renamed → {new_name}")
🏷️ Renames all files in a folder with numeric pattern.
import shutil
from datetime import datetime
from pathlib import Path
def daily_backup(source: str, backup_root: str):
    """Copy the *source* tree into a date-stamped folder under *backup_root*.

    Re-running on the same day merges into the existing backup folder
    (``dirs_exist_ok=True``), overwriting files already copied.

    Args:
        source: Directory to back up.
        backup_root: Parent directory that receives ``backup_YYYY-MM-DD``.
    """
    today = datetime.now().strftime("%Y-%m-%d")
    backup_dir = Path(backup_root) / f"backup_{today}"
    backup_dir.mkdir(parents=True, exist_ok=True)
    shutil.copytree(source, backup_dir, dirs_exist_ok=True)
    # "→" restores the arrow garbled in the source text
    print(f"Backup → {backup_dir}")
⏱️ Copies folder into timestamped subfolder. Use with scheduler.
import os
from datetime import datetime, timedelta
from pathlib import Path
def clean_old(dir: str, days: int = 30):
    """Delete every file under *dir* last modified more than *days* days ago."""
    cutoff = datetime.now() - timedelta(days=days)
    removed = 0
    for entry in Path(dir).rglob("*"):
        if not entry.is_file():
            continue
        modified = datetime.fromtimestamp(entry.stat().st_mtime)
        if modified < cutoff:
            entry.unlink()
            removed += 1
    print(f"Removed {removed} old files")
⏳ Deletes files older than `days`. Use with caution.
import calendar
from pathlib import Path
from datetime import datetime
def create_monthly(base: str, year: int = None):
    """Create ``<base>/<year>/NN_MonthName`` folders for all 12 months.

    When *year* is omitted (or falsy), the current year is used.
    """
    chosen_year = year or datetime.now().year
    root = Path(base) / str(chosen_year)
    root.mkdir(parents=True, exist_ok=True)
    for month in range(1, 13):
        month_dir = root / f"{month:02d}_{calendar.month_name[month]}"
        month_dir.mkdir(exist_ok=True)
🗓️ Creates year/month folder structure.
import filecmp
from pathlib import Path
def compare_folders(a, b):
dcmp = filecmp.dircmp(a, b)
for name in dcmp.left_only:
print(f"Only in A: {Path(a)/name}")
for name in dcmp.right_only:
print(f"Only in B: {Path(b)/name}")
for sub in dcmp.subdirs.values():
compare_folders(sub.left, sub.right)
🔍 Lists files present in one folder but not the other.
03 — DATA & CSV AUTOMATION
Spreadsheets and Reports
import pandas as pd
def sales_summary(csv_path: str, out: str = "region_summary.csv"):
    """Summarise a sales CSV by region and write the result to *out*.

    Expects columns "Region" and "Amount"; writes sum/mean/count per region,
    with the sum formatted as Philippine pesos.

    Args:
        csv_path: Input CSV with at least Region and Amount columns.
        out: Path for the summary CSV.
    """
    df = pd.read_csv(csv_path)
    summary = df.groupby("Region")["Amount"].agg(["sum", "mean", "count"]).round(2)
    # "₱" restores the peso sign that was garbled in the source text
    summary["sum"] = summary["sum"].map("₱{:,.2f}".format)
    summary.to_csv(out)
    print(summary)
📊 Groups CSV by Region, saves summary. Needs pandas.
import pandas as pd
from pathlib import Path
def merge_csv(folder: str, out: str = "merged.csv"):
    """Concatenate every ``*.csv`` in *folder* (same columns) into one file.

    Does nothing when the folder contains no CSV files.
    """
    frames = []
    for csv_file in Path(folder).glob("*.csv"):
        frames.append(pd.read_csv(csv_file))
    if not frames:
        return
    pd.concat(frames, ignore_index=True).to_csv(out, index=False)
    print(f"Merged {len(frames)} files -> {out}")
📚 Stacks CSVs with same columns. Needs pandas.
04 — WEB AUTOMATION
Scraping, Downloads, Monitoring
import requests
from bs4 import BeautifulSoup
def get_price(url: str):
    """Fetch *url* and return ``{"title": ..., "price": ...}`` scraped from it.

    Falls back to "N/A" for either field when the expected element is
    missing; the selectors are site-specific and may need adjusting.

    Args:
        url: Product page to scrape.

    Returns:
        dict with string keys "title" and "price".
    """
    headers = {"User-Agent": "Mozilla/5.0"}
    resp = requests.get(url, headers=headers, timeout=10)
    soup = BeautifulSoup(resp.text, "html.parser")
    h1 = soup.find("h1")  # look the element up once instead of twice
    title = h1.get_text(strip=True) if h1 else "N/A"
    price = soup.select_one("[data-price], .price, .amount")
    price_text = price.get_text(strip=True) if price else "N/A"
    return {"title": title, "price": price_text}
🏷️ Extracts product title and price. Adjust selectors as needed.
import requests
from concurrent.futures import ThreadPoolExecutor
def check_urls(urls, timeout=5):
    """Probe each URL with a HEAD request and print status and elapsed time.

    Requests run in parallel on up to 10 threads; failures are reported as
    "error" rather than raised.

    Args:
        urls: Iterable of URLs to check.
        timeout: Per-request timeout in seconds.
    """
    def get(url):
        try:
            r = requests.head(url, timeout=timeout, allow_redirects=True)
            return (url, r.status_code, r.elapsed.total_seconds())
        except requests.RequestException:
            # narrow the bare except: only swallow network/request errors
            return (url, "error", 0)
    with ThreadPoolExecutor(max_workers=10) as ex:
        for url, status, seconds in ex.map(get, urls):
            # "→" restores the arrow garbled in the source text
            print(f"{url} → {status} ({seconds:.2f}s)")
⚡ Checks HTTP status & response time in parallel.
import requests
import os
from bs4 import BeautifulSoup
from urllib.parse import urljoin
def dl_page_images(url, folder="downloaded"):
os.makedirs(folder, exist_ok=True)
soup = BeautifulSoup(requests.get(url).text, "html.parser")
for i, img in enumerate(soup.find_all("img")):
src = img.get("src")
if src:
img_url = urljoin(url, src)
ext = os.path.splitext(img_url)[1] or ".jpg"
data = requests.get(img_url).content
with open(os.path.join(folder, f"img_{i}{ext}"), "wb") as f:
f.write(data)
🖼️ Saves all images from a webpage.
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
import time
class Handler(FileSystemEventHandler):
    """Watchdog handler that announces newly created files."""

    def on_created(self, event):
        # directory-creation events are ignored
        if event.is_directory:
            return
        print(f"New: {event.src_path}")
def watch(path: str):
    """Block forever, printing each file created in *path* (Ctrl+C to stop)."""
    observer = Observer()
    observer.schedule(Handler(), path, recursive=False)
    observer.start()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
👀 Monitors folder and prints new files. Install watchdog.
05 — IMAGE & PDF AUTOMATION
Processing Visual Assets
import img2pdf
import os
def images_to_pdf(folder: str, output: str = "combined.pdf"):
    """Merge all PNG/JPG images in *folder* (sorted by name) into one PDF.

    Does nothing when the folder contains no matching images.
    """
    picture_files = []
    for name in sorted(os.listdir(folder)):
        if name.lower().endswith((".png", ".jpg", ".jpeg")):
            picture_files.append(os.path.join(folder, name))
    if picture_files:
        with open(output, "wb") as pdf:
            pdf.write(img2pdf.convert(picture_files))
        print(f"PDF created: {output}")
🖨️ Combines images into one PDF. Needs img2pdf and Pillow.
from PIL import Image
from pathlib import Path
def resize_all(folder: str, max_size=(800,800), suffix="_resized"):
for f in Path(folder).glob("*.[jJ][pP]*"):
img = Image.open(f)
img.thumbnail(max_size)
new = f.with_stem(f.stem + suffix)
img.save(new, optimize=True, quality=85)
print(f"Resized: {new.name}")
🖼️ Resizes images, keeps aspect ratio, adds suffix.
import pytesseract
from PIL import Image
from pathlib import Path
def ocr_images(folder: str, out_txt: str = "extracted.txt"):
    """Run OCR on every PNG in *folder*, appending each result to *out_txt*.

    Args:
        folder: Directory scanned for ``*.png`` (case-insensitive).
        out_txt: Text file that receives one "--- name ---" section per image.
    """
    # explicit utf-8 keeps non-ASCII OCR output from raising
    # UnicodeEncodeError on platforms whose default encoding is cp1252
    with open(out_txt, "w", encoding="utf-8") as out:
        for img_file in Path(folder).glob("*.[pP][nN][gG]"):
            text = pytesseract.image_to_string(Image.open(img_file))
            out.write(f"\n--- {img_file.name} ---\n{text}\n")
            print(f"OCR done: {img_file.name}")
📝 Extracts text from images. Needs Tesseract OCR.
import qrcode
def make_qr(data: str, out: str = "qr.png"):
    """Render *data* as a black-on-white QR code image saved to *out*."""
    code = qrcode.QRCode(version=1, box_size=10, border=4)
    code.add_data(data)
    code.make(fit=True)
    image = code.make_image(fill_color="black", back_color="white")
    image.save(out)
    print(f"QR saved as {out}")
🔳 Creates QR from text/URL. Needs qrcode[pil].
06 — COMMUNICATION & SCHEDULING
Email, Scheduling, Notifications
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
def send_gmail(to: str, subject: str, body: str, from_email: str, password: str):
    """Send a plain-text email through Gmail's SSL SMTP endpoint.

    Args:
        to: Recipient address.
        subject: Message subject line.
        body: Plain-text message body.
        from_email: Sender address, also used as the login name.
        password: Account (or app) password for SMTP login.
    """
    msg = MIMEMultipart()
    msg["From"] = from_email
    msg["To"] = to
    msg["Subject"] = subject
    msg.attach(MIMEText(body, "plain"))
    with smtplib.SMTP_SSL("smtp.gmail.com", 465) as server:
        server.login(from_email, password)
        server.sendmail(from_email, to, msg.as_string())
    print("Email sent")
📧 Uses Gmail SMTP. For 2FA, use an app password.
import schedule
import time
def job():
    # Placeholder for the real daily work; replace with actual tasks.
    print("daily task: backup, report, cleanup...")

# Register job() to run every day at 07:30, then poll once a minute.
# NOTE: this loop blocks forever; run it in its own process/service.
schedule.every().day.at("07:30").do(job)
while True:
    schedule.run_pending()
    time.sleep(60)
🕢 Uses schedule library. Lightweight in-process scheduler.
import requests
from datetime import datetime
from pathlib import Path
def daily_quote():
    """Fetch a random quote and save it to the Desktop as quote_<date>.txt."""
    # timeout keeps the call from hanging if the API is unreachable
    q = requests.get("https://zenquotes.io/api/random", timeout=10).json()[0]
    # curly quotes and em dash restore the characters garbled in the source
    quote = f"“{q['q']}” — {q['a']}"
    path = Path.home() / "Desktop" / f"quote_{datetime.now():%Y-%m-%d}.txt"
    path.write_text(quote, encoding="utf-8")
    print(f"Quote saved: {path}")
📜 Fetches random quote and saves to desktop.
import re
def strength_check(file):
weak, med, strong = [], [], []
with open(file) as f:
for pwd in f:
p = pwd.strip()
if not p: continue
s = sum([len(p)>=12, bool(re.search(r"[A-Z]",p)),
bool(re.search(r"[a-z]",p)), bool(re.search(r"\d",p)),
bool(re.search(r"[!@#$%^&*()_+{}|:<>?]",p))])
(weak if s<3 else med if s<5 else strong).append(p)
print(f"Weak:{len(weak)} Medium:{len(med)} Strong:{len(strong)}")
✔️ Rates password complexity from a file.
DISCUSSION
Comments & Feedback
This is a working comment section using browser storage (demo backend). To connect a real database, replace with your own service.