Repository: resciencelab/opc-skills
Low Risk
No security issues found
Skill manifest does not include a 'license' field. Specifying a license helps users understand usage terms.
Remediation: Add a 'license' field to the SKILL.md frontmatter (e.g., MIT, Apache-2.0)
Description
Search and retrieve content from Reddit. Get posts, comments, subreddit info, and user profiles via the public JSON API. Use when user mentions Reddit, a subreddit, or r/ links.
Skill Files
# Reddit Skill Get posts, comments, subreddit info, and user profiles from Reddit via the public JSON API. ## Prerequisites **No API key required!** Reddit's public JSON API works without authentication. **Quick Check**: ```bash cd <skill_directory> python3 scripts/get_posts.py python --limit 3 ``` ## Commands All commands run from the skill directory. ### Subreddit Posts ```bash python3 scripts/get_posts.py python --limit 20 # Hot posts (default) python3 scripts/get_posts.py python --sort new --limit 20 python3 scripts/get_posts.py python --sort top --time week python3 scripts/get_posts.py python --sort top --time all --limit 10 ``` ### Search Posts ```bash python3 scripts/search_posts.py "AI agent" --limit 20 python3 scripts/search_posts.py "MCP server" --subreddit ClaudeAI --limit 10 python3 scripts/search_posts.py "async python" --sort top --time year ``` ### Subreddit Info ```bash python3 scripts/get_subreddit.py python python3 scripts/get_subreddit.py ClaudeAI ``` ### Post & Comments ```bash python3 scripts/get_post.py abc123 # Get post by ID python3 scripts/get_post.py abc123 --comments 50 # With more comments ``` ### User Profile ```bash python3 scripts/get_user.py spez python3 scripts/get_user.py spez --posts 10 # Include recent posts ``` ## Sort Options | Sort | Description | Time Options | |------|-------------|--------------| | `hot` | Trending posts (default) | - | | `new` | Latest posts | - | | `top` | Highest voted | hour, day, week, month, year, all | | `rising` | Gaining traction | - | | `controversial` | Mixed votes | hour, day, week, month, year, all | ## API Info - **Method**: Public JSON API (no auth needed) - **Trick**: Append `.json` to any Reddit URL - **Rate Limit**: 100 requests/minute - **Docs**: https://www.reddit.com/dev/api
{
"name": "reddit",
"version": "1.0.0",
"description": "Search and retrieve content from Reddit. Get posts, comments, subreddit info, and user profiles via the public JSON API.",
"author": {
"name": "ReScienceLab"
},
"homepage": "https://github.com/ReScienceLab/opc-skills/tree/main/skills/reddit",
"repository": "https://github.com/ReScienceLab/opc-skills",
"license": "MIT",
"keywords": [
"reddit",
"subreddit",
"r/"
],
"skills": [
"./SKILL.md"
],
"commands": [
"./scripts/"
]
}
#!/usr/bin/env python3
"""
Reddit API - No credentials needed for public read-only access.
Just need a proper User-Agent header.
"""
# User-Agent header value required by Reddit's public JSON API; no other
# credentials are needed for read-only access.
USER_AGENT = "DailyTasks-Reddit-Skill/1.0 (by /u/droid-assistant)"


def get_user_agent() -> str:
    """Return the User-Agent header value for Reddit API requests."""
    return USER_AGENT
#!/usr/bin/env python3
"""
Get a post with comments
Usage: python3 scripts/get_post.py POST_ID --comments 20
"""
import argparse
import json
from reddit_api import api_get, clean_post, print_post, print_comments_list
def main():
    """Fetch a Reddit post by ID and display it with its top comments."""
    parser = argparse.ArgumentParser(description="Get Reddit post with comments")
    parser.add_argument("post_id", help="Post ID (e.g., abc123)")
    parser.add_argument("--comments", "-c", type=int, default=20, help="Max comments")
    parser.add_argument("--json", "-j", action="store_true", help="Output as JSON")
    args = parser.parse_args()

    # The comments endpoint returns a two-element array:
    # [post listing, comments listing].
    data = api_get(f"comments/{args.post_id}", {"limit": args.comments})
    if not isinstance(data, list) or len(data) < 2:
        print(f"Post not found: {args.post_id}")
        return

    if args.json:
        print(json.dumps(data, indent=2))
        return

    post_children = data[0].get("data", {}).get("children", [])
    if post_children:
        print_post(clean_post(post_children[0]))

    comment_children = data[1].get("data", {}).get("children", [])
    if comment_children:
        print("---")
        print_comments_list(comment_children[:args.comments])


if __name__ == "__main__":
    main()
#!/usr/bin/env python3
"""
Get posts from a subreddit
Usage: python3 scripts/get_posts.py python --sort hot --limit 20
"""
import argparse
from reddit_api import api_get, print_posts_list, print_pagination
def main():
    """List posts from a subreddit with sort/time/pagination options."""
    parser = argparse.ArgumentParser(description="Get subreddit posts")
    parser.add_argument("subreddit", help="Subreddit name (without r/)")
    parser.add_argument("--sort", "-s", choices=["hot", "new", "top", "rising", "controversial"],
                        default="hot", help="Sort method (default: hot)")
    parser.add_argument("--time", "-t", choices=["hour", "day", "week", "month", "year", "all"],
                        help="Time filter for top/controversial")
    parser.add_argument("--limit", "-l", type=int, default=25, help="Max posts (max 100)")
    parser.add_argument("--after", "-a", help="Pagination cursor")
    args = parser.parse_args()

    endpoint = f"r/{args.subreddit}/{args.sort}"
    query = {"limit": min(args.limit, 100), "after": args.after}
    # The "t" parameter only applies to the top/controversial sorts.
    if args.time and args.sort in ("top", "controversial"):
        query["t"] = args.time

    listing = api_get(endpoint, query).get("data", {})

    label = f"{endpoint}/{args.time}" if args.time else endpoint
    print_posts_list(listing.get("children", []), label)
    print_pagination(listing)


if __name__ == "__main__":
    main()
#!/usr/bin/env python3
"""
Get subreddit info
Usage: python3 scripts/get_subreddit.py python
"""
import argparse
import json
from reddit_api import api_get, clean_subreddit, print_subreddit
def main():
    """Show metadata for a single subreddit."""
    parser = argparse.ArgumentParser(description="Get subreddit info")
    parser.add_argument("subreddit", help="Subreddit name (without r/)")
    parser.add_argument("--json", "-j", action="store_true", help="Output as JSON")
    args = parser.parse_args()

    data = api_get(f"r/{args.subreddit}/about")
    if args.json:
        # Prefer the unwrapped payload; fall back to the raw response.
        print(json.dumps(data.get("data", data), indent=2))
    else:
        print_subreddit(clean_subreddit(data))


if __name__ == "__main__":
    main()
#!/usr/bin/env python3
"""
Get user profile
Usage: python3 scripts/get_user.py spez --posts 10
"""
import argparse
import json
from reddit_api import api_get, clean_user, print_user, print_posts_list
def main():
    """Show a Reddit user's profile, optionally with recent submissions."""
    parser = argparse.ArgumentParser(description="Get Reddit user profile")
    parser.add_argument("username", help="Username (without u/)")
    parser.add_argument("--posts", "-p", type=int, default=0, help="Include N recent posts")
    parser.add_argument("--json", "-j", action="store_true", help="Output as JSON")
    args = parser.parse_args()

    profile = api_get(f"user/{args.username}/about")
    if args.json:
        print(json.dumps(profile.get("data", profile), indent=2))
        return

    print_user(clean_user(profile))

    if args.posts > 0:
        submitted = api_get(f"user/{args.username}/submitted", {"limit": args.posts})
        recent = submitted.get("data", {}).get("children", [])
        if recent:
            print("---")
            print_posts_list(recent, "recent_posts")


if __name__ == "__main__":
    main()
#!/usr/bin/env python3
"""
Reddit public JSON API wrapper
No authentication required - just append .json to URLs
"""
import json
import sys
import urllib.error
import urllib.parse
import urllib.request

from credential import get_user_agent
BASE_URL = "https://www.reddit.com"
def api_get(path: str, params: dict = None) -> dict:
    """Make a GET request to the Reddit public JSON API.

    Args:
        path: API path relative to the site root, without a leading slash
              or ".json" suffix (e.g. "r/python/hot").
        params: Optional query parameters; entries with a None value are
                dropped. The caller's dict is never modified.

    Returns:
        The decoded JSON response. Note: the comments endpoint returns a
        list, not a dict.

    Exits the process with status 1 on HTTP or network errors.
    """
    url = f"{BASE_URL}/{path}.json"
    # raw_json=1 tells Reddit not to HTML-entity-encode text fields.
    # Build a fresh dict so the caller's params are left untouched
    # (the original mutated the argument in place).
    query = {"raw_json": "1"}
    if params:
        query.update({k: v for k, v in params.items() if v is not None})
    url += "?" + urllib.parse.urlencode(query)

    req = urllib.request.Request(url, headers={"User-Agent": get_user_agent()})
    try:
        with urllib.request.urlopen(req, timeout=30) as resp:
            return json.loads(resp.read().decode())
    except urllib.error.HTTPError as e:
        if e.code == 429:
            print("error: Rate limited. Wait a moment and try again.", file=sys.stderr)
        elif e.code == 404:
            print(f"error: Not found - {path}", file=sys.stderr)
        else:
            print(f"error: HTTP {e.code}", file=sys.stderr)
        sys.exit(1)
    except Exception as e:
        print(f"error: {e}", file=sys.stderr)
        sys.exit(1)
def format_count(n) -> str:
    """Render a count compactly: 1234567 -> '1.2M', 1500 -> '1.5K'."""
    value = 0 if n is None else int(n)
    for threshold, suffix in ((1_000_000, "M"), (1_000, "K")):
        if value >= threshold:
            return f"{value / threshold:.1f}{suffix}"
    return str(value)
def clean_post(p: dict) -> dict:
    """Reduce a raw Reddit post object to the fields we display."""
    # Listing items wrap the payload under "data"; accept either form.
    data = p.get("data", p)
    out = {key: data.get(key)
           for key in ("id", "title", "subreddit", "author", "score",
                       "upvote_ratio", "num_comments", "url")}
    out["permalink"] = f"https://reddit.com{data.get('permalink', '')}"
    out["selftext"] = (data.get("selftext") or "")[:500]  # cap body for display
    for key in ("created_utc", "is_self", "link_flair_text"):
        out[key] = data.get(key)
    return out
def clean_comment(c: dict) -> dict:
    """Reduce a raw Reddit comment object to the fields we display."""
    data = c.get("data", c)
    out = {"id": data.get("id"), "author": data.get("author")}
    out["body"] = (data.get("body") or "")[:300]  # cap body for display
    out["score"] = data.get("score")
    out["created_utc"] = data.get("created_utc")
    return out
def clean_subreddit(s: dict) -> dict:
    """Reduce a raw subreddit object to the fields we display."""
    data = s.get("data", s)
    display_name = data.get("display_name")
    out = {"name": display_name, "title": data.get("title")}
    out["description"] = (data.get("public_description") or "")[:200]
    out["subscribers"] = data.get("subscribers")
    out["active_users"] = data.get("accounts_active")
    out["created_utc"] = data.get("created_utc")
    out["url"] = f"https://reddit.com/r/{display_name}"
    out["over18"] = data.get("over18")
    return out
def clean_user(u: dict) -> dict:
    """Reduce a raw user object to the fields we display."""
    data = u.get("data", u)
    wanted = ("name", "link_karma", "comment_karma", "created_utc", "is_mod", "verified")
    return {key: data.get(key) for key in wanted}
def print_post(p: dict):
    """Print a cleaned post as key: value lines."""
    if not p:
        return
    ratio_pct = int((p.get('upvote_ratio') or 0) * 100)
    print(f"id: {p.get('id', '')}")
    print(f"title: {p.get('title', '')}")
    print(f"subreddit: r/{p.get('subreddit', '')}")
    print(f"author: u/{p.get('author', '')}")
    print(f"score: {format_count(p.get('score'))} ({ratio_pct}% upvoted)")
    print(f"comments: {format_count(p.get('num_comments'))}")
    print(f"url: {p.get('permalink', '')}")
    flair = p.get('link_flair_text')
    if flair:
        print(f"flair: {flair}")
    body = p.get('selftext')
    if body:
        print("---")
        print(f"text: {body}")
def print_subreddit(s: dict):
    """Print cleaned subreddit info as key: value lines."""
    if not s:
        return
    lines = [
        f"name: r/{s.get('name', '')}",
        f"title: {s.get('title', '')}",
        f"subscribers: {format_count(s.get('subscribers'))}",
        f"active: {format_count(s.get('active_users'))} online",
        f"nsfw: {s.get('over18', False)}",
        f"url: {s.get('url', '')}",
    ]
    if s.get('description'):
        lines.append(f"description: {s['description']}")
    print("\n".join(lines))
def print_user(u: dict):
    """Print cleaned user info as key: value lines."""
    if not u:
        return
    print(f"name: u/{u.get('name', '')}")
    for karma_field in ("link_karma", "comment_karma"):
        print(f"{karma_field}: {format_count(u.get(karma_field))}")
    for flag in ("verified", "is_mod"):
        print(f"{flag}: {u.get(flag, False)}")
def print_posts_list(posts: list, label: str = "posts"):
    """Print posts as a compact TOON-style table."""
    rows = [clean_post(item) for item in posts if item]
    print(f"{label}[{len(rows)}]{{title,subreddit,score,comments}}:")
    for row in rows:
        cells = ((row['title'] or '')[:60],  # truncate long titles
                 f"r/{row['subreddit']}",
                 format_count(row['score']),
                 format_count(row['num_comments']))
        print("  " + ",".join(cells))
def print_comments_list(comments: list, label: str = "comments"):
    """Print comments as a compact TOON-style table (t1 items only)."""
    # Listings may contain non-comment kinds (e.g. "more" stubs); keep only t1.
    rows = [clean_comment(item) for item in comments if item.get("kind") == "t1"]
    print(f"{label}[{len(rows)}]{{author,body,score}}:")
    for row in rows:
        snippet = (row['body'] or '')[:60].replace('\n', ' ')
        print(f"  u/{row['author']},{snippet},{row['score']}")
def print_pagination(data: dict):
    """Print a next-page cursor if the listing has one."""
    cursor = data.get("after")
    if not cursor:
        return
    print("---")
    print("has_next_page: True")
    print(f"next_cursor: {cursor}")
#!/usr/bin/env python3
"""
Search posts on Reddit
Usage: python3 scripts/search_posts.py "AI agent" --subreddit ClaudeAI --limit 20
"""
import argparse
from reddit_api import api_get, print_posts_list, print_pagination
def main():
    """Search Reddit posts, optionally restricted to one subreddit."""
    parser = argparse.ArgumentParser(description="Search Reddit posts")
    parser.add_argument("query", help="Search query")
    parser.add_argument("--subreddit", "-r", help="Limit to subreddit")
    parser.add_argument("--sort", "-s", choices=["relevance", "hot", "top", "new", "comments"],
                        default="relevance", help="Sort method")
    parser.add_argument("--time", "-t", choices=["hour", "day", "week", "month", "year", "all"],
                        default="all", help="Time filter")
    parser.add_argument("--limit", "-l", type=int, default=25, help="Max posts")
    parser.add_argument("--after", "-a", help="Pagination cursor")
    args = parser.parse_args()

    params = {
        "q": args.query,
        "sort": args.sort,
        "t": args.time,
        "limit": min(args.limit, 100),
        "after": args.after,
    }
    if args.subreddit:
        # restrict_sr=1 keeps results inside the requested subreddit.
        path = f"r/{args.subreddit}/search"
        params["restrict_sr"] = "1"
        label = f"r/{args.subreddit}/search({args.query})"
    else:
        path = "search"
        label = f"search({args.query})"

    listing = api_get(path, params).get("data", {})

    print(f"query: {args.query}")
    print(f"sort: {args.sort}, time: {args.time}")
    print_posts_list(listing.get("children", []), label)
    print_pagination(listing)


if __name__ == "__main__":
    main()