producthunt
PublicRepository: resciencelab/opc-skills
Low Risk
No security issues found
The SKILL.md frontmatter does not include a 'license' field (the package manifest does declare MIT). Specifying a license in the frontmatter helps users understand usage terms.
Remediation Add 'license' field to SKILL.md frontmatter (e.g., MIT, Apache-2.0)
Description
Search and retrieve content from Product Hunt. Get posts, topics, users, and collections via the GraphQL API. Use when user mentions Product Hunt, PH, or product launches.
Skill Files
# ProductHunt Skill Get posts, topics, users, and collections from Product Hunt via the official GraphQL API. ## Prerequisites Set access token in `~/.zshrc`: ```bash export PRODUCTHUNT_ACCESS_TOKEN="your_developer_token" ``` Get your token from: https://www.producthunt.com/v2/oauth/applications **Quick Check**: ```bash cd <skill_directory> python3 scripts/get_posts.py --limit 3 ``` ## Commands All commands run from the skill directory. ### Posts ```bash python3 scripts/get_post.py chatgpt # Get post by slug python3 scripts/get_post.py 12345 # Get post by ID python3 scripts/get_posts.py --limit 20 # Today's featured posts python3 scripts/get_posts.py --topic ai --limit 10 # Posts in topic python3 scripts/get_posts.py --after 2026-01-01 # Posts after date python3 scripts/get_post_comments.py POST_ID --limit 20 ``` ### Topics ```bash python3 scripts/get_topic.py artificial-intelligence # Get topic by slug python3 scripts/get_topics.py --query "AI" --limit 20 # Search topics python3 scripts/get_topics.py --limit 50 # Popular topics ``` ### Users ```bash python3 scripts/get_user.py rrhoover # Get user by username python3 scripts/get_user_posts.py rrhoover --limit 20 # User's posts ``` ### Collections ```bash python3 scripts/get_collection.py SLUG_OR_ID # Get collection python3 scripts/get_collections.py --featured --limit 20 ``` ## API Info - **Endpoint**: https://api.producthunt.com/v2/api/graphql - **Type**: GraphQL - **Rate Limits**: 6250 complexity points / 15 min - **Docs**: https://api.producthunt.com/v2/docs
{
"name": "producthunt",
"version": "1.0.0",
"description": "Search and retrieve content from Product Hunt. Get posts, topics, users, and collections via the GraphQL API.",
"author": {
"name": "ReScienceLab"
},
"homepage": "https://github.com/ReScienceLab/opc-skills/tree/main/skills/producthunt",
"repository": "https://github.com/ReScienceLab/opc-skills",
"license": "MIT",
"keywords": [
"producthunt",
"product hunt",
"PH",
"launch"
],
"skills": [
"./SKILL.md"
],
"commands": [
"./scripts/"
]
}
#!/usr/bin/env python3
"""
ProductHunt API credential management.
Reads from environment: PRODUCTHUNT_ACCESS_TOKEN
"""
import os
def get_access_token() -> str | None:
"""Get ProductHunt access token"""
return os.environ.get("PRODUCTHUNT_ACCESS_TOKEN")
def has_token() -> bool:
    """Return True when a ProductHunt access token is present in the environment."""
    token = get_access_token()
    return token is not None
#!/usr/bin/env python3
"""
Get collection by ID or slug
Usage: python3 scripts/get_collection.py COLLECTION_SLUG
"""
import argparse
import json
from producthunt_api import graphql, clean_collection, format_count
QUERY = """
query GetCollection($id: ID, $slug: String) {
collection(id: $id, slug: $slug) {
id
name
tagline
description
url
followersCount
featuredAt
createdAt
user { name username }
posts(first: 10) {
totalCount
edges {
node {
id
name
tagline
votesCount
}
}
}
}
}
"""
def main():
    """CLI entry point: look up one collection and print it."""
    parser = argparse.ArgumentParser(description="Get ProductHunt collection")
    parser.add_argument("identifier", help="Collection ID or slug")
    parser.add_argument("--json", "-j", action="store_true", help="Output as JSON")
    args = parser.parse_args()
    # Numeric identifiers are treated as IDs, everything else as a slug.
    key = "id" if args.identifier.isdigit() else "slug"
    data = graphql(QUERY, {key: args.identifier})
    collection = data.get("collection")
    if not collection:
        print(f"Collection not found: {args.identifier}")
        return
    if args.json:
        print(json.dumps(collection, indent=2))
        return
    for field in ("id", "name", "tagline"):
        print(f"{field}: {collection.get(field)}")
    print(f"followers: {format_count(collection.get('followersCount'))}")
    print(f"url: {collection.get('url')}")
    creator = collection.get("user", {})
    if creator:
        print(f"creator: @{creator.get('username')} ({creator.get('name')})")
    description = collection.get("description")
    if description:
        print(f"description: {description[:200]}")
    posts_block = collection.get("posts", {})
    post_nodes = [edge["node"] for edge in posts_block.get("edges", [])]
    if post_nodes:
        print("---")
        total = posts_block.get("totalCount", len(post_nodes))
        print(f"posts[{total}]{{name,votes}}:")
        for node in post_nodes:
            print(f"  {node['name']},{format_count(node['votesCount'])}")
if __name__ == "__main__":
    main()
#!/usr/bin/env python3
"""
Get collections with filters
Usage: python3 scripts/get_collections.py --featured --limit 20
"""
import argparse
from producthunt_api import graphql, clean_collection, format_count, print_pagination
QUERY = """
query GetCollections($first: Int, $after: String, $featured: Boolean, $userId: ID) {
collections(first: $first, after: $after, featured: $featured, userId: $userId, order: FOLLOWERS_COUNT) {
totalCount
pageInfo { hasNextPage endCursor }
edges {
node {
id
name
tagline
url
followersCount
featuredAt
}
}
}
}
"""
def main():
    """CLI entry point: list collections, optionally filtered."""
    parser = argparse.ArgumentParser(description="Get ProductHunt collections")
    parser.add_argument("--limit", "-l", type=int, default=20, help="Max collections")
    parser.add_argument("--featured", "-f", action="store_true", help="Featured collections only")
    parser.add_argument("--user", "-u", help="Filter by user ID")
    parser.add_argument("--cursor", "-c", help="Pagination cursor")
    args = parser.parse_args()
    data = graphql(QUERY, {
        "first": min(args.limit, 50),  # API page-size cap
        "after": args.cursor,
        "featured": True if args.featured else None,
        "userId": args.user,
    })
    payload = data.get("collections", {})
    nodes = [edge["node"] for edge in payload.get("edges", [])]
    active = []
    if args.featured:
        active.append("featured")
    if args.user:
        active.append(f"user:{args.user}")
    header = f"collections({','.join(active)})" if active else "collections"
    print(f"{header}[{len(nodes)}]{{name,tagline,followers}}:")
    for node in nodes:
        short_tagline = (node.get('tagline') or '')[:40]
        print(f"  {node['name']},{short_tagline},{format_count(node['followersCount'])}")
    print_pagination(payload.get("pageInfo"), payload.get("totalCount"))
if __name__ == "__main__":
    main()
#!/usr/bin/env python3
"""
Get post by ID or slug
Usage: python3 scripts/get_post.py POST_ID_OR_SLUG
"""
import argparse
import json
from producthunt_api import graphql, clean_post, print_post
QUERY = """
query GetPost($id: ID, $slug: String) {
post(id: $id, slug: $slug) {
id
name
tagline
slug
description
votesCount
commentsCount
url
website
featuredAt
createdAt
makers { name username }
topics(first: 5) { edges { node { name slug } } }
}
}
"""
def main():
    """CLI entry point: look up one post and print it."""
    parser = argparse.ArgumentParser(description="Get ProductHunt post")
    parser.add_argument("identifier", help="Post ID or slug")
    parser.add_argument("--json", "-j", action="store_true", help="Output as JSON")
    args = parser.parse_args()
    # Numeric identifiers are treated as IDs, everything else as a slug.
    key = "id" if args.identifier.isdigit() else "slug"
    post = graphql(QUERY, {key: args.identifier}).get("post")
    if not post:
        print(f"Post not found: {args.identifier}")
        return
    if args.json:
        print(json.dumps(post, indent=2))
        return
    print_post(clean_post(post))
    description = post.get("description")
    if description:
        print("---")
        print(f"description: {description[:500]}")
    topic_edges = post.get("topics", {}).get("edges", [])
    if topic_edges:
        slugs = [edge["node"]["slug"] for edge in topic_edges]
        print(f"topics: {', '.join(slugs)}")
if __name__ == "__main__":
    main()
#!/usr/bin/env python3
"""
Get comments on a post
Usage: python3 scripts/get_post_comments.py POST_ID --limit 20
"""
import argparse
from producthunt_api import graphql, print_comments_list, print_pagination
QUERY = """
query GetPostComments($id: ID, $slug: String, $first: Int, $after: String) {
post(id: $id, slug: $slug) {
id
name
commentsCount
comments(first: $first, after: $after) {
totalCount
pageInfo { hasNextPage endCursor }
edges {
node {
id
body
votesCount
createdAt
user { name username }
}
}
}
}
}
"""
def main():
    """CLI entry point: print a post's comments."""
    parser = argparse.ArgumentParser(description="Get post comments")
    parser.add_argument("identifier", help="Post ID or slug")
    parser.add_argument("--limit", "-l", type=int, default=20, help="Max comments")
    parser.add_argument("--cursor", "-c", help="Pagination cursor")
    args = parser.parse_args()
    variables = {"first": min(args.limit, 50), "after": args.cursor}
    # Numeric identifiers are treated as IDs, everything else as a slug.
    key = "id" if args.identifier.isdigit() else "slug"
    variables[key] = args.identifier
    post = graphql(QUERY, variables).get("post")
    if not post:
        print(f"Post not found: {args.identifier}")
        return
    print(f"post: {post.get('name')} (id:{post.get('id')})")
    print(f"total_comments: {post.get('commentsCount')}")
    payload = post.get("comments", {})
    nodes = [edge["node"] for edge in payload.get("edges", [])]
    print_comments_list(nodes)
    print_pagination(payload.get("pageInfo"))
if __name__ == "__main__":
    main()
#!/usr/bin/env python3
"""
Get posts with filters
Usage: python3 scripts/get_posts.py --featured --limit 20
python3 scripts/get_posts.py --topic ai --limit 10
"""
import argparse
from datetime import datetime, timezone
from producthunt_api import graphql, print_posts_list, print_pagination
QUERY = """
query GetPosts($first: Int, $after: String, $featured: Boolean, $topic: String, $postedAfter: DateTime, $postedBefore: DateTime) {
posts(first: $first, after: $after, featured: $featured, topic: $topic, postedAfter: $postedAfter, postedBefore: $postedBefore) {
totalCount
pageInfo { hasNextPage endCursor }
edges {
node {
id
name
tagline
slug
votesCount
commentsCount
url
website
featuredAt
}
}
}
}
"""
def main():
    """CLI entry point: list posts with optional filters.

    Builds GraphQL variables from the CLI flags, queries the API, and
    prints a compact post list followed by pagination info.
    """
    parser = argparse.ArgumentParser(description="Get ProductHunt posts")
    parser.add_argument("--limit", "-l", type=int, default=20, help="Max posts")
    parser.add_argument("--featured", "-f", action="store_true", help="Featured posts only")
    parser.add_argument("--topic", "-t", help="Filter by topic slug")
    parser.add_argument("--after", help="Posts after date (YYYY-MM-DD)")
    parser.add_argument("--before", help="Posts before date (YYYY-MM-DD)")
    parser.add_argument("--cursor", "-c", help="Pagination cursor")
    args = parser.parse_args()
    variables = {
        "first": min(args.limit, 50),  # API page-size cap
        "after": args.cursor,
        "featured": True if args.featured else None,
        "topic": args.topic,
    }
    # Expand bare dates to full-day UTC bounds.
    if args.after:
        variables["postedAfter"] = f"{args.after}T00:00:00Z"
    if args.before:
        variables["postedBefore"] = f"{args.before}T23:59:59Z"
    data = graphql(QUERY, variables)
    posts_data = data.get("posts", {})
    posts = [e["node"] for e in posts_data.get("edges", [])]
    filters = []
    if args.featured:
        filters.append("featured")
    if args.topic:
        filters.append(f"topic:{args.topic}")
    if args.after:
        filters.append(f"after:{args.after}")
    # Fix: --before was applied to the query but silently omitted from the
    # label, unlike --after; include it for consistency.
    if args.before:
        filters.append(f"before:{args.before}")
    label = f"posts({','.join(filters)})" if filters else "posts"
    print_posts_list(posts, label)
    print_pagination(posts_data.get("pageInfo"), posts_data.get("totalCount"))
if __name__ == "__main__":
    main()
#!/usr/bin/env python3
"""
Get topic by ID or slug
Usage: python3 scripts/get_topic.py artificial-intelligence
"""
import argparse
import json
from producthunt_api import graphql, clean_topic, print_topic
QUERY = """
query GetTopic($id: ID, $slug: String) {
topic(id: $id, slug: $slug) {
id
name
slug
description
postsCount
followersCount
url
}
}
"""
def main():
    """CLI entry point: look up one topic and print it."""
    parser = argparse.ArgumentParser(description="Get ProductHunt topic")
    parser.add_argument("identifier", help="Topic ID or slug")
    parser.add_argument("--json", "-j", action="store_true", help="Output as JSON")
    args = parser.parse_args()
    # Numeric identifiers are treated as IDs, everything else as a slug.
    key = "id" if args.identifier.isdigit() else "slug"
    topic = graphql(QUERY, {key: args.identifier}).get("topic")
    if not topic:
        print(f"Topic not found: {args.identifier}")
        return
    if args.json:
        print(json.dumps(topic, indent=2))
    else:
        print_topic(clean_topic(topic))
if __name__ == "__main__":
    main()
#!/usr/bin/env python3
"""
Get topics with optional search
Usage: python3 scripts/get_topics.py --query "AI" --limit 20
"""
import argparse
from producthunt_api import graphql, print_topics_list, print_pagination
QUERY = """
query GetTopics($first: Int, $after: String, $query: String) {
topics(first: $first, after: $after, query: $query, order: FOLLOWERS_COUNT) {
totalCount
pageInfo { hasNextPage endCursor }
edges {
node {
id
name
slug
description
postsCount
followersCount
}
}
}
}
"""
def main():
    """CLI entry point: list or search topics."""
    parser = argparse.ArgumentParser(description="Get ProductHunt topics")
    parser.add_argument("--query", "-q", help="Search query")
    parser.add_argument("--limit", "-l", type=int, default=20, help="Max topics")
    parser.add_argument("--cursor", "-c", help="Pagination cursor")
    args = parser.parse_args()
    data = graphql(QUERY, {
        "first": min(args.limit, 50),  # API page-size cap
        "after": args.cursor,
        "query": args.query,
    })
    payload = data.get("topics", {})
    nodes = [edge["node"] for edge in payload.get("edges", [])]
    header = f"topics(query:{args.query})" if args.query else "topics"
    print_topics_list(nodes, header)
    print_pagination(payload.get("pageInfo"), payload.get("totalCount"))
if __name__ == "__main__":
    main()
#!/usr/bin/env python3
"""
Get user by username or ID
Usage: python3 scripts/get_user.py rrhoover
"""
import argparse
import json
from producthunt_api import graphql, clean_user, print_user
QUERY = """
query GetUser($id: ID, $username: String) {
user(id: $id, username: $username) {
id
name
username
headline
url
twitterUsername
websiteUrl
isMaker
createdAt
profileImage
}
}
"""
def main():
    """CLI entry point: look up one user and print their profile."""
    parser = argparse.ArgumentParser(description="Get ProductHunt user")
    parser.add_argument("identifier", help="Username or user ID")
    parser.add_argument("--json", "-j", action="store_true", help="Output as JSON")
    args = parser.parse_args()
    # Numeric identifiers are treated as IDs, everything else as a username.
    key = "id" if args.identifier.isdigit() else "username"
    user = graphql(QUERY, {key: args.identifier}).get("user")
    if not user:
        print(f"User not found: {args.identifier}")
        return
    if args.json:
        print(json.dumps(user, indent=2))
        return
    print_user(clean_user(user))
    joined = user.get("createdAt")
    if joined:
        print(f"joined: {joined}")
if __name__ == "__main__":
    main()
#!/usr/bin/env python3
"""
Get user's posts (submitted or made)
Usage: python3 scripts/get_user_posts.py rrhoover --limit 20
"""
import argparse
from producthunt_api import graphql, print_posts_list, print_pagination
QUERY = """
query GetUserPosts($id: ID, $username: String, $first: Int, $after: String) {
user(id: $id, username: $username) {
id
name
username
submittedPosts(first: $first, after: $after) {
totalCount
pageInfo { hasNextPage endCursor }
edges {
node {
id
name
tagline
slug
votesCount
commentsCount
url
featuredAt
}
}
}
madePosts(first: $first, after: $after) {
totalCount
pageInfo { hasNextPage endCursor }
edges {
node {
id
name
tagline
slug
votesCount
commentsCount
url
featuredAt
}
}
}
}
}
"""
def main():
    """CLI entry point: list a user's submitted (or made) posts."""
    parser = argparse.ArgumentParser(description="Get user's posts")
    parser.add_argument("identifier", help="Username or user ID")
    parser.add_argument("--limit", "-l", type=int, default=20, help="Max posts")
    parser.add_argument("--made", "-m", action="store_true", help="Show made posts instead of submitted")
    parser.add_argument("--cursor", "-c", help="Pagination cursor")
    args = parser.parse_args()
    variables = {"first": min(args.limit, 50), "after": args.cursor}
    # Numeric identifiers are treated as IDs, everything else as a username.
    key = "id" if args.identifier.isdigit() else "username"
    variables[key] = args.identifier
    # NOTE(review): the query fetches both submittedPosts and madePosts even
    # though only one is displayed — this spends extra complexity points.
    user = graphql(QUERY, variables).get("user")
    if not user:
        print(f"User not found: {args.identifier}")
        return
    print(f"user: @{user.get('username')} ({user.get('name')})")
    field, label = ("madePosts", "made_posts") if args.made else ("submittedPosts", "submitted_posts")
    payload = user.get(field, {})
    nodes = [edge["node"] for edge in payload.get("edges", [])]
    print_posts_list(nodes, label)
    print_pagination(payload.get("pageInfo"), payload.get("totalCount"))
if __name__ == "__main__":
    main()
#!/usr/bin/env python3
"""
ProductHunt GraphQL API wrapper
"""
import json
import sys
import urllib.error
import urllib.request

from credential import get_access_token
API_URL = "https://api.producthunt.com/v2/api/graphql"
def graphql(query: str, variables: dict | None = None) -> dict:
    """POST a GraphQL query to the ProductHunt API and return its `data` payload.

    Args:
        query: GraphQL query document.
        variables: Query variables; None is treated as an empty mapping.

    Returns:
        The `data` object from the JSON response ({} when absent).

    Exits the process with status 1 (message on stderr) when the token is
    missing, on HTTP/transport errors, or when the response carries
    GraphQL-level errors (first error message is printed).
    """
    token = get_access_token()
    if not token:
        print("error: PRODUCTHUNT_ACCESS_TOKEN not set", file=sys.stderr)
        sys.exit(1)
    body = json.dumps({"query": query, "variables": variables or {}}).encode()
    headers = {
        "Authorization": f"Bearer {token}",
        "Content-Type": "application/json",
    }
    req = urllib.request.Request(API_URL, data=body, headers=headers, method="POST")
    try:
        with urllib.request.urlopen(req, timeout=30) as resp:
            data = json.loads(resp.read().decode())
        # GraphQL reports errors in-band with HTTP 200; surface the first one.
        if "errors" in data:
            print(f"error: {data['errors'][0]['message']}", file=sys.stderr)
            sys.exit(1)
        return data.get("data", {})
    except urllib.error.HTTPError as e:
        error_body = e.read().decode()
        print(f"error: HTTP {e.code} - {error_body}", file=sys.stderr)
        sys.exit(1)
    except Exception as e:
        print(f"error: {e}", file=sys.stderr)
        sys.exit(1)
def format_count(n) -> str:
    """Humanize a count: 1234567 -> '1.2M', 1500 -> '1.5K', None -> '0'."""
    if n is None:
        return "0"
    value = int(n)
    for threshold, suffix in ((1_000_000, "M"), (1_000, "K")):
        if value >= threshold:
            return f"{value / threshold:.1f}{suffix}"
    return str(value)
def clean_post(p: dict) -> dict:
"""Clean post object"""
if not p:
return None
return {
"id": p.get("id"),
"name": p.get("name"),
"tagline": p.get("tagline"),
"slug": p.get("slug"),
"votes": p.get("votesCount"),
"comments": p.get("commentsCount"),
"url": p.get("url"),
"website": p.get("website"),
"featured_at": p.get("featuredAt"),
"created_at": p.get("createdAt"),
"makers": [m.get("name") for m in (p.get("makers") or [])],
}
def clean_user(u: dict) -> dict:
"""Clean user object"""
if not u:
return None
return {
"id": u.get("id"),
"name": u.get("name"),
"username": u.get("username"),
"headline": u.get("headline"),
"url": u.get("url"),
"twitter": u.get("twitterUsername"),
"website": u.get("websiteUrl"),
"is_maker": u.get("isMaker"),
}
def clean_topic(t: dict) -> dict:
"""Clean topic object"""
if not t:
return None
return {
"id": t.get("id"),
"name": t.get("name"),
"slug": t.get("slug"),
"description": (t.get("description") or "")[:150],
"posts_count": t.get("postsCount"),
"followers_count": t.get("followersCount"),
"url": t.get("url"),
}
def clean_collection(c: dict) -> dict:
"""Clean collection object"""
if not c:
return None
return {
"id": c.get("id"),
"name": c.get("name"),
"tagline": c.get("tagline"),
"url": c.get("url"),
"followers": c.get("followersCount"),
"featured_at": c.get("featuredAt"),
}
def clean_comment(c: dict) -> dict:
"""Clean comment object"""
if not c:
return None
user = c.get("user", {})
return {
"id": c.get("id"),
"body": c.get("body"),
"author": user.get("username") if user else None,
"author_name": user.get("name") if user else None,
"votes": c.get("votesCount"),
"created_at": c.get("createdAt"),
}
def print_post(p: dict):
    """Render a cleaned post as `key: value` lines (TOON format).

    Optional fields (website, makers, featured) are omitted when absent.
    """
    if not p:
        return
    for field in ("id", "name", "tagline"):
        print(f"{field}: {p.get(field, '')}")
    print(f"votes: {format_count(p.get('votes'))}")
    print(f"comments: {format_count(p.get('comments'))}")
    print(f"url: {p.get('url', '')}")
    website = p.get("website")
    if website:
        print(f"website: {website}")
    makers = p.get("makers")
    if makers:
        print(f"makers: {', '.join(makers)}")
    featured = p.get("featured_at")
    if featured:
        print(f"featured: {featured}")
def print_user(u: dict):
    """Render a cleaned user as `key: value` lines (TOON format).

    Optional fields (headline, twitter, website) are omitted when absent.
    """
    if not u:
        return
    print(f"id: {u.get('id', '')}")
    print(f"username: @{u.get('username', '')}")
    print(f"name: {u.get('name', '')}")
    headline = u.get("headline")
    if headline:
        print(f"headline: {headline}")
    print(f"maker: {u.get('is_maker', False)}")
    print(f"url: {u.get('url', '')}")
    twitter = u.get("twitter")
    if twitter:
        print(f"twitter: @{twitter}")
    site = u.get("website")
    if site:
        print(f"website: {site}")
def print_topic(t: dict):
    """Render a cleaned topic as `key: value` lines (TOON format).

    Optional fields (description, url) are omitted when absent.
    """
    if not t:
        return
    for field in ("id", "name", "slug"):
        print(f"{field}: {t.get(field, '')}")
    print(f"posts: {format_count(t.get('posts_count'))}")
    print(f"followers: {format_count(t.get('followers_count'))}")
    description = t.get("description")
    if description:
        print(f"description: {description}")
    link = t.get("url")
    if link:
        print(f"url: {link}")
def print_posts_list(posts: list, label: str = "posts"):
    """Print a compact one-line-per-post listing under a labeled header."""
    rows = [clean_post(post) for post in posts if post]
    print(f"{label}[{len(rows)}]{{name,tagline,votes}}:")
    for row in rows:
        short_tagline = (row['tagline'] or '')[:50]
        print(f"  {row['name']},{short_tagline},{format_count(row['votes'])}")
def print_topics_list(topics: list, label: str = "topics"):
    """Print a compact one-line-per-topic listing under a labeled header."""
    rows = [clean_topic(topic) for topic in topics if topic]
    print(f"{label}[{len(rows)}]{{name,slug,posts}}:")
    for row in rows:
        print(f"  {row['name']},{row['slug']},{format_count(row['posts_count'])}")
def print_comments_list(comments: list, label: str = "comments"):
    """Print a compact one-line-per-comment listing under a labeled header.

    Comment bodies are truncated to 60 chars with newlines flattened.
    """
    rows = [clean_comment(comment) for comment in comments if comment]
    print(f"{label}[{len(rows)}]{{author,body,votes}}:")
    for row in rows:
        snippet = (row['body'] or '')[:60].replace('\n', ' ')
        print(f"  @{row['author']},{snippet},{row['votes']}")
def print_pagination(page_info: dict, total: int = None):
    """Print a trailing pagination footer.

    Emits one '---' separator, then the total (when known) and the
    next-page cursor (when more results exist). Does nothing when
    page_info is falsy.
    """
    if not page_info:
        return
    more = page_info.get("hasNextPage", False)
    end_cursor = page_info.get("endCursor", "")
    separator_needed = True
    if total is not None:
        print("---")
        separator_needed = False
        print(f"total: {total}")
    if more and end_cursor:
        if separator_needed:
            print("---")
        print(f"has_next_page: {more}")
        print(f"next_cursor: {end_cursor}")