Created
October 14, 2022 03:32
-
-
Save andycasey/a588a2523af3c9369d9a5bbb8a133d63 to your computer and use it in GitHub Desktop.
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# TODO: Library.get(X) should resolve to Library.get(id=X)
# Make & (Document.date > (datetime.now() - timedelta(days=365.25 * 5))) work
# Make document.year > 2017 work
# Make Document.in_(library) and Library.contains(document) work using docs() function
# Remove default behaviour of .limit(10)
# Document.full() should be searchable, and Document.full == '' SHOULD NOT WORK
""" | |
Give me: | |
- highly cited people; | |
- who publish in astrophysics; | |
- who haven't published with me in the last 5 years; | |
- (optionally) who publish in stellar spectroscopy. | |
""" | |
import ads
import numpy as np
from scipy.stats import pareto
from ads import Document, Affiliation, Library
from tqdm import tqdm
from datetime import datetime, timedelta
from peewee import fn
# Score people by authorship position and citation count.
def scores(document):
    """
    Split a document's citation count across its authors.

    Weights come from a Pareto(1) density evaluated at author positions
    1..N, so earlier authors get larger shares; the weights are
    normalised so the per-author scores sum to the citation count.
    """
    positions = np.arange(1, 1 + len(document.author))
    # Use uniform distribution if it's alphabetical from second-author onwards???
    # (e.g., to allow for X Collaboration, AAAA, BBBB)
    # otherwise, use a pareto distribution.
    weights = pareto(1).pdf(positions)
    return document.citation_count * weights / np.sum(weights)
# First build a library of names of people who I have published with in the last N years.
# NOTE(review): the year window and library id are hard-coded — confirm against caller.
is_refereed = (Document.property == "refereed")
in_window = Document.year.between(2017, 2023)
in_my_library = fn.docs("library/UM9dgiNHTkq4xp7588l7pg")
q = (
    Document.select()
    .where(is_refereed & in_window & in_my_library)
    .limit(1_000)
)
# Map each normalised author name to every full-name spelling seen for it.
network = {}
with tqdm() as progress:
    for document in q:
        for short_name, full_name in zip(document.author_norm, document.author):
            network.setdefault(short_name, []).append(full_name)
        progress.update()
print(f"Found {len(network)} unique authors.")
# Now let's find highly cited people.
limit = 10_000
# Full-text match on either topic of interest.
topic_filter = (
    (Document.full == "stellar spectroscopy")
    | (Document.full == "data analysis")
)
q = (
    Document.select()
    .where(
        (Document.property == "refereed")
        & (Document.database == "astronomy")
        & topic_filter
    )
    .order_by(Document.citation_count.desc())
    .limit(limit)
)
# Accumulate a score, the matching documents, and every full-name spelling
# for each author who is NOT already a co-author of mine.
candidate_scores, candidate_names, candidate_documents = ({}, {}, {})
for document in tqdm(q, total=limit):
    per_author = zip(document.author_norm, document.author, scores(document))
    for short_name, full_name, contribution in per_author:
        # Skip anyone already in my co-author network.
        if short_name in network:
            continue
        candidate_scores[short_name] = candidate_scores.get(short_name, 0) + contribution
        candidate_documents.setdefault(short_name, []).append(document)
        candidate_names.setdefault(short_name, []).append(full_name)
# Now sort by score.
ranked_candidates = dict(
    sorted(candidate_scores.items(), key=lambda entry: entry[1], reverse=True)
)
# Print the top 1000 candidates with every name spelling we saw for them.
for i, (author, score) in enumerate(ranked_candidates.items(), start=1):
    print(f"{i}: {author} ({set(candidate_names[author])}) {score:.0f}")
    if i >= 1000:
        break
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment