123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178 |
#!/usr/bin/env python
# coding: utf-8
"""Plot per-year topic popularity and a topic-vs-experiment citation heatmap."""

import pandas as pd
import numpy as np
import matplotlib

# Select the pgf backend (and do it before pyplot is used) so every figure
# is rendered through xelatex with the fonts configured below.
matplotlib.use("pgf")
matplotlib.rcParams.update(
    {
        "pgf.texsystem": "xelatex",
        "font.family": "serif",
        "font.serif": "Times New Roman",
        "text.usetex": True,
        "pgf.rcfonts": False,
    }
)

from matplotlib import pyplot as plt

# BUG FIX: the original called .join() on the current preamble string and
# discarded the return value, so the preamble was never actually set —
# assign the joined string instead.  Also, `amssymb` is a LaTeX symbols
# package, not a font: it must be loaded with \usepackage, not \setmainfont.
plt.rcParams["text.latex.preamble"] = "\n".join([
    r"\usepackage{amsmath}",
    r"\usepackage{amssymb}",
])

import seaborn as sns
import pickle
from os.path import join as opj
import argparse

# Command-line interface: input/output locations plus the thresholds and
# period definitions that control which data are analysed.
parser = argparse.ArgumentParser()
for flag, options in [
    ("--input", {}),
    ("--dataset", {"default": "inspire-harvest/database"}),
    ("--keywords-threshold", {"type": int, "default": 200}),
    ("--articles-threshold", {"type": int, "default": 5}),
    # early/late defaults: [2,3] for ACL, [3] for HEP
    ("--early-periods", {"nargs": "+", "type": int, "default": [0, 1]}),
    ("--late-periods", {"nargs": "+", "type": int, "default": [3]}),
]:
    parser.add_argument(flag, **options)
parser.add_argument("--fla", action="store_true", help="first or last author")
args = parser.parse_args()

# Filename suffix distinguishing runs with non-default period choices.
if args.early_periods == [0, 1] and args.late_periods == [3]:
    custom_range = ""
else:
    early = "-".join(str(p) for p in args.early_periods)
    late = "-".join(str(p) for p in args.late_periods)
    custom_range = f"_{early}_{late}"
print(custom_range)
# Citation graph: one row per (citing article, cited article) pair.
references = pd.read_parquet(opj(args.dataset, "articles_references.parquet"))
for col in ("cites", "cited"):
    references[col] = references[col].astype(int)

# Topic labels and the per-article topic-count matrix produced upstream.
topics = pd.read_csv(opj(args.input, "topics.csv"))["label"].tolist()
topic_matrix = np.load(opj(args.input, "topics_counts.npy"))

# Article metadata; keep only the columns used below.
articles = pd.read_parquet(opj(args.dataset, "articles.parquet"))[
    ["article_id", "date_created", "title", "accelerators"]
]
articles["article_id"] = articles["article_id"].astype(int)

# Articles tied to at least one experiment, exploded to one row per
# experiment; collapse "X-Y-Z" experiment codes to "X-Y" so that run
# variants of the same experiment are merged.
has_experiment = articles["accelerators"].map(len) >= 1
experimental = articles[has_experiment].explode("accelerators")
experimental["accelerators"] = experimental["accelerators"].str.replace(
    "(.*)-(.*)-(.*)$", r"\1-\2", regex=True
)
# Manual mapping from (collapsed) experiment/accelerator codes to a coarse
# experiment category used to aggregate citation counts below.  Codes not
# listed here are dropped from the analysis (see the isin() filter after
# this dict).  Category strings may contain LaTeX (rendered via usetex).
types = {
    "FNAL-E": "colliders",
    "CERN-LEP": "colliders",
    "DESY-HERA": "colliders",
    "SUPER-KAMIOKANDE": "astro. neutrinos",
    "CERN-NA": "colliders",
    "CESR-CLEO": "colliders",
    "CERN-WA": "colliders",
    "BNL-E": "colliders",
    "KAMIOKANDE": "astro. neutrinos",
    "SLAC-E": "colliders",
    "SLAC-PEP2": "colliders",
    "KEK-BF": "colliders",
    "SNO": "neutrinos",
    "BNL-RHIC": "colliders",
    "WMAP": "cosmic $\\mu$wave background",
    "CERN-LHC": "colliders",
    "PLANCK": "cosmic $\\mu$wave background",
    "BEPC-BES": "colliders",
    "LIGO": "gravitational waves",
    "VIRGO": "gravitational waves",
    "CERN-PS": "colliders",
    "FERMI-LAT": "other cosmic sources",
    "XENON100": "dark matter (direct)",
    "ICECUBE": "astro. neutrinos",
    "LUX": "dark matter (direct)",
    "T2K": "neutrinos",
    "BICEP2": "cosmic $\\mu$wave background",
    "CDMS": "dark matter (direct)",
    "LAMPF-1173": "neutrinos",
    "FRASCATI-DAFNE": "colliders",
    "KamLAND": "neutrinos",
    "SDSS": "other cosmic sources",
    "JLAB-E-89": "colliders",
    "CHOOZ": "neutrinos",
    "XENON1T": "dark matter (direct)",
    "SCP": "supernovae",
    "DAYA-BAY": "neutrinos",
    "HOMESTAKE-CHLORINE": "neutrinos",
    "HIGH-Z": "supernovae",
    "K2K": "neutrinos",
    "MACRO": "other cosmic sources",
    "GALLEX": "neutrinos",
    "SAGE": "neutrinos",
    "PAMELA": "other cosmic sources",
    "CERN-UA": "colliders",
    "CERN SPS": "colliders",
    "DESY-PETRA": "colliders",
    "SLAC-SLC": "colliders",
    "LEPS": "colliders",
    "DOUBLECHOOZ": "neutrinos",
    "AUGER": "other cosmic sources",
    "AMS": "other cosmic sources",
    "DAMA": "dark matter (direct)",
    "DESY-DORIS": "colliders",
    "NOVOSIBIRSK-CMD": "colliders",
    "IMB": "neutrinos",
    "RENO": "neutrinos",
    "SLAC-SP": "colliders"
}
# Keep only experiments we can categorise, and attach the coarse category.
experimental = experimental[experimental["accelerators"].isin(types.keys())]
experimental["type"] = experimental["accelerators"].map(types)
# Publication year parsed from the first four characters of date_created
# (rows with shorter/missing dates are dropped).
articles = articles[articles["date_created"].str.len() >= 4]
articles["year"] = articles["date_created"].str[:4].astype(int)
# Restrict to the articles the topic model was fitted on; how="left" keeps
# the row order of articles.csv, which presumably matches the row order of
# topic_matrix — TODO confirm against the upstream pipeline.
_articles = pd.read_csv(opj(args.input, "articles.csv"))
articles = _articles.merge(articles, how="left")
print(topic_matrix.shape)
# Row-normalise topic counts into proportions; all-zero rows are divided
# by 1 instead of 0 so they stay all-zero rather than NaN.
topic_matrix = topic_matrix/np.where(topic_matrix.sum(axis=1)>0, topic_matrix.sum(axis=1), 1)[:,np.newaxis]
articles["topics"] = list(topic_matrix)
# Dominant topic per article, as a human-readable label.
articles["main_topic"] = topic_matrix.argmax(axis=1)
articles["main_topic"] = articles["main_topic"].map(lambda x: topics[x])
# Join citing articles to the experimental articles they cite:
# article --cites--> cited experimental paper (inner joins drop the rest).
citing_experiments = articles.merge(references, how="inner", left_on="article_id", right_on="cites")
citing_experiments = citing_experiments.merge(experimental, how="inner", left_on="cited", right_on="article_id")
# For each experiment category, the distribution of citing articles'
# main topics (normalised within category), pivoted into a type x topic grid.
counts = citing_experiments.groupby("type")["main_topic"].value_counts(normalize=True).reset_index()
counts = counts.pivot(index="type", columns="main_topic")
print(counts)
fig, ax = plt.subplots()
sns.heatmap(counts, ax=ax)
fig.savefig(opj(args.input, "topic_experiments.eps"), bbox_inches="tight")
# Keep only complete years.
articles = articles[articles["year"] < 2020]

# Mean topic mixture per publication year (index sorted chronologically).
popularity = articles.groupby("year").agg(
    topics=("topics", lambda x: list(np.mean(x, axis=0)))
).sort_index()

fig, ax = plt.subplots()
colors = [
    '#377eb8', '#ff7f00', '#4daf4a',
    '#f781bf', '#a65628', '#984ea3',
    '#999999', '#e41a1c', '#dede00'
]

# Select the 12 topics whose yearly share peaks highest (unordered).
peak_share = np.stack(popularity["topics"]).max(axis=0)
top = np.argpartition(peak_share, -12)[-12:]

# enumerate() replaces the original manual `i = 0 / i += 1` counter, and the
# per-topic series is computed once instead of twice (plot + scatter).
for i, t in enumerate(top):
    print(t)
    series = popularity["topics"].map(lambda x, t=t: x[t])
    # Colors recycle after len(colors) topics; the second pass switches to
    # dashed lines so recycled colors remain distinguishable.
    ls = "dashed" if i // len(colors) >= 1 else None
    ax.plot(popularity.index, series, label=topics[t], color=colors[i % len(colors)], ls=ls)
    ax.scatter(popularity.index, series, color=colors[i % len(colors)])

ticks = np.arange(2002, 2020, 2)
ax.set_xticks(ticks, ticks)
fig.legend(ncols=3, bbox_to_anchor=(0, 1.1), loc="upper left", bbox_transform=fig.transFigure)
fig.savefig(opj(args.input, "topic_popularity.pdf"), bbox_inches="tight")
|