# comparative_analysis.py

import numpy as np
import pandas as pd
from scipy.stats import entropy
import ot
from sklearn.linear_model import LinearRegression
from matplotlib import pyplot as plt
import matplotlib

# render figures with the pgf backend so that LaTeX handles all text
matplotlib.use("pgf")
matplotlib.rcParams.update(
    {
        "pgf.texsystem": "xelatex",
        "font.family": "serif",
        "font.serif": "Times New Roman",
        "text.usetex": True,
        "pgf.rcfonts": False,
    }
)
plt.rcParams["text.latex.preamble"] = "\n".join([
    r"\usepackage{amsmath}",
    r"\usepackage{amssymb}",
])
from textwrap import wrap
import argparse
from os.path import join as opj, exists
import pickle
from cmdstanpy import CmdStanModel
parser = argparse.ArgumentParser()
parser.add_argument("--input")
parser.add_argument("--dataset", default="inspire-harvest/database")
parser.add_argument("--suffix", default=None)
parser.add_argument("--portfolios", default=None)
parser.add_argument("--output", default="")
parser.add_argument("--metric", default="change", choices=["change", "disruption", "diversification", "diversification_stirling", "entered", "exited", "exited_total_power_effect"])
parser.add_argument("--diversity", default="entropy", choices=["entropy", "stirling"])
parser.add_argument("--power", choices=["magnitude", "brokerage"], default="magnitude")
parser.add_argument("--model", default="", choices=["", "bare"])
parser.add_argument("--compact", action="store_true", default=False)
parser.add_argument("--fla", action="store_true", default=False)
parser.add_argument("--early-period", type=str, default="2000-2009")
parser.add_argument("--end", type=int, default=2019)
args = parser.parse_args()
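
# Example invocation (the path is an illustrative assumption; --input must point
# to a directory containing topics.csv, the aggregate CSV, and the parquet files
# referenced below):
#   python comparative_analysis.py --input output/ --metric change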
early_period = list(map(int, args.early_period.split("-")))
fla = "_fla" if args.fla else ""
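
# Academic age per author: each article gets age = (end of early period + 1) - year,
# and we keep the maximum per author, i.e. the years elapsed between the author's
# earliest recorded article and the end of the early period.
# Cached as age_<period>.csv inside args.input.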
def age():
    if not exists(opj(args.input, f"age_{args.early_period}.csv")):
        articles = pd.read_parquet(opj(args.dataset, "articles.parquet"))[["article_id", "date_created", "pacs_codes", "curated", "accelerators"]]
        articles["article_id"] = articles.article_id.astype(int)
        articles = articles[articles["date_created"].str.len() >= 4]
        articles["year"] = articles["date_created"].str[:4].astype(int)
        articles["age"] = early_period[1]+1-articles["year"]
        age = articles[["article_id", "age"]].copy()
        articles = articles[(articles["year"]>=early_period[0])&(articles["year"]<early_period[1]+1)]
        _articles = pd.read_csv(opj(args.input, "articles.csv"))
        articles = _articles.merge(articles, how="inner")
        authors = pd.read_parquet(opj(args.dataset, "articles_authors.parquet"))
        authors["article_id"] = authors.article_id.astype(int)
        age = age.merge(authors, how="inner", left_on="article_id", right_on="article_id")
        age = age.groupby("bai").agg(age=("age", "max")).reset_index()
        age.to_csv(opj(args.input, f"age_{args.early_period}.csv"))
    else:
        age = pd.read_csv(opj(args.input, f"age_{args.early_period}.csv"))
    return age
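
# Institutional stability: for each author, find the longest continuous stay at a
# single institution; an author is flagged "stable" when that stay spans (within
# one year) their whole observed active period.
# Cached as institutional_stability_<period>.csv.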
def institution_stability():
    if exists(opj(args.input, f"institutional_stability_{args.early_period}.csv")):
        return pd.read_csv(opj(args.input, f"institutional_stability_{args.early_period}.csv"), index_col="bai")
    affiliations = pd.read_parquet(opj(args.dataset, "affiliations.parquet"))
    affiliations["article_id"] = affiliations.article_id.astype(int)
    articles = pd.read_parquet(opj(args.dataset, "articles.parquet"))[["article_id", "date_created"]]
    articles = articles[articles["date_created"].str.len() >= 4]
    articles["year"] = articles["date_created"].str[:4].astype(int)
    articles["article_id"] = articles.article_id.astype(int)
    articles = articles[articles["year"] <= args.end]
    articles = articles[articles["year"] >= early_period[0]]
    affiliations = affiliations.merge(articles, how="inner", left_on="article_id", right_on="article_id")
    # affiliations = affiliations[affiliations["bai"].isin(df["bai"])]
    authors_last = affiliations.groupby("bai").agg(last_article=("year", "max"))
    hosts = affiliations.sort_values(["bai", "institution_id", "year"]).groupby(["bai", "institution_id"]).agg(
        first=("year", "min"),
        last=("year", "max")
    )
    hosts["duration"] = hosts["last"]-hosts["first"]
    stability = hosts.groupby("bai").agg(stability=("duration", "max"), last=("last", "max"), first=("first", "min"))
    stability = stability.merge(authors_last, left_index=True, right_index=True)
    stability["stable"] = stability["stability"]>=(stability["last"]-stability["first"]-1)
    stability.to_csv(opj(args.input, f"institutional_stability_{args.early_period}.csv"))
    return stability
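
# Productivity during the early period: counts of co-authored and solo-authored
# HEP-Theory/HEP-Phenomenology papers per author (normalized per year active
# further below). Cached as productivity_<period>.csv.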
def productivity():
    if exists(opj(args.input, f"productivity_{args.early_period}.csv")):
        return pd.read_csv(opj(args.input, f"productivity_{args.early_period}.csv"), index_col="bai")
    articles = pd.read_parquet(opj(args.dataset, "articles.parquet"))[["article_id", "date_created", "categories"]]
    articles["article_id"] = articles.article_id.astype(int)
    articles = articles[articles["date_created"].str.len() >= 4]
    articles["year"] = articles["date_created"].str[:4].astype(int)
    articles = articles[articles["categories"].map(lambda x: "Phenomenology-HEP" in x or "Theory-HEP" in x)]
    articles = articles[(articles["year"]>=early_period[0])&(articles["year"]<early_period[1]+1)]
    _articles = pd.read_csv(opj(args.input, "articles.csv"))
    articles = _articles.merge(articles, how="inner")
    authors = pd.read_parquet(opj(args.dataset, "articles_authors.parquet"))
    authors["article_id"] = authors.article_id.astype(int)
    n_authors = authors.groupby("article_id").agg(n_authors=("bai", "count")).reset_index()
    articles = articles.merge(n_authors, how="inner", left_on="article_id", right_on="article_id")
    articles["solo"] = articles["n_authors"]==1
    articles = articles.merge(authors, how="left", left_on="article_id", right_on="article_id")
    productivity = articles.groupby("bai").agg(
        productivity=("solo", lambda x: (~x).sum()),
        productivity_solo=("solo", "sum")
    )
    productivity.to_csv(opj(args.input, f"productivity_{args.early_period}.csv"))
    return productivity
suffix = f"_{args.suffix}" if args.suffix is not None else ""
topics = pd.read_csv(opj(args.input, "topics.csv"))
junk = topics["label"].str.contains("Junk")
topics = topics[~junk]["label"].tolist()
fig, ax = plt.subplots()
n_topics = len(pd.read_csv(opj(args.input, "topics.csv")))
if args.portfolios is None:
    df = pd.read_csv(opj(args.input, f"aggregate{fla}.csv"))
else:
    df = pd.read_csv(args.portfolios)
resources = pd.read_parquet(opj(args.input, "pooled_resources.parquet") if args.early_period == "2000-2009" else opj(args.input, f"pooled_resources_{args.early_period.replace('-', '_')}.parquet"))
df = df.merge(resources, left_on="bai", right_on="bai")
print(df)
brokerage = pd.read_csv(opj(args.input, f"brokerage_{args.early_period.replace('-', '_')}.csv"))
df = df.merge(brokerage, how="left", left_on="bai", right_on="bai")
stability = institution_stability()
df = df.merge(stability, left_on="bai", right_index=True)
df = df.merge(age(), left_on="bai", right_on="bai")
df = df.merge(productivity(), left_on="bai", right_on="bai")
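
# Topic portfolios: start_k/end_k are per-topic paper counts in the early and
# late periods, expertise_k is the author's expertise share in topic k, and
# pooled_resources appears to hold one per-topic vector of social resources per
# author. Junk topics are dropped from every matrix below.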
NR = np.stack(df[[f"start_{k+1}" for k in range(n_topics)]].values).astype(int)
NC = np.stack(df[[f"end_{k+1}" for k in range(n_topics)]].values).astype(int)
expertise = np.stack(df[[f"expertise_{k+1}" for k in range(n_topics)]].values)
S = np.stack(df["pooled_resources"])
# brokerage = pd.read_csv("output/authors_brokerage.csv")
# df = df.merge(brokerage, how="left", left_on="bai", right_on="bai")
print(df)
NR = NR[:,~junk]
NC = NC[:,~junk]
expertise = expertise[:,~junk]
S = S[:,~junk]
x = NR/NR.sum(axis=1)[:,np.newaxis]
y = NC/NC.sum(axis=1)[:,np.newaxis]
S_distrib = S/S.sum(axis=1)[:,np.newaxis]
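
# Topic relatedness matrix: R[i, j] is the probability that an author with
# above-average expertise in topic i also has above-average expertise in topic j.
# It is used as the similarity kernel in the Rao-Stirling diversity measures below.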
R = np.array([
    [((expertise[:,i]>expertise[:,i].mean())&(expertise[:,j]>expertise[:,j].mean())).mean()/(expertise[:,i]>expertise[:,i].mean()).mean() for j in range(len(topics))]
    for i in range(len(topics))
])
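
# Change score c_a = (1/2) * sum_k |y_ak - x_ak|: the total variation distance
# between the start (x) and end (y) attention distributions.
# Diversification: change in the effective number of topics exp(H(.)), scaled
# by the number of topics.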
change = np.abs(y-x).sum(axis=1)/2
diversification = (np.exp(entropy(y, axis=1))-np.exp(entropy(x, axis=1)))/x.shape[1]
x_matrix = np.einsum("ki,kj->kij", x, x)
y_matrix = np.einsum("ki,kj->kij", y, y)
x_stirling = 1-np.einsum("ij,kij->k", R, x_matrix)
y_stirling = 1-np.einsum("ij,kij->k", R, y_matrix)
if exists(opj(args.input, "cost_knowledge_bounded.npz")):
    cost_matrix = np.load(opj(args.input, "cost_knowledge_bounded.npz"))["C"].mean(axis=0)
    print(cost_matrix.sum())
    cost_matrix = cost_matrix*(1-np.eye(x.shape[1])).sum()/cost_matrix.sum()
else:
    cost_matrix = 1-np.eye(x.shape[1])
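
# Cognitive distance d_a: the earth mover's distance (optimal transport cost,
# ot.emd2) between the start and end attention distributions, under the
# topic-to-topic cost matrix chosen above.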
disruption = np.zeros(len(change))
for a in range(len(change)):
    # disruption[a] = ot.emd2(x[a,:].copy(order='C'), y[a,:].copy(order='C'), 1-R, processes=4)
    disruption[a] = ot.emd2(x[a,:].copy(order='C'), y[a,:].copy(order='C'), cost_matrix, processes=4)
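
# Entry/exit counts: a topic is "exited" when an author's attention share drops
# from above to below alpha times the population mean, and "entered" in the
# converse case.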
alpha = 1
exited = ((x>alpha*x.mean(axis=0))&(y<alpha*y.mean(axis=0))).sum(axis=1)
entered = ((x<alpha*x.mean(axis=0))&(y>alpha*y.mean(axis=0))).sum(axis=1)
fig, ax = plt.subplots(figsize=[6.4, 3.2])
ax.hist(change, bins=np.linspace(0,1,50), histtype="step", color='#377eb8', label="Change score $c_a$")
ax.hist(disruption, bins=np.linspace(0,1,50), histtype="step", color='#ff7f00', label="Cognitive distance $d_a$")
ax.set_xlabel("Change score $c_a$ and cognitive distance $d_a$")
ax.set_ylabel("\\# of scientists")
ax.legend()
fig.savefig(opj(args.input, f"change_disruption_score{fla}{args.output}.eps"), bbox_inches="tight")
print("change 50% interval: ", np.quantile(change, q=0.25), np.quantile(change, q=1-0.25))
fig, ax = plt.subplots(figsize=[6.4, 3.2])
ax.hist(diversification, bins=np.linspace(-0.5,0.5,50), histtype="step")
ax.set_xlabel("Diversification score $\\Delta_a$")
ax.set_ylabel("\\# of scientists")
fig.savefig(opj(args.input, f"diversification_score{fla}{args.output}.eps"), bbox_inches="tight")
fig, ax = plt.subplots()
ax.hist(disruption, bins=np.linspace(0,1,50), histtype="step")
ax.set_xlabel("Disruption score $d_a$")
ax.set_ylabel("\\# of scientists")
fig.savefig(opj(args.input, f"disruption_score{fla}{args.output}.eps"), bbox_inches="tight")
  186. df["change_score"] = change
  187. df["disruption_score"] = disruption
  188. df["diversification_score"] = diversification
  189. df["diversification_stirling_score"] = y_stirling-x_stirling
  190. df["entered_score"] = (entered>0).astype(int)
  191. df["exited_score"] = (exited>0).astype(int)
  192. df["exited_total_power_effect_score"] = (exited>0).astype(int)
  193. df["origin"] = np.argmax(x, axis=1)
  194. df["target"] = np.argmax(y, axis=1)
  195. df["origin_value"] = x.max(axis=1)
  196. df["target_value"] = y.max(axis=1)
  197. df["origin_final_value"] = np.array(y[a,df.loc[a, "origin"]] for a in range(x.shape[0]))
  198. df["target_initial_value"] = np.array(x[a,df.loc[a, "target"]] for a in range(x.shape[0]))
  199. df["origin_label"] = df["origin"].apply(lambda k: topics[k])
  200. df["target_label"] = df["target"].apply(lambda k: topics[k])
  201. df["origin_label"] = df.apply(lambda row: row["origin_label"] + (f" ({row['origin_value']:.2f})" if row["origin"]==row["target"] else f" ({row['origin_value']:.2f}$\\to${row['origin_final_value']:.2f})"), axis=1)
  202. df["target_label"] = df.apply(lambda row: row["target_label"] + (f" ({row['target_value']:.2f})" if row["origin"]==row["target"] else f" ({row['target_initial_value']:.2f}$\\to${row['target_value']:.2f})"), axis=1)
  203. df["social_entropy"] = np.exp(entropy(S,axis=1))
  204. df["intellectual_entropy"] = np.exp(entropy(expertise,axis=1))
  205. expertise_matrix = np.einsum("ki,kj->kij", expertise, expertise)
  206. social_expertise_matrix = np.einsum("ki,kj->kij", S_distrib, S_distrib)
  207. df["intellectual_stirling"] = 1-np.einsum("ij,kij->k", R, expertise_matrix)
  208. df["social_stirling"] = 1-np.einsum("ij,kij->k", R, social_expertise_matrix)
  209. # normalize productivity per time active during the early time period
  210. df["time_active"] = np.minimum(df["age"], early_period[1]+1-early_period[0])
  211. df["productivity"] /= df["time_active"]
  212. df["productivity_solo"] /= df["time_active"]
  213. df["primary_research_area"] = x.argmax(axis=1)
  214. df["social_diversity"] = df[f"social_{args.diversity}"].fillna(0)
  215. df["intellectual_diversity"] = df[f"intellectual_{args.diversity}"].fillna(0)
  216. df["res_social_diversity"] = df["social_diversity"]-LinearRegression().fit(df[["intellectual_diversity"]], df["social_diversity"]).predict(df[["intellectual_diversity"]])
data = {
    "N": len(df),
    "K": x.shape[1],
    "m": df[f"{args.metric}_score"],
    # "soc_cap": np.log(1+S.sum(axis=1)) if args.power == "magnitude" else np.log(1+df["brokerage"].values),
    "soc_cap": S.sum(axis=1) if args.power == "magnitude" else df["brokerage"].values,
    "soc_div": df["social_diversity"],
    "int_div": df["intellectual_diversity"],
    "res_soc_div": df["res_social_diversity"],
    "productivity": df["productivity"],
    "productivity_solo": df["productivity_solo"],
    "x": x,
    "initial_div": np.exp(entropy(x, axis=1)),
    "primary_research_area": df["primary_research_area"],
    "stable": df["stable"].astype(float).values,
    "age": df["age"].values
}
# research area 4 is collider physics (hard-coded index)
fig, ax = plt.subplots(figsize=[6.4, 3.2])
ax.hist(change[df["primary_research_area"] != 4], bins=np.linspace(0,1,25), histtype="step", label=f"Others ($\\mu={change[df['primary_research_area'] != 4].mean():.2f}$)", density=True)
ax.hist(change[df["primary_research_area"] == 4], bins=np.linspace(0,1,25), histtype="step", label=f"Collider physics ($\\mu={change[df['primary_research_area'] == 4].mean():.2f}$)", density=True)
ax.set_xlabel("Change score $c_a = \\frac{1}{2}\\sum_k |y_{ak}-x_{ak}|$")
ax.set_ylabel("Density")
ax.legend(loc='upper right', bbox_to_anchor=(1, 1.2))
fig.savefig(opj(args.input, f"change_score_collider_physics{fla}{args.output}.eps"), bbox_inches="tight")
fig, ax = plt.subplots(figsize=[6.4, 3.2])
ax.hist(disruption[df["primary_research_area"] != 4], bins=np.linspace(0,1,25), histtype="step", label=f"Others ($\\mu={disruption[df['primary_research_area'] != 4].mean():.2f}$)", density=True)
ax.hist(disruption[df["primary_research_area"] == 4], bins=np.linspace(0,1,25), histtype="step", label=f"Collider physics ($\\mu={disruption[df['primary_research_area'] == 4].mean():.2f}$)", density=True)
ax.set_xlabel("Cognitive distance $d_a$")
ax.set_ylabel("Density")
ax.legend(loc='upper right', bbox_to_anchor=(1, 1.2))
fig.savefig(opj(args.input, f"disruption_score_collider_physics{fla}{args.output}.eps"), bbox_inches="tight")
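
# Fit the Stan model for the selected metric unless cached posterior samples
# already exist; draws are stored as a compressed .npz next to the input data.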
if not exists(opj(args.input, f"samples_{args.metric}_{args.diversity}_{args.power}{fla}{args.output}.npz")):
    model = CmdStanModel(
        stan_file=f"code/{args.metric}.stan" if args.model==""
        else f"code/{args.metric}_{args.model}_{args.power}.stan",
    )
    fit = model.sample(
        data=data,
        chains=4,
        iter_sampling=10000,
        iter_warmup=1000,
        show_console=True
    )
    samples = fit.stan_variables()
    np.savez_compressed(opj(args.input, f"samples_{args.metric}_{args.diversity}_{args.power}{fla}{args.output}.npz"), **samples)
samples = np.load(opj(args.input, f"samples_{args.metric}_{args.diversity}_{args.power}{fla}{args.output}.npz"))
labels = [
    "Intellectual capital (diversity)",
    "Social capital (diversity)",
    "Social capital (power)",
    "Stable affiliation",
    "Academic age",
    "Productivity (co-authored)",
    "Productivity (solo-authored)",
]
labels = [f"\\textbf{{{label}}}" for label in labels]
labels += topics
names = [
    "beta_int_div", "beta_soc_div", "beta_soc_cap", "beta_stable", "beta_age", "beta_productivity", "beta_productivity_solo"
]
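
# Three plotting branches follow: diversification-style metrics (single model),
# change/disruption (two models overlaid, effects in standard deviations), and
# entered/exited (logistic models, effects reported as log odds ratios).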
if args.metric not in ["entered", "exited", "change", "disruption"]:
    mu = np.array([samples[name].mean() for name in names] + [(samples["beta_x"][:,i]*samples["tau"]).mean() for i in range(x.shape[1])])
    low = np.array([np.quantile(samples[name], q=0.05/2) for name in names] + [np.quantile(samples["beta_x"][:,i]*samples["tau"], q=0.05/2) for i in range(x.shape[1])])
    up = np.array([np.quantile(samples[name], q=1-0.05/2) for name in names] + [np.quantile(samples["beta_x"][:,i]*samples["tau"], q=1-0.05/2) for i in range(x.shape[1])])
    sig = up*low>0
    prob = np.array([(samples[name]*np.sign(samples[name].mean())<0).mean() for name in names] + [((samples["beta_x"][:,i]*np.sign(samples["beta_x"][:,i].mean()))<0).mean() for i in range(x.shape[1])])
    keep = sig | (np.arange(len(sig))<len(names))
    mu = mu[keep]
    low = low[keep]
    up = up[keep]
    prob = prob[keep]
    sign = ["<" if _mu>0 else ">" for _mu in mu]
    labels = [label for i, label in enumerate(labels) if keep[i]]
    n_vars = len(labels)
    # effect of capital and controls
    fig, ax = plt.subplots(figsize=[6.4, 0.4*(1+n_vars)])
    ax.scatter(mu, np.arange(len(labels))[::-1])
    ax.errorbar(mu, np.arange(len(labels))[::-1], xerr=(mu-low, up-mu), ls="none", capsize=4, elinewidth=1)
    ax.set_yticks(np.arange(len(labels))[::-1], labels)
    for i, p in enumerate(prob):
        if p>1e-4 and np.abs(p-0.5)>0.4:
            ax.text(
                -0.02 if mu[i]>0 else 0.02,
                np.arange(len(labels))[::-1][i],
                f"\\scriptsize $\\mu(\\beta)={mu[i]:.2g}, P(\\beta{sign[i]}0)={p:.2g}$",
                ha="right" if mu[i]>0 else "left",
                va="center"
            )
        elif p<0.05/2 or p>1-0.05/2:
            ax.text(
                -0.02 if mu[i]>0 else 0.02,
                np.arange(len(labels))[::-1][i],
                f"\\scriptsize $\\mu(\\beta)={mu[i]:.2g}$",
                ha="right" if mu[i]>0 else "left",
                va="center"
            )
    ax.set_xlabel(f"Effect on {args.metric}")
    ax.axvline(0, color="black")
    low, high = ax.get_xlim()
    bound = max(abs(low), abs(high))
    ax.set_xlim(-bound, bound)
    fig.savefig(opj(args.input, f"{args.metric}_score_effects_{args.diversity}_{args.power}{fla}{args.output}.eps"), bbox_inches="tight")
    # average change score per research area
    ratio = args.metric != "diversification"
    labels = topics
    if ratio:
        mu = np.array([(samples["mu_x"][:,i]/samples["mu_pop"]).mean() for i in range(x.shape[1])])
        low = np.array([np.quantile(samples["mu_x"][:,i]/samples["mu_pop"], q=0.05/2) for i in range(x.shape[1])])
        up = np.array([np.quantile(samples["mu_x"][:,i]/samples["mu_pop"], q=1-0.05/2) for i in range(x.shape[1])])
        sig = (up-1)*(low-1)>0
    else:
        mu = np.array([(samples["mu_x"][:,i]-samples["mu_pop"]).mean() for i in range(x.shape[1])])
        low = np.array([np.quantile(samples["mu_x"][:,i]-samples["mu_pop"], q=0.05/2) for i in range(x.shape[1])])
        up = np.array([np.quantile(samples["mu_x"][:,i]-samples["mu_pop"], q=1-0.05/2) for i in range(x.shape[1])])
        sig = up*low>0
    keep = sig
    mu = mu[keep]
    low = low[keep]
    up = up[keep]
    labels = [label for i, label in enumerate(labels) if keep[i]]
    fig, ax = plt.subplots(figsize=[6.4, 3.2])
    ax.scatter(mu, np.arange(len(labels))[::-1])
    ax.errorbar(mu, np.arange(len(labels))[::-1], xerr=(mu-low, up-mu), ls="none", capsize=4, elinewidth=1)
    ax.set_yticks(np.arange(len(labels))[::-1], labels)
    fig, ax = plt.subplots(figsize=[6.4, 3.2])
    df["m_ratio"] = df[f"{args.metric}_score"]/df[f"{args.metric}_score"].mean()
    research_areas = df.groupby("primary_research_area").agg(
        mu=("m_ratio", "mean"),
        low=("m_ratio", lambda x: np.quantile(x, q=0.05/2)),
        up=("m_ratio", lambda x: np.quantile(x, q=1-0.05/2)),
        label=("origin_label", lambda x: x.iloc[0])
    ).reset_index()
    low, high = ax.get_xlim()
    bound = max(abs(low), abs(high))
    ax.set_xlim(-bound, bound)
    ax.scatter(research_areas["mu"], research_areas.index)
    ax.errorbar(research_areas["mu"], research_areas.index, xerr=(research_areas["mu"]-research_areas["low"], research_areas["up"]-research_areas["mu"]), ls="none", capsize=4, elinewidth=1)
    ax.set_yticks(research_areas.index, research_areas["label"])
    ax.set_xlabel(f"Ratio to average {args.metric} score" if ratio else f"Difference with average {args.metric} score")
    ax.axvline(1 if ratio else 0, color="black")
    fig.savefig(opj(args.input, f"{args.metric}_research_area{fla}{args.output}.eps"), bbox_inches="tight")
elif args.metric in ["change", "disruption"]:
    labels = [
        "Intellectual capital (diversity)",
        "Social capital (diversity)",
        "Social capital (power)",
        "Stable affiliation",
        "Academic age",
        "Productivity (co-authored)",
        "Productivity (solo-authored)",
    ]
    names = [
        "beta_int_div", "beta_soc_div", "beta_soc_cap", "beta_stable", "beta_age", "beta_productivity", "beta_productivity_solo"
    ]
    if not args.compact:
        labels = [f"\\textbf{{{label}}}" for label in labels]
        labels += topics
    samples = [
        np.load(opj(args.input, f"samples_change_{args.diversity}_{args.power}{fla}{args.output}.npz")),
        np.load(opj(args.input, f"samples_disruption_{args.diversity}_{args.power}{fla}{args.output}.npz"))
    ]
    mu = [None, None]
    low = [None, None]
    up = [None, None]
    sig = [None, None]
    prob = [None, None]
    for i in range(2):
        mu[i] = np.array([samples[i][name].mean() for name in names] + [(samples[i]["beta_x"][:,j]*samples[i]["tau"]).mean() for j in range(x.shape[1])])
        low[i] = np.array([np.quantile(samples[i][name], q=0.05/2) for name in names] + [np.quantile(samples[i]["beta_x"][:,j]*samples[i]["tau"], q=0.05/2) for j in range(x.shape[1])])
        up[i] = np.array([np.quantile(samples[i][name], q=1-0.05/2) for name in names] + [np.quantile(samples[i]["beta_x"][:,j]*samples[i]["tau"], q=1-0.05/2) for j in range(x.shape[1])])
        sig[i] = up[i]*low[i]>0
        prob[i] = np.array([(samples[i][name]*np.sign(samples[i][name].mean())<0).mean() for name in names] + [((samples[i]["beta_x"][:,j]*np.sign(samples[i]["beta_x"][:,j].mean()))<0).mean() for j in range(x.shape[1])])
    if args.compact:
        keep = np.arange(len(sig[0]))<len(names)
    else:
        keep = sig[0] | sig[1] | (np.arange(len(sig[0]))<len(names))
    for i in range(2):
        mu[i] = mu[i][keep]
        low[i] = low[i][keep]
        up[i] = up[i][keep]
        prob[i] = prob[i][keep]
    sign = [["<" if _mu>0 else ">" for _mu in mu[i]] for i in range(2)]
    labels = [label for i, label in enumerate(labels) if keep[i]]
    n_vars = len(labels)
    if args.compact:
        labels = [
            '\n'.join(map(lambda part: f"\\textbf{{{part}}}", wrap(label, width=15))) if i < 4
            else '\n'.join(wrap(label, width=15))
            for i, label in enumerate(labels)
        ]
    print(labels)
    # effect of capital and controls
    fig, ax = plt.subplots(figsize=[4.8 if args.compact else 6.4, 0.52*(1+n_vars)])
    colors = ['#377eb8', '#ff7f00']
    legend = ["change ($c_a$)", "cognitive distance ($d_a$)"]
    for j in range(2):
        R2 = samples[j]["R2"].mean()
        dy = -0.125 if j else +0.125
        ax.scatter(mu[j], np.arange(len(labels))[::-1]+dy, color=colors[j])
        ax.errorbar(mu[j], np.arange(len(labels))[::-1]+dy, xerr=(mu[j]-low[j], up[j]-mu[j]), ls="none", capsize=4, elinewidth=1, color=colors[j], label=f"{legend[j]}, $R^2={R2:.2f}$")
        for i, p in enumerate(prob[j]):
            significant = p<0.05/2
            if p>1e-4 and np.abs(p-0.5)>0.4 and significant:
                ax.text(
                    -0.02 if mu[j][i]>0 else 0.02,
                    np.arange(len(labels))[::-1][i]+dy,
                    f"\\scriptsize $\\mu(\\beta)={mu[j][i]:.2g},P(\\beta{sign[j][i]}0)={p:.2g}$",
                    ha="right" if mu[j][i]>0 else "left",
                    va="center"
                )
            elif p>1e-4 and np.abs(p-0.5)>0.4 and (not significant):
                ax.text(
                    -0.02 if mu[j][i]>0 else 0.02,
                    np.arange(len(labels))[::-1][i]+dy,
                    f"\\scriptsize $P(\\beta{sign[j][i]}0)={p:.2g}$",
                    ha="right" if mu[j][i]>0 else "left",
                    va="center"
                )
            elif significant:
                ax.text(
                    -0.02 if mu[j][i]>0 else 0.02,
                    np.arange(len(labels))[::-1][i]+dy,
                    f"\\scriptsize $\\mu(\\beta)={mu[j][i]:.2g}$",
                    ha="right" if mu[j][i]>0 else "left",
                    va="center"
                )
    low, high = ax.get_xlim()
    bound = max(abs(low), abs(high))
    ax.set_xlim(-bound, bound)
    ax.set_yticks(np.arange(len(labels))[::-1], labels)
    ax.set_xlabel("Effect size (standard deviations)")
    ax.axvline(0, color="black")
    if args.compact:
        ax.legend(loc='upper right', bbox_to_anchor=(1, 1.3))
    else:
        ax.legend(loc='upper right', bbox_to_anchor=(1, 1.2))
    fig.savefig(opj(args.input, f"{args.metric}_score_effects_{args.diversity}_{args.power}{'_compact' if args.compact else ''}{fla}{args.output}.eps"), bbox_inches="tight")
    fig.savefig(opj(args.input, f"{args.metric}_score_effects_{args.diversity}_{args.power}{'_compact' if args.compact else ''}{fla}{args.output}.pdf"), bbox_inches="tight")
    fig.savefig(opj(args.input, f"{args.metric}_score_effects_{args.diversity}_{args.power}{'_compact' if args.compact else ''}{fla}{args.output}.png"), bbox_inches="tight", dpi=300)
else:
    labels = [
        "Intellectual capital (diversity)",
        "Social capital (diversity)",
        "Social capital (power)",
        "Stable affiliation",
        "Academic age",
        "Productivity (co-authored)",
        "Productivity (solo-authored)",
    ]
    if not args.compact:
        labels = [f"\\textbf{{{label}}}" for label in labels]
        labels += topics
    samples = [
        np.load(opj(args.input, f"samples_entered_{args.diversity}_{args.power}{fla}{args.output}.npz")),
        np.load(opj(args.input, f"samples_exited_{args.diversity}_{args.power}{fla}{args.output}.npz"))
    ]
    mu = [None, None]
    low = [None, None]
    up = [None, None]
    sig = [None, None]
    prob = [None, None]
    for i in range(2):
        mu[i] = np.array([samples[i][name].mean() for name in names] + [(samples[i]["beta_x"][:,j]*samples[i]["tau"]).mean() for j in range(x.shape[1])])
        low[i] = np.array([np.quantile(samples[i][name], q=0.05/2) for name in names] + [np.quantile(samples[i]["beta_x"][:,j]*samples[i]["tau"], q=0.05/2) for j in range(x.shape[1])])
        up[i] = np.array([np.quantile(samples[i][name], q=1-0.05/2) for name in names] + [np.quantile(samples[i]["beta_x"][:,j]*samples[i]["tau"], q=1-0.05/2) for j in range(x.shape[1])])
        sig[i] = up[i]*low[i]>0
        prob[i] = np.array([(samples[i][name]*np.sign(samples[i][name].mean())<0).mean() for name in names] + [((samples[i]["beta_x"][:,j]*np.sign(samples[i]["beta_x"][:,j].mean()))<0).mean() for j in range(x.shape[1])])
    if args.compact:
        keep = np.arange(len(sig[0]))<len(names)
    else:
        keep = sig[0] | sig[1] | (np.arange(len(sig[0]))<len(names))
    for i in range(2):
        mu[i] = mu[i][keep]
        low[i] = low[i][keep]
        up[i] = up[i][keep]
        prob[i] = prob[i][keep]
    sign = [["<" if _mu>0 else ">" for _mu in mu[i]] for i in range(2)]
    labels = [label for i, label in enumerate(labels) if keep[i]]
    n_vars = len(labels)
    if args.compact:
        labels = [
            '\n'.join(map(lambda part: f"\\textbf{{{part}}}", wrap(label, width=15))) if i < 4
            else '\n'.join(wrap(label, width=15))
            for i, label in enumerate(labels)
        ]
    print(labels)
    # effect of capital and controls
    fig, ax = plt.subplots(figsize=[4.8 if args.compact else 6.4, 0.52*(1+n_vars)])
    colors = ['#377eb8', '#ff7f00']
    legend = ["entered new research area", "exited research area"]
    if args.compact:
        ax.set_xlim(-0.9, 1.25)
    for j in range(2):
        dy = -0.125 if j else +0.125
        ax.scatter(mu[j], np.arange(len(labels))[::-1]+dy, color=colors[j])
        ax.errorbar(mu[j], np.arange(len(labels))[::-1]+dy, xerr=(mu[j]-low[j], up[j]-mu[j]), ls="none", capsize=4, elinewidth=1, color=colors[j], label=legend[j])
        for i, p in enumerate(prob[j]):
            significant = p<0.05/2
            if p>1e-4 and np.abs(p-0.5)>0.4 and significant:
                ax.text(
                    -0.02 if mu[j][i]>0 else 0.02,
                    np.arange(len(labels))[::-1][i]+dy,
                    f"\\scriptsize $\\mu(\\beta)={mu[j][i]:.2g},P(\\beta{sign[j][i]}0)={p:.2g}$",
                    ha="right" if mu[j][i]>0 else "left",
                    va="center"
                )
            elif p>1e-4 and np.abs(p-0.5)>0.4 and (not significant):
                ax.text(
                    -0.02 if mu[j][i]>0 else 0.02,
                    np.arange(len(labels))[::-1][i]+dy,
                    f"\\scriptsize $P(\\beta{sign[j][i]}0)={p:.2g}$",
                    ha="right" if mu[j][i]>0 else "left",
                    va="center"
                )
            elif significant:
                ax.text(
                    -0.02 if mu[j][i]>0 else 0.02,
                    np.arange(len(labels))[::-1][i]+dy,
                    f"\\scriptsize $\\mu(\\beta)={mu[j][i]:.2g}$",
                    ha="right" if mu[j][i]>0 else "left",
                    va="center"
                )
    low, high = ax.get_xlim()
    bound = max(abs(low), abs(high))
    ax.set_xlim(-bound, bound)
    ax.set_yticks(np.arange(len(labels))[::-1], labels)
    ax.set_xlabel("Effect size (log odds ratio)")
    ax.axvline(0, color="black")
    if args.compact:
        ax.legend(loc='upper right', bbox_to_anchor=(1, 1.3))
    else:
        ax.legend(loc='upper right', bbox_to_anchor=(1, 1.2))
    fig.savefig(opj(args.input, f"{args.metric}_score_effects_{args.diversity}_{args.power}{'_compact' if args.compact else ''}{fla}{args.output}.eps"), bbox_inches="tight")
    fig.savefig(opj(args.input, f"{args.metric}_score_effects_{args.diversity}_{args.power}{'_compact' if args.compact else ''}{fla}{args.output}.pdf"), bbox_inches="tight")
    fig.savefig(opj(args.input, f"{args.metric}_score_effects_{args.diversity}_{args.power}{'_compact' if args.compact else ''}{fla}{args.output}.png"), bbox_inches="tight", dpi=300)
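
# Export ranked LaTeX tables of the top and bottom 20 physicists by score.
# The "EOL" sentinel appended to the last column is swapped for \hline after
# rendering, producing horizontal rules between rows.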
table = df[["bai", "stable", f"{args.metric}_score", "intellectual_entropy", "social_entropy", "origin_label", "target_label"]].sort_values(f"{args.metric}_score", ascending=False)
table.to_csv(opj(args.input, f"{args.metric}_scores.csv"))
# pretty-print author identifiers (BAIs) as names
table["bai"] = table["bai"].str.replace(".1", "", regex=False)
table["bai"] = table["bai"].str.replace(r"^([A-Z])\.", r"\1.~", regex=True)
table["bai"] = table["bai"].str.replace(r"\.\~([A-Z])\.", r".~\1.~", regex=True)
table["bai"] = table["bai"].str.replace(r"([a-zA-Z]{2,})\.", r"\1 ", regex=True)
table["bai"] = table.apply(lambda r: r["bai"] if not r["stable"] else f"{r['bai']} ($\\ast$)", axis=1)
table["target_label"] += "EOL"
latex = table.head(20).to_latex(
    columns=["bai", f"{args.metric}_score", "intellectual_entropy", "social_entropy", "origin_label", "target_label"],
    header=["Physicist", "$c_a$", "$D(\\bm{I_a})$", "$D(\\bm{S_a})$", "Previous main area", "Current main area"],
    index=False,
    multirow=True,
    multicolumn=True,
    column_format='p{0.15\\textwidth}|c|c|c|b{0.25\\textwidth}|b{0.25\\textwidth}',
    escape=False,
    float_format=lambda x: f"{x:.2f}",
    caption="Physicists with the highest change scores $c_a$. $D(\\bm{I_a})$ and $D(\\bm{S_a})$ measure the diversity of intellectual and social capital. Numbers in parentheses indicate the share of attention dedicated to each research area during each time period. Asterisks ($\\ast$) indicate physicists with a permanent position.",
    label=f"table:top_{args.metric}",
    position="H"
)
latex = latex.replace('EOL \\\\\n', '\\\\ \\hline\n')
with open(opj(args.input, f"top_{args.metric}.tex"), "w+") as fp:
    fp.write(latex)
latex = table.sort_values(f"{args.metric}_score", ascending=True).head(20).to_latex(
    columns=["bai", f"{args.metric}_score", "intellectual_entropy", "social_entropy", "origin_label", "target_label"],
    header=["Physicist", "$c_a$", "$D(\\bm{I_a})$", "$D(\\bm{S_a})$", "Previous main area", "Current main area"],
    index=False,
    multirow=True,
    multicolumn=True,
    column_format='p{0.15\\textwidth}|c|c|c|b{0.25\\textwidth}|b{0.25\\textwidth}',
    escape=False,
    float_format=lambda x: f"{x:.2f}",
    caption="Physicists with the lowest change scores $c_a$. $D(\\bm{I_a})$ and $D(\\bm{S_a})$ measure the diversity of intellectual and social capital. Numbers in parentheses indicate the share of attention dedicated to each research area. Asterisks ($\\ast$) indicate physicists with a permanent position.",
    label=f"table:low_{args.metric}",
    position="H"
)
latex = latex.replace('EOL \\\\\n', '\\\\ \\hline\n')
with open(opj(args.input, f"low_{args.metric}.tex"), "w+") as fp:
    fp.write(latex)