
change_effects_summary.py

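"""Summarize posterior effect estimates as LaTeX tables.

For each dependent variable (change, disruption, entered, exited) and model
variant, the script loads posterior samples from
samples_{metric}_{diversity}_{power}.npz, computes posterior means and 95%
credible intervals for the named coefficients and the per-topic effects, and
writes two tables: summary_change_disruption.tex and summary_entered_exited.tex.

Usage (the --input directory is whatever folder holds topics.csv and the
samples_*.npz files; the path below is only illustrative):

    python change_effects_summary.py --input path/to/analysis_dir
"""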
import numpy as np
import pandas as pd
from scipy.stats import entropy
from sklearn.linear_model import LinearRegression
from matplotlib import pyplot as plt
import matplotlib

matplotlib.use("pgf")
matplotlib.rcParams.update(
    {
        "pgf.texsystem": "xelatex",
        "font.family": "serif",
        "font.serif": "Times New Roman",
        "text.usetex": True,
        "pgf.rcfonts": False,
    }
)
plt.rcParams["text.latex.preamble"] = "\n".join([
    r"\usepackage{amsmath}",
    r"\usepackage{amssymb}",
])
import argparse
from os.path import join as opj, exists
import pickle

parser = argparse.ArgumentParser()
parser.add_argument("--input")
args = parser.parse_args()
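# Topic labels are read from topics.csv; topics flagged as "Junk" are dropped
# and the remaining labels become the per-topic predictors.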
topics = pd.read_csv(opj(args.input, "topics.csv"))
junk = topics["label"].str.contains("Junk")
topics = topics[~junk]["label"].tolist()
n_topics = len(topics)
labels = [
    "Intellectual capital (diversity)",
    "Social capital (diversity)",
    "Social capital (power)",
    "Stable affiliation",
]
labels = [f"\\textbf{{{label}}}" for label in labels]
labels += topics
n_vars = len(labels)
label_position = {label: i for i, label in enumerate(labels)}
names = [
    "beta_int_div", "beta_soc_div", "beta_soc_cap", "beta_stable"
]
nice_names = {
    "change": "Change score ($c_a$)",
    "disruption": "Disruption score ($d_a$)",
    "entered": "Entered a new research area",
    "exited": "Exited a research area"
}
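# get_effects loads the posterior samples for one model variant and returns a
# DataFrame with, for each predictor, the posterior mean, the bounds of the 95%
# credible interval, and a LaTeX string formatting the mean with its asymmetric
# interval (bold when the interval excludes zero). Per-topic effects are the
# products beta_x[:, i] * tau, so the topic coefficients appear to share a
# common scale factor tau in the sampled model.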
def get_effects(metric, diversity, power):
    filename = opj(args.input, f"samples_{metric}_{diversity}_{power}.npz")
    if not exists(filename):
        print(f"samples not found: {filename}")
        return pd.DataFrame([])
    samples = np.load(filename)

    # Posterior mean and 95% credible interval for each named coefficient,
    # followed by the per-topic effects beta_x[:, i] * tau.
    mu = np.array([samples[name].mean() for name in names] + [(samples["beta_x"][:, i] * samples["tau"]).mean() for i in range(n_topics)])
    low = np.array([np.quantile(samples[name], q=0.05 / 2) for name in names] + [np.quantile(samples["beta_x"][:, i] * samples["tau"], q=0.05 / 2) for i in range(n_topics)])
    up = np.array([np.quantile(samples[name], q=1 - 0.05 / 2) for name in names] + [np.quantile(samples["beta_x"][:, i] * samples["tau"], q=1 - 0.05 / 2) for i in range(n_topics)])
    # An effect is "significant" when the credible interval excludes zero.
    sig = up * low > 0
    sign = mu > 0
    # Posterior probability that each effect has the opposite sign to its mean
    # (computed but not used below).
    prob = np.array([(samples[name] * np.sign(samples[name].mean()) < 0).mean() for name in names] + [((samples["beta_x"][:, i] * np.sign(samples["beta_x"][:, i].mean())) < 0).mean() for i in range(n_topics)])

    vars = []
    if diversity == "entropy":
        model = "Reference" if power == "magnitude" else "$P=\\text{Brokerage}$"
    else:
        model = "$D=\\text{Stirling}$"

    for i in range(n_vars):
        plus = up[i] - mu[i]
        minus = mu[i] - low[i]
        sign_char = "+" if sign[i] else ""
        # Use two significant digits unless that produces a long or
        # scientific-notation string, in which case fall back to one digit.
        s = f"{mu[i]:.2g}".replace("-", "")
        if len(s) < 5 and "e" not in s:
            if sig[i]:
                string = f"$\\bm{{{sign_char}{mu[i]:.2g}}}\\substack{{+{plus:.2g} \\\\ -{minus:.2g}}}$"
            else:
                string = f"${sign_char}{mu[i]:.2g}\\substack{{+{plus:.2g} \\\\ -{minus:.2g}}}$"
        else:
            if sig[i]:
                string = f"$\\bm{{{sign_char}{mu[i]:.1g}}}\\substack{{+{plus:.1g} \\\\ -{minus:.1g}}}$"
            else:
                string = f"${sign_char}{mu[i]:.1g}\\substack{{+{plus:.1g} \\\\ -{minus:.1g}}}$"
        vars.append({
            "Dep. variable": nice_names[metric],
            "Model": model,
            "mu": mu[i],
            "low": low[i],
            "up": up[i],
            "sig": sig[i] > 0,
            "Predictor": labels[i],
            "string": string,
        })

    print(metric, model)
    return pd.DataFrame(vars)
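# First table: effects on the change and disruption scores, with one column per
# (dependent variable, model) combination: Reference, D = Stirling, and
# P = Brokerage.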
vars = []
metrics = ["change", "disruption"]
for metric in metrics:
    vars.append(get_effects(metric, "entropy", "magnitude"))
    vars.append(get_effects(metric, "stirling", "magnitude"))
    vars.append(get_effects(metric, "entropy", "brokerage"))
vars = pd.concat(vars)
print(vars)
vars = vars.pivot(columns=["Dep. variable", "Model"], index="Predictor", values="string")
vars.sort_index(key=lambda x: x.map(label_position), inplace=True)
latex = vars.to_latex(
    escape=False,
    multicolumn_format="c",
    caption="Effect of each variable on (a) the change score and (b) the disruption score for each model. The reference model uses entropy as the diversity measure $D$ and the magnitude of intellectual capital as a measure of power $P$. Values indicate the mean posterior effect size and the 95\\% credible interval. Significant effects are shown in bold.",
    label="table:summary_change_disruption",
    position="H",
)
# Insert a rule before the "Hadrons" row (presumably the first topic label) to
# separate the bold predictors from the per-topic effects, then shrink the table.
latex = latex.replace("\\\nHadrons", "\\\n\\hline Hadrons")
latex = latex.replace("\\begin{tabular}", "\\renewcommand{\\arraystretch}{2}\\fontsize{6}{7}\\selectfont\\begin{tabular}")
latex = latex.replace("\\end{tabular}", "\\end{tabular}\\normalsize\\renewcommand{\\arraystretch}{1}")
with open(opj(args.input, "summary_change_disruption.tex"), "w+") as fp:
    fp.write(latex)
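# Second table: effects on the probability of having entered or exited a
# research area, built the same way as the table above.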
vars = []
metrics = ["entered", "exited"]
for metric in metrics:
    vars.append(get_effects(metric, "entropy", "magnitude"))
    vars.append(get_effects(metric, "stirling", "magnitude"))
    vars.append(get_effects(metric, "entropy", "brokerage"))
vars = pd.concat(vars)
print(vars)
vars = vars.pivot(columns=["Dep. variable", "Model"], index="Predictor", values="string")
vars.sort_index(key=lambda x: x.map(label_position), inplace=True)
latex = vars.to_latex(
    escape=False,
    multicolumn_format="c",
    caption="Effect of each variable on (a) the probability of having entered a new research area and (b) the probability of having exited a research area, for each model. The reference model uses entropy as the diversity measure $D$ and the magnitude of intellectual capital as a measure of power $P$. Values indicate the mean posterior effect size and the 95\\% credible interval. Significant effects are shown in bold.",
    label="table:summary_entered_exited",
    position="H",
)
latex = latex.replace("\\\nHadrons", "\\\n\\hline Hadrons")
latex = latex.replace("\\begin{tabular}", "\\renewcommand{\\arraystretch}{2}\\fontsize{6}{7}\\selectfont\\begin{tabular}")
latex = latex.replace("\\end{tabular}", "\\end{tabular}\\normalsize\\renewcommand{\\arraystretch}{1}")
with open(opj(args.input, "summary_entered_exited.tex"), "w+") as fp:
    fp.write(latex)