Code Reproducibility

import numpy as np

p = np.linspace(.02, .98, num=49)
nflips = 10; nheads = 6
likelihood = p**(nheads)*(1 - p)**(nflips - nheads)
loglikelihood = nheads*np.log(p) + (nflips - nheads)*np.log(1 - p)
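A minimal sketch of reading the maximum-likelihood estimate off the grid of log-likelihoods; the name phat_grid is illustrative.

# the grid value of p maximizing the log-likelihood; with 6 heads in
# 10 flips, this should land at p = 0.6
phat_grid = p[np.argmax(loglikelihood)]
print("Grid MLE of p: {:.2f}".format(phat_grid))
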
import scipy as sp
import scipy.stats  # ensure the stats subpackage is loaded so sp.stats works

# simulate n = 1000 values each for X and Y from the N(0, 1) distribution
n = 1000
xs = np.random.normal(loc=0, scale=1, size=n)
ys = np.random.normal(loc=0, scale=1, size=n)
# compute the square
xssq = xs**2
yssq = ys**2
sum_xsq_ysq = xssq + yssq

# compute 30 evenly spaced bin centers for the histogram,
# from 0 to the maximum observed value
nbins = 30
bincenters = np.linspace(start=0, stop=np.max(sum_xsq_ysq), num=nbins)

# compute the pdf of the chi-squared distribution for X^2 + Y^2, which when
# X, Y are N(0, 1), is Chi2(2), the chi-squared distn with 2 degrees of freedom
dof = 2
true_pdf = sp.stats.chi2.pdf(bincenters, dof)
print("Approximate mean: {:2f}".format(np.mean(sum_xsq_ysq)))
# mean of chi-squared is just its degrees of freedom; here, 2
print("True mean: {:2f}".format(2))
Approximate mean: 1.950946
True mean: 2.000000
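A sketch of how the simulated values might be compared with the theoretical density, assuming matplotlib is available; the figure in the text may be produced differently.

import matplotlib.pyplot as plt

fig, ax = plt.subplots(figsize=(6, 3))
# empirical density of the simulated x^2 + y^2 values
ax.hist(sum_xsq_ysq, bins=nbins, density=True, alpha=0.5, label="simulated")
# overlay the Chi2(2) density evaluated at the bin centers
ax.plot(bincenters, true_pdf, color="black", label="Chi2(2) pdf")
ax.set_xlabel("$x^2 + y^2$")
ax.set_ylabel("density")
ax.legend()
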
import graspologic as gp

n = 10  # number of nodes
nsims = 200  # number of networks to simulate
p = 0.4

# realizations
As = [gp.simulations.er_np(n, p, directed=False, loops=False) for i in range(0, nsims)]
# fit ER models
fit_models = [gp.models.EREstimator(directed=False, loops=False).fit(A) for A in As]
hatps = [model.p_ for model in fit_models]  # the probability parameters
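One possible way to inspect the fitted probabilities against the true p = 0.4; a sketch only.

# compare the distribution of the fitted probabilities to the true p
print("Mean of phats: {:.3f}".format(np.mean(hatps)))
print("Std. dev. of phats: {:.3f}".format(np.std(hatps)))
print("True p: {:.3f}".format(p))
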
from pandas import DataFrame
ns = [8, 16, 32, 64, 128]
nsims = 200
p = 0.4
results = []
for n in ns:
  for i in range(0, nsims):
    A = gp.simulations.er_np(n, p, directed=False, loops=False)
    phatni = gp.models.EREstimator(directed=False, loops=False).fit(A).p_
    results.append({"n": n, "i": i, "phat": phatni})
res_df = DataFrame(results)
res_df["diff"] = np.abs(res_df["phat"] - p)
import numpy as np
from graspologic.simulations import er_np
from graspologic.embed import AdjacencySpectralEmbed

def orthogonal_align(Xhat, p=0.5):
    # resolve the sign non-identifiability of the one-dimensional embedding:
    # flip Xhat if it points away from the true latent position vector sqrt(p)
    return -Xhat if ((Xhat*np.sqrt(p)).sum() < 0) else Xhat

p = 0.5
# network sizes spaced evenly on a log10 scale, from roughly 32 to roughly 3162
ns = np.round(10**np.linspace(1.5, 3.5, 5)).astype(int)
ase = AdjacencySpectralEmbed(n_components=1)

nrep = 50
# simulate nrep ER networks at each size, embed each into one dimension,
# and align the signs of the estimated latent positions
As = [[er_np(n, p) for _ in range(nrep)] for n in ns]
Xhats_aligned = [[orthogonal_align(ase.fit_transform(A)) for A in An] for An in As]
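A quick sanity check (a sketch) that the sign alignment behaved as intended.

# after orthogonal_align, every embedding should have a nonnegative inner
# product with the true latent position vector sqrt(p)
all_aligned = all(
    (Xhat * np.sqrt(p)).sum() >= 0
    for Xhats_n in Xhats_aligned
    for Xhat in Xhats_n
)
print("All embeddings sign-aligned:", all_aligned)
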
import pandas as pd

# collect each embedded entry along with its node index i, network size n,
# replicate index j, and the true latent position sqrt(p)
data = []
for n_idx, n in enumerate(ns):
    for j in range(nrep):
        data.extend([(Xhats_aligned[n_idx][j][i][0], i, n, j, np.sqrt(p)) for i in range(n)])

df = pd.DataFrame(data, columns=["Xhat", "i", "n", "j", "X"])
df["abs_diff"] = np.abs(df["Xhat"] - df["X"])

# maximum entrywise deviation |Xhat - sqrt(p)| within each network
max_pernet = df.groupby(["n", "j"])["abs_diff"].max().reset_index()
# normalize the maximum deviation by the factor log^2(n)/sqrt(n)
max_pernet["norm_factor"] = np.log(max_pernet["n"])**2 / np.sqrt(max_pernet["n"])
max_pernet["norm_diff"] = max_pernet["abs_diff"] / max_pernet["norm_factor"]
# keep the first replicate at each network size and compute sqrt(n)*(Xhat - X)
df_reduced = df[df["j"] == 0].copy()
df_reduced["limiting_factor"] = np.sqrt(df_reduced["n"]) * (df_reduced["Xhat"] - df_reduced["X"])