@Severin Pappadeux, I wrote this code with the help of ChatGPT, but I am unsure about a few things. 1: Shouldn't the minimum of the curves on the y-axis be 2? My curves go below 2. Or is that a result of the normalization? However, the first graph is unnormalized and some of its curves still go below 2.
import numpy as np
import matplotlib.pyplot as plt
def p_kappa_e(kappa, kappa_e):
    """
    Compute the unnormalized probability density function p(kappa_e).
    """
    term1 = 2
    term2 = kappa_e / (kappa * (kappa - kappa_e))
    term3 = (kappa_e / (kappa * (kappa - kappa_e))) + kappa_e - 2
    return term1 + term2 * term3

def kappa_e_max(kappa):
    """
    Compute the maximum value of kappa_e given kappa.
    """
    return (2 * kappa**2) / (1 + 2 * kappa)

# Values of kappa to consider
kappa_values = [0.1, 0.2, 0.5, 1, 2, 5, 10]

# Generate plots
plt.figure(figsize=(12, 8))

for kappa in kappa_values:
    # Calculate kappa_e range and kappa_e_max
    kappa_e_max_val = kappa_e_max(kappa)
    kappa_e_range = np.linspace(0, kappa_e_max_val, 500)

    # Compute p(kappa_e)
    p_values = p_kappa_e(kappa, kappa_e_range)

    # Plot the results
    plt.plot(kappa_e_range, p_values, label=f"kappa = {kappa}")

    # Verify the maximum occurs at kappa_e_max
    max_p_value = p_kappa_e(kappa, kappa_e_max_val)
    print(f"For kappa = {kappa}: kappa_e_max = {kappa_e_max_val:.4f}, p(kappa_e_max) = {max_p_value:.4f}")

plt.xlabel("kappa_e")
plt.ylabel("p(kappa_e)")
plt.title("Unnormalized PDF p(kappa_e) for various kappa values")
plt.legend()
plt.grid()
plt.show()
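To make the "goes below 2" observation easy to check numerically, here is a quick sanity check I added (a minimal sketch reusing p_kappa_e and kappa_e_max from above): the code computes p(kappa_e) = 2 + x*(x + kappa_e - 2) with x = kappa_e/(kappa*(kappa - kappa_e)), so printing that second term shows where the curve sits above or below 2.

# Quick sanity check (reuses p_kappa_e and kappa_e_max from above):
# p(kappa_e) = 2 + x * (x + kappa_e - 2) with x = kappa_e / (kappa * (kappa - kappa_e)),
# so p is below 2 exactly where that second term is negative.
kappa = 0.2
for kappa_e in np.linspace(0, kappa_e_max(kappa), 6):
    x = kappa_e / (kappa * (kappa - kappa_e))
    print(f"kappa_e = {kappa_e:.4f}: p = {p_kappa_e(kappa, kappa_e):.4f}, "
          f"second term = {x * (x + kappa_e - 2):+.4f}")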
# p_kappa_e and kappa_e_max are reused from above (same definitions).
def rejection_sampling(kappa, n_samples):
    """
    Perform rejection sampling to sample kappa_e from the unnormalized PDF p(kappa_e).

    Parameters:
        kappa: float, the value of kappa
        n_samples: int, the number of samples to generate

    Returns:
        samples: numpy array of sampled kappa_e values
    """
    kappa_e_max_val = kappa_e_max(kappa)

    # Define a maximum value for the PDF for rejection sampling.
    # Slightly overestimate the maximum value of p(kappa_e) to ensure correctness.
    p_max = p_kappa_e(kappa, kappa_e_max_val) * 1.1

    samples = []
    while len(samples) < n_samples:
        # Generate a candidate sample uniformly in the range [0, kappa_e_max]
        kappa_e_candidate = np.random.uniform(0, kappa_e_max_val)

        # Evaluate the target PDF at the candidate sample
        p_candidate = p_kappa_e(kappa, kappa_e_candidate)

        # Generate a uniform random number for acceptance
        u = np.random.uniform(0, p_max)

        # Accept the sample if u <= p_candidate
        if u <= p_candidate:
            samples.append(kappa_e_candidate)

    return np.array(samples)

# Example usage
kappa = 1.0        # Example value of kappa
n_samples = 10000  # Number of samples to generate

# Perform rejection sampling
samples = rejection_sampling(kappa, n_samples)

# Plot histogram of samples
plt.hist(samples, bins=50, density=True, alpha=0.7, label="Sampled")

# Plot the true PDF for comparison
kappa_e_range = np.linspace(0, kappa_e_max(kappa), 500)
p_values = p_kappa_e(kappa, kappa_e_range)
plt.plot(kappa_e_range, p_values / np.trapz(p_values, kappa_e_range), label="True PDF", color="red")

plt.xlabel("kappa_e")
plt.ylabel("Density")
plt.title("Rejection Sampling of kappa_e")
plt.legend()
plt.grid()
plt.show()
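A side note on speed: the sample-by-sample while loop gets slow once n_samples is large (the next script asks for 10**6), so here is a minimal vectorized sketch of the same rejection step that draws candidates in NumPy batches. It assumes the same p_kappa_e, kappa_e_max, and the same 1.1 safety factor on p_max; the name rejection_sampling_vectorized and the batch_size parameter are just names I made up for the sketch.

def rejection_sampling_vectorized(kappa, n_samples, batch_size=100_000):
    """Same rejection scheme as above, but drawing candidates in NumPy batches."""
    kappa_e_max_val = kappa_e_max(kappa)
    p_max = p_kappa_e(kappa, kappa_e_max_val) * 1.1  # same safety margin as above

    accepted = []
    n_accepted = 0
    while n_accepted < n_samples:
        # Draw a whole batch of candidates and acceptance thresholds at once
        candidates = np.random.uniform(0, kappa_e_max_val, batch_size)
        u = np.random.uniform(0, p_max, batch_size)

        # Keep the candidates that fall under the target curve
        kept = candidates[u <= p_kappa_e(kappa, candidates)]
        accepted.append(kept)
        n_accepted += kept.size

    return np.concatenate(accepted)[:n_samples]

The accepted values should follow the same distribution as the loop version; only the bookkeeping changes.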
# p_kappa_e, kappa_e_max and rejection_sampling are reused from above (same definitions).
# Parameters for the task
kappa = 0.2        # Given value of kappa
n_samples = 10**6  # Number of samples to generate

# Perform rejection sampling
samples = rejection_sampling(kappa, n_samples)

# Normalize the Monte Carlo PDF
hist, bin_edges = np.histogram(samples, bins=100, range=(0, kappa_e_max(kappa)), density=True)
kappa_e_centers = 0.5 * (bin_edges[:-1] + bin_edges[1:])

# Compute the true PDF for comparison
kappa_e_range = np.linspace(0, kappa_e_max(kappa), 500)
p_values = p_kappa_e(kappa, kappa_e_range)

# Normalize the true PDF by matching its area to the MC PDF
area_hist = np.trapz(hist, kappa_e_centers)
area_p_values = np.trapz(p_values, kappa_e_range)
p_values_normalized = p_values * (area_hist / area_p_values)

# Plot histogram and true PDF
plt.figure(figsize=(12, 8))
plt.hist(samples, bins=100, range=(0, kappa_e_max(kappa)), density=True, alpha=0.7, label="MC PDF (Histogram)")
plt.plot(kappa_e_range, p_values_normalized, label="Normalized True PDF", color="red")
plt.xlabel("kappa_e")
plt.ylabel("Density")
plt.title("Comparison of Monte Carlo PDF and True PDF")
plt.legend()
plt.grid()
plt.show()
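To put a number on the agreement instead of only eyeballing the plot, here is a small check (a sketch assuming kappa, hist, kappa_e_centers, kappa_e_range and p_values from the script above are still in scope) that compares the histogram values with the unit-area true PDF at the bin centers.

# Quantify the agreement between the MC histogram and the normalized true PDF
# (assumes kappa, hist, kappa_e_centers, kappa_e_range and p_values are in scope).
p_at_centers = p_kappa_e(kappa, kappa_e_centers) / np.trapz(p_values, kappa_e_range)

abs_diff = np.abs(hist - p_at_centers)
print(f"max  |MC - true| = {abs_diff.max():.4f}")
print(f"mean |MC - true| = {abs_diff.mean():.4f}")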