import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd

# --- 1. THE SETUP (CEO INPUTS) ---
SCENARIOS = 10000    # Simulating 10,000 potential futures
MEAN_DEMAND = 100    # Average demand per week
STD_DEV = 25         # Uncertainty (The "Risk")
# ALTERNATIVE SCENARIO (uncomment to try): lower margin, toxic leftovers
# PRICE_PER_UNIT = 25   # Lower margin (profit is only $5 per unit sold)
# SALVAGE_VALUE = -10   # Toxic waste! We PAY $10 to throw each unsold unit away.

# Cost Structure (The Profit Lever)
# We make $30 profit per unit sold ($50 - $20)
# We lose $15 per unit unsold ($20 cost - $5 salvage)
PRICE_PER_UNIT = 50
COST_PER_UNIT = 20
SALVAGE_VALUE = 5

# --- 2. THE SIMULATION (THE ENGINE) ---
# We test a range of strategies: ordering 80 to 150 units in steps of 5
strategies = np.arange(80, 151, 5)  # arange stops before the endpoint, so 151 includes 150

results = []

print(f"Simulating {SCENARIOS} scenarios for each strategy...")

for order_qty in strategies:
    # A. Generate Random Demand (Vectorized Aleatory Uncertainty)
    demand = np.random.normal(MEAN_DEMAND, STD_DEV, SCENARIOS)
    demand = np.maximum(demand, 0) # Clip negatives
    
    # B. The Vectorized Logic (Runs at C++ speed)
    units_sold = np.minimum(order_qty, demand)
    units_unsold = order_qty - units_sold
    
    # C. Calculate Profit for ALL 10,000 scenarios at once
    revenue = units_sold * PRICE_PER_UNIT
    cost = order_qty * COST_PER_UNIT
    salvage = units_unsold * SALVAGE_VALUE
    
    profit = revenue - cost + salvage
    
    # D. Store the aggregate metrics
    results.append({
        'order_qty': order_qty,
        'avg_profit': np.mean(profit),
        'risk_of_loss': np.mean(profit < 0) * 100 # % chance of losing money
    })

# --- 3. THE INSIGHT (VISUALIZATION) ---
df = pd.DataFrame(results)
best_strat = df.loc[df['avg_profit'].idxmax()]

print("\n--- RESULTS ---")
print(f"Optimal Order Quantity: {best_strat['order_qty']} units")
print(f"Expected Profit: ${best_strat['avg_profit']:.2f}")
print(f"Risk of Losing Money: {best_strat['risk_of_loss']:.2f}%")

# Plotting
plt.figure(figsize=(10, 6))
sns.lineplot(data=df, x='order_qty', y='avg_profit', marker='o')
plt.axvline(best_strat['order_qty'], color='red', linestyle='--', label='Optimal Strategy')
plt.title(f"Profit Optimization (Demand Mean={MEAN_DEMAND}, Std={STD_DEV})")
plt.xlabel("Order Quantity (Inventory Decision)")
plt.ylabel("Average Profit ($)")
plt.grid(True)
plt.legend()
plt.show()
Simulating 10000 scenarios for each strategy...

--- RESULTS ---
Optimal Order Quantity: 110.0 units
Expected Profit: $2596.95
Risk of Losing Money: 0.55%
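
As a sanity check, the classic newsvendor formula gives the same answer analytically. Below is a small sketch (it assumes scipy is available and reuses the profit figures defined above); since the simulation only tests order sizes in steps of 5, the simulated 110 is the grid point closest to the analytical optimum of roughly 110.8.

from scipy.stats import norm

# Newsvendor critical ratio: underage cost / (underage + overage cost)
cu = 50 - 20                       # margin lost per unit of unmet demand
co = 20 - 5                        # loss per unit left over (cost minus salvage)
critical_ratio = cu / (cu + co)    # ~0.667

# Optimal order quantity = demand quantile at the critical ratio
q_star = norm.ppf(critical_ratio, loc=100, scale=25)
print(f"Analytical optimum: {q_star:.1f} units")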

# 1. Generate the massive block of 10,000 scenarios
# (This normally happens inside the loop, but we do it here to peek)
sample_demand = np.random.normal(MEAN_DEMAND, STD_DEV, SCENARIOS)
sample_demand = np.maximum(sample_demand, 0) # Clip negatives

# 2. Print the first 10 "Universes"
print(f"--- 10 Sample Universes (Mean={MEAN_DEMAND}) ---")
for i in range(10):
    print(f"Scenario {i}: {sample_demand[i]:.2f} customers")

# 3. Systems Check (Law of Large Numbers)
print("\n--- Aggregate Reality ---")
print(f"Actual Average of all 10,000: {np.mean(sample_demand):.2f}")
--- 10 Sample Universes (Mean=100) ---
Scenario 0: 157.76 customers
Scenario 1: 93.14 customers
Scenario 2: 93.09 customers
Scenario 3: 80.67 customers
Scenario 4: 111.48 customers
Scenario 5: 73.68 customers
Scenario 6: 115.33 customers
Scenario 7: 81.71 customers
Scenario 8: 84.69 customers
Scenario 9: 104.40 customers

--- Aggregate Reality ---
Actual Average of all 10,000: 100.27
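
If you want these ten sample universes (and the runs below) to come out identical every time, you can seed NumPy's random generator. A minimal sketch, assuming the modern Generator API:

rng = np.random.default_rng(42)   # any fixed seed makes the draw reproducible
sample_demand = rng.normal(MEAN_DEMAND, STD_DEV, SCENARIOS)
sample_demand = np.maximum(sample_demand, 0)
print(f"Seeded average of all {SCENARIOS}: {np.mean(sample_demand):.2f}")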
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

# --- 1. THE CEO INPUTS ---
SCENARIOS = 5000     # Parallel Universes
WEEKS = 52           # One Year
MEAN_DEMAND = 100    # Average customers per week
STD_DEV = 30         # Higher volatility (Risk)

# Financials
PRICE = 50
COST = 20
HOLDING_COST = 2     # $2 per week to store an unsold unit (The "Killer")

# THE DECISION: "Target Inventory Level"
# We try to maintain this much stock at the start of every week
TARGET_LEVEL = 200   

# --- 2. INITIALIZE THE SIMULATION ---
# Everyone starts with 0 inventory
current_inventory = np.zeros(SCENARIOS)
total_profit = np.zeros(SCENARIOS)

# To store history for plotting (Shape: 52 rows, 5000 columns)
history_inventory = []

print(f"Simulating {WEEKS} weeks across {SCENARIOS} universes...")

# --- 3. THE TIME LOOP (Dynamic Simulation) ---
for week in range(WEEKS):
    
    # A. DECISION (The "Order-Up-To" Logic)
    # If we have 150 and target is 200, we order 50.
    # If we have 250 (because of returns/bloat), we order 0.
    order_qty = TARGET_LEVEL - current_inventory
    order_qty = np.maximum(order_qty, 0) # Can't order negative amounts
    
    # B. ARRIVAL (Lead Time = 0 for now)
    stock_on_hand = current_inventory + order_qty
    
    # C. DEMAND (The Uncertainty)
    demand = np.random.normal(MEAN_DEMAND, STD_DEV, SCENARIOS)
    demand = np.maximum(demand, 0)
    
    # D. SALES (The Constraint)
    sales = np.minimum(stock_on_hand, demand)
    
    # E. END OF WEEK (State Update)
    unsold_inventory = stock_on_hand - sales
    current_inventory = unsold_inventory # Pass to next week
    
    # F. FINANCIALS
    revenue = sales * PRICE
    cogs = order_qty * COST
    holding = unsold_inventory * HOLDING_COST # The penalty for carrying stock
    
    weekly_profit = revenue - cogs - holding
    total_profit += weekly_profit
    
    # Record history for the chart
    history_inventory.append(current_inventory.copy())

# --- 4. THE VISUALIZATION (The "Spaghetti Plot") ---
# We convert list of arrays into a Matrix [52 weeks, 5000 scenarios]
history_matrix = np.array(history_inventory)

plt.figure(figsize=(12, 6))

# Plot the first 100 "Universes" to see the chaos (The "Spaghetti")
plt.plot(history_matrix[:, :100], color='blue', alpha=0.1, linewidth=1)

# Plot the Average (The "Signal")
plt.plot(np.mean(history_matrix, axis=1), color='red', linewidth=3, label='Average Inventory')

plt.axhline(TARGET_LEVEL, color='green', linestyle='--', label='Target Level (200)')
plt.title(f"Inventory Dynamics over {WEEKS} Weeks (Order-Up-To {TARGET_LEVEL})")
plt.ylabel("Inventory Level at End of Week")
plt.xlabel("Week")
plt.legend()
plt.show()

print(f"Average Annual Profit: ${np.mean(total_profit):,.2f}")
print(f"Risk of Bankruptcy (Negative Year): {np.mean(total_profit < 0)*100:.2f}%")
Simulating 52 weeks across 5000 universes...

Average Annual Profit: $143,433.80
Risk of Bankruptcy (Negative Year): 0.00%
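
Average profit and the chance of a negative year are only two summaries of the 5,000-universe profit distribution. A short sketch of how you might also report the downside and upside tails, assuming total_profit from the run above is still in memory:

# Percentiles of the simulated annual profit distribution
p5, p50, p95 = np.percentile(total_profit, [5, 50, 95])
print(f"Worst 5% of years earn less than:  ${p5:,.2f}")
print(f"Median year:                       ${p50:,.2f}")
print(f"Best 5% of years earn more than:   ${p95:,.2f}")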
import numpy as np
import matplotlib.pyplot as plt

# --- 1. THE CEO INPUTS ---
SCENARIOS = 5000
WEEKS = 52
MEAN_DEMAND = 100
STD_DEV = 30
TARGET_LEVEL = 200

# THE KILLER VARIABLE
LEAD_TIME = 3  # It takes 3 weeks for an order to arrive

# --- 2. INITIALIZE ---
current_inventory = np.zeros(SCENARIOS)
total_profit = np.zeros(SCENARIOS)

# The Pipeline: Tracks orders in transit for each universe
# Shape: [5000 universes, 3 weeks of delay]
pipeline = np.zeros((SCENARIOS, LEAD_TIME))

history_inventory = []

print(f"Simulating Supply Chain with {LEAD_TIME} Week Delay...")

# --- 3. THE TIME LOOP ---
for week in range(WEEKS):

    # A. WHAT IS ARRIVING? (The truck opens its doors)
    # We take the shipment from slot 0 (due this week).
    # Note: pipeline[:, 0] is a view, so we .copy() it; otherwise the
    # pipeline shift below would overwrite the arrival before we use it.
    arriving_stock = pipeline[:, 0].copy()

    # B. DECISION (The "Blind" Order)
    # Crucial: We must account for what is ALREADY coming (Pipeline)
    # If we don't count the pipeline, we will panic order and double-buy.
    inventory_position = current_inventory + np.sum(pipeline, axis=1)
    
    order_qty = TARGET_LEVEL - inventory_position
    order_qty = np.maximum(order_qty, 0)

    # C. UPDATE PIPELINE (The Truck Departs)
    # Shift everything to the left (Week 2 becomes Week 1, etc.)
    pipeline[:, :-1] = pipeline[:, 1:]
    # Put our new order at the back of the queue
    pipeline[:, -1] = order_qty

    # D. FULFILLMENT (Using what just arrived)
    # Note: We can only sell what we physically have, not what's in the pipeline
    stock_on_hand = current_inventory + arriving_stock
    
    # E. DEMAND & SALES
    demand = np.random.normal(MEAN_DEMAND, STD_DEV, SCENARIOS)
    demand = np.maximum(demand, 0)
    
    sales = np.minimum(stock_on_hand, demand)
    
    # F. END OF WEEK
    current_inventory = stock_on_hand - sales
    history_inventory.append(current_inventory.copy())

# --- 4. VISUALIZATION ---
history_matrix = np.array(history_inventory)

plt.figure(figsize=(12, 6))
# Plot 100 spaghetti lines
plt.plot(history_matrix[:, :100], color='blue', alpha=0.1, linewidth=1)
# Plot the Average
plt.plot(np.mean(history_matrix, axis=1), color='red', linewidth=3, label='Average Inventory')
plt.axhline(TARGET_LEVEL, color='green', linestyle='--', label='Target')

plt.title(f"The Bullwhip Effect: {LEAD_TIME} Week Lead Time")
plt.ylabel("Physical Inventory Level")
plt.xlabel("Week")
plt.legend()
plt.show()
Simulating Supply Chain with 3 Week Delay...
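
One subtle NumPy detail in the pipeline code above: pipeline[:, 0] is a view, not a copy, so shifting the pipeline in place would silently change the arrival as well; that is why the loop takes .copy() before shifting. A tiny isolated sketch of the pitfall:

import numpy as np

pipe = np.array([[10., 20., 30.]])   # one universe, 3 slots of in-transit stock
arrival = pipe[:, 0]                 # basic slicing returns a VIEW, not a copy
pipe[:, :-1] = pipe[:, 1:]           # shift the pipeline left in place
print(arrival)                       # [20.] -- this week's arrival silently changed

pipe = np.array([[10., 20., 30.]])
arrival = pipe[:, 0].copy()          # .copy() snapshots the value first
pipe[:, :-1] = pipe[:, 1:]
print(arrival)                       # [10.] -- correct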

import numpy as np
import matplotlib.pyplot as plt

# --- 1. THE CEO INPUTS ---
SCENARIOS = 2000     # Reduced slightly for speed
WEEKS = 52
MEAN_DEMAND = 100
STD_DEV = 30
TARGET_LEVEL = 200
LEAD_TIME = 3        # The Delay is still there!

# THE OPTIMIZER (The "P" Controller)
# 1.0 = Full Reaction (Panic Mode - Current State)
# 0.2 = Smooth Reaction (We only order 20% of the gap per week)
SMOOTHING_FACTOR = 1.0 

# --- 2. INITIALIZE ---
current_inventory = np.zeros(SCENARIOS)
pipeline = np.zeros((SCENARIOS, LEAD_TIME))

# We now track TWO histories to see Cause & Effect
history_inventory = []
history_orders = []

print(f"Simulating with Smoothing Factor: {SMOOTHING_FACTOR}")

# --- 3. THE TIME LOOP ---
for week in range(WEEKS):
    
    # A. ARRIVAL (.copy() so the pipeline shift below doesn't overwrite it)
    arriving_stock = pipeline[:, 0].copy()
    
    # B. DECISION (The Smoothed Logic)
    # Calculate the Gap
    inventory_position = current_inventory + np.sum(pipeline, axis=1)
    gap = TARGET_LEVEL - inventory_position
    
    # APPLY THE "P" CONTROLLER
    # Instead of ordering the full gap, we scale it by the smoothing factor
    # (1.0 = close the whole gap at once; 0.2 = close only 20% of it per week)
    order_qty = gap * SMOOTHING_FACTOR
    order_qty = np.maximum(order_qty, 0)

    # C. UPDATE PIPELINE
    pipeline[:, :-1] = pipeline[:, 1:]
    pipeline[:, -1] = order_qty

    # D. FULFILLMENT
    stock_on_hand = current_inventory + arriving_stock
    
    # E. DEMAND
    demand = np.random.normal(MEAN_DEMAND, STD_DEV, SCENARIOS)
    demand = np.maximum(demand, 0)
    sales = np.minimum(stock_on_hand, demand)
    
    # F. END OF WEEK
    current_inventory = stock_on_hand - sales
    
    # RECORD HISTORY
    history_inventory.append(current_inventory.copy())
    history_orders.append(order_qty.copy())

# --- 4. THE VISUALIZATION DASHBOARD ---
hist_inv = np.array(history_inventory)
hist_ord = np.array(history_orders)

fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(12, 10), sharex=True)

# Graph 1: Inventory Stability (The Effect)
ax1.plot(hist_inv[:, :50], color='blue', alpha=0.1) # Show 50 individual timelines
ax1.plot(np.mean(hist_inv, axis=1), color='red', linewidth=3, label='Avg Inventory')
ax1.axhline(TARGET_LEVEL, color='green', linestyle='--', label='Target')
ax1.set_title(f"Inventory Levels (Smoothing = {SMOOTHING_FACTOR})")
ax1.set_ylabel("Units in Warehouse")
ax1.legend()
ax1.grid(True)

# Graph 2: Ordering Stability (The Cause)
# This reveals if your strategy is frantic or calm
ax2.plot(hist_ord[:, :50], color='purple', alpha=0.1)
ax2.plot(np.mean(hist_ord, axis=1), color='orange', linewidth=3, label='Avg Order Size')
ax2.axhline(MEAN_DEMAND, color='black', linestyle='--', label='Mean Demand (100)')
ax2.set_title("Order Quantities (Purchasing Behavior)")
ax2.set_ylabel("Units Ordered per Week")
ax2.set_xlabel("Week")
ax2.legend()
ax2.grid(True)

plt.show()
Simulating with Smoothing Factor: 1.0
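
The dashboard shows purchasing behaviour visually; a single number that summarizes it is the ratio of order volatility to demand volatility (the classic bullwhip measure, where values above 1 mean the supply chain amplifies demand noise). A short sketch, assuming hist_ord and STD_DEV from the run above are still in memory:

# Bullwhip ratio: order volatility relative to customer demand volatility
order_std = np.std(hist_ord[10:])   # skip the first weeks while the pipeline fills up
print(f"Weekly order std-dev: {order_std:.1f} units (demand std-dev = {STD_DEV})")
print(f"Bullwhip ratio: {order_std / STD_DEV:.2f}  (>1 means orders amplify demand noise)")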

import geopandas as gpd
import matplotlib.pyplot as plt

# 1. LOAD THE WORLD (The "Arena")
# FIXED: We load directly from the official URL instead of the deprecated function
print("Downloading map data from Natural Earth...")
url = "https://naciscdn.org/naturalearth/110m/cultural/ne_110m_admin_0_countries.zip"
world = gpd.read_file(url)

# 2. FILTER (Focus on North America)
north_america = world[world.CONTINENT == "North America"]

# 3. DEFINE A SUPPLY CHAIN (The Nodes)
cities = {
    'City': ['Los Angeles', 'New York', 'Dallas', 'Chicago'],
    'Type': ['Port', 'Customer', 'Warehouse', 'Customer'],
    'Latitude': [34.05, 40.71, 32.77, 41.87], 
    'Longitude': [-118.24, -74.00, -96.79, -87.62]
}

# Convert standard data to "Geospatial" data
df_cities = gpd.GeoDataFrame(
    cities, 
    geometry=gpd.points_from_xy(cities['Longitude'], cities['Latitude']),
    crs="EPSG:4326" # Define the coordinate system (Standard Lat/Lon)
)

# 4. RENDER THE DASHBOARD
fig, ax = plt.subplots(figsize=(15, 10))

# Layer 1: The Base Map
north_america.plot(ax=ax, color='lightgrey', edgecolor='white')

# Layer 2: The Nodes
df_cities.plot(ax=ax, column='Type', legend=True, 
               cmap='Set1', markersize=200, edgecolor='black', zorder=5)

plt.title("Supply Chain Network: The Geospatial Layer")
plt.axis('off') # Clean up the look
plt.show()
Downloading map data from Natural Earth...
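
With the nodes on the map, a natural next step is to measure the lanes between them. A minimal sketch, assuming the df_cities frame built above, using a plain haversine formula to get great-circle distances from the Dallas warehouse to every node:

import numpy as np

def haversine_km(lat1, lon1, lat2, lon2):
    """Great-circle distance in kilometres between two lat/lon points."""
    lat1, lon1, lat2, lon2 = map(np.radians, (lat1, lon1, lat2, lon2))
    a = np.sin((lat2 - lat1) / 2) ** 2 + \
        np.cos(lat1) * np.cos(lat2) * np.sin((lon2 - lon1) / 2) ** 2
    return 2 * 6371 * np.arcsin(np.sqrt(a))

warehouse = df_cities[df_cities['City'] == 'Dallas'].iloc[0]
for _, row in df_cities.iterrows():
    d = haversine_km(warehouse['Latitude'], warehouse['Longitude'],
                     row['Latitude'], row['Longitude'])
    print(f"Dallas -> {row['City']}: {d:,.0f} km")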

import geopandas as gpd
import matplotlib.pyplot as plt
import numpy as np

# --- 1. SETUP THE MAP ---
print("Initializing Dashboard...")
# Load North America again
url = "https://naciscdn.org/naturalearth/110m/cultural/ne_110m_admin_0_countries.zip"
world = gpd.read_file(url)
north_america = world[world.CONTINENT == "North America"]

# Define the Cities with their "Market Personality"
cities_data = {
    'City': ['Los Angeles', 'New York', 'Dallas', 'Chicago'],
    'Latitude': [34.05, 40.71, 32.77, 41.87], 
    'Longitude': [-118.24, -74.00, -96.79, -87.62],
    'Mean_Demand': [150, 200, 80, 100],  # Different sizes
    'Volatility': [60, 40, 10, 20]       # Different risks (LA is chaotic!)
}
df = gpd.GeoDataFrame(
    cities_data, 
    geometry=gpd.points_from_xy(cities_data['Longitude'], cities_data['Latitude']),
    crs="EPSG:4326"
)

# --- 2. THE SIMULATION ENGINE (Wrapped as a Function) ---
def run_simulation(mean, std_dev):
    """Runs a 52-week simulation and returns the Stockout Risk %"""
    SCENARIOS = 2000
    WEEKS = 52
    
    # --- THE TWEAK ---
    # Old: mean * 2 (Safe, boring, expensive)
    # New: mean * 1.2 (Lean, profitable, dangerous)
    # We only keep 20% safety stock now.
    TARGET = mean * 1.2 
    
    current_inv = np.zeros(SCENARIOS)
    stockouts = 0
    
    for week in range(WEEKS):
        demand = np.random.normal(mean, std_dev, SCENARIOS)
        demand = np.maximum(demand, 0)
        
        sales = np.minimum(current_inv, demand)
        unsold = current_inv - sales
        
        # Count failures
        stockouts += np.sum(current_inv < 1)
        
        # Reorder
        order = TARGET - unsold
        order = np.maximum(order, 0)
        current_inv = unsold + order 
        
    total_events = SCENARIOS * WEEKS
    risk_score = (stockouts / total_events) * 100
    return risk_score

# RERUN THE ANALYSIS
print("Running Lean Simulation...")
df['Risk_Score'] = df.apply(
    lambda row: run_simulation(row['Mean_Demand'], row['Volatility']), axis=1
)

# RENDER
fig, ax = plt.subplots(figsize=(15, 10))
north_america.plot(ax=ax, color='#f0f0f0', edgecolor='white')
df.plot(ax=ax, column='Risk_Score', cmap='RdYlGn_r', legend=True, 
        markersize=df['Mean_Demand'] * 5, edgecolor='black', alpha=0.8)

for x, y, label, risk in zip(df.geometry.x, df.geometry.y, df['City'], df['Risk_Score']):
    ax.text(x + 1, y, f"{label}\nRisk: {risk:.1f}%", fontsize=10)

plt.title("Supply Chain Risk Heatmap: The Cost of Being 'Lean'")
plt.axis('off')
plt.show()
Initializing Dashboard...
Running Lean Simulation...
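
Before looking at the fix below, it is worth pinning down why the engine above needs one: the reorder step refills every universe back to TARGET at the end of each week, and the failure check looks at current_inv < 1 rather than at demand, so only the empty week 0 can ever register a stockout. A small arithmetic sketch of what that engine reports for every city, regardless of volatility:

SCENARIOS, WEEKS = 2000, 52
# Week 0 starts at zero inventory (every universe "fails" once); weeks 1-51
# always begin back at TARGET, so they can never trip the current_inv < 1 check.
cold_start_risk = (SCENARIOS * 1) / (SCENARIOS * WEEKS) * 100
print(f"Risk reported by the cold-start engine: {cold_start_risk:.2f}% for every city")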

import geopandas as gpd
import matplotlib.pyplot as plt
import numpy as np

# --- 1. SETUP MAP & DATA ---
url = "https://naciscdn.org/naturalearth/110m/cultural/ne_110m_admin_0_countries.zip"
world = gpd.read_file(url)
north_america = world[world.CONTINENT == "North America"]

cities_data = {
    'City': ['Los Angeles', 'New York', 'Dallas', 'Chicago'],
    'Latitude': [34.05, 40.71, 32.77, 41.87], 
    'Longitude': [-118.24, -74.00, -96.79, -87.62],
    'Mean_Demand': [150, 200, 80, 100],  
    'Volatility': [60, 40, 10, 20]      
}
df = gpd.GeoDataFrame(
    cities_data, 
    geometry=gpd.points_from_xy(cities_data['Longitude'], cities_data['Latitude']),
    crs="EPSG:4326"
)

# --- 2. THE FIXED SIMULATION ENGINE ---
def run_lean_simulation(mean, std_dev):
    SCENARIOS = 2000
    WEEKS = 52
    
    # AGGRESSIVE "LEAN" STRATEGY: barely any safety stock.
    # We order up to just 20% above average demand; any week where
    # demand runs more than 20% above the mean is a stockout.
    TARGET = mean * 1.2 
    
    # FIX: Warm Start (Start full, not empty)
    current_inv = np.full(SCENARIOS, TARGET)
    
    # Track total lost sales events
    stockout_count = 0
    
    for week in range(WEEKS):
        demand = np.random.normal(mean, std_dev, SCENARIOS)
        demand = np.maximum(demand, 0)
        
        # LOGIC: If Demand > Inventory, we had a stockout
        # (We count how many scenarios failed this week)
        failures = demand > current_inv
        stockout_count += np.sum(failures)
        
        # Reorder (Instant replenishment for next week)
        # We assume we refill to Target at start of next week
        current_inv = np.full(SCENARIOS, TARGET)

    # Calculate Probability of Stockout
    total_opportunities = SCENARIOS * WEEKS
    risk_score = (stockout_count / total_opportunities) * 100
    return risk_score

# --- 3. RUN & VISUALIZE ---
print("Running 'Zero Safety Stock' Stress Test...")
df['Risk_Score'] = df.apply(
    lambda row: run_lean_simulation(row['Mean_Demand'], row['Volatility']), axis=1
)

# PRINT THE RAW DATA (To verify it worked)
print(df[['City', 'Risk_Score']])

# PLOT
fig, ax = plt.subplots(figsize=(15, 10))
north_america.plot(ax=ax, color='#f0f0f0', edgecolor='white')

df.plot(ax=ax, 
        column='Risk_Score', 
        cmap='RdYlGn_r', # Red = High Risk
        legend=True, 
        markersize=df['Mean_Demand'] * 5, 
        edgecolor='black',
        alpha=0.9)

for x, y, label, risk in zip(df.geometry.x, df.geometry.y, df['City'], df['Risk_Score']):
    ax.text(x + 1, y, f"{label}\nFail Rate: {risk:.1f}%", fontsize=11, fontweight='bold')

plt.title("Supply Chain Stress Test: Who Survives Zero Safety Stock?")
plt.axis('off')
plt.show()
Running 'Zero Safety Stock' Stress Test...
          City  Risk_Score
0  Los Angeles   30.975000
1     New York   15.883654
2       Dallas    5.484615
3      Chicago   15.857692
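
These fail rates are exactly what the normal demand model predicts: with the warm start and instant weekly refill to 1.2x mean, a week fails whenever demand exceeds 1.2x mean. A quick analytical cross-check (a sketch, assuming scipy is available):

from scipy.stats import norm

for city, mean, std in [('Los Angeles', 150, 60), ('New York', 200, 40),
                        ('Dallas', 80, 10), ('Chicago', 100, 20)]:
    p_fail = norm.sf(1.2 * mean, loc=mean, scale=std) * 100   # P(weekly demand > 1.2 * mean)
    print(f"{city:12s}: theoretical fail rate {p_fail:.2f}%")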

import geopandas as gpd
import matplotlib.pyplot as plt
import numpy as np

# --- 1. SETUP ---
url = "https://naciscdn.org/naturalearth/110m/cultural/ne_110m_admin_0_countries.zip"
world = gpd.read_file(url)
north_america = world[world.CONTINENT == "North America"]

cities_data = {
    'City': ['Los Angeles', 'New York', 'Dallas', 'Chicago'],
    'Latitude': [34.05, 40.71, 32.77, 41.87], 
    'Longitude': [-118.24, -74.00, -96.79, -87.62],
    'Mean_Demand': [150, 200, 80, 100],  
    'Volatility': [60, 40, 10, 20]      
}
df = gpd.GeoDataFrame(
    cities_data, 
    geometry=gpd.points_from_xy(cities_data['Longitude'], cities_data['Latitude']),
    crs="EPSG:4326"
)

# --- 2. THE SOLVER ENGINE ---
def solve_for_safety(mean, std_dev, target_risk_percent=5.0):
    """
    Finds the minimum inventory multiplier needed to achieve <= 5% risk.
    """
    SCENARIOS = 2000
    WEEKS = 52
    
    # We start with NO Safety Stock (Multiplier = 1.0)
    multiplier = 1.0
    current_risk = 100.0
    
    # THE SOLVER LOOP: Keep buying more until we are safe
    while current_risk > target_risk_percent:
        
        # 1. Set the Target based on current guess
        TARGET = mean * multiplier
        
        # 2. Run the Year (Fast simulation)
        # Note: We vectorized the logic inside the loop for speed
        demand = np.random.normal(mean, std_dev, (WEEKS, SCENARIOS))
        demand = np.maximum(demand, 0)
        
        # We simplify: If Demand > Target, it's a fail.
        # This assumes "Fresh Start" every week (no backlog), which is faster for solving.
        failures = demand > TARGET 
        total_failures = np.sum(failures)
        
        current_risk = (total_failures / (WEEKS * SCENARIOS)) * 100
        
        # 3. Check and Adjust
        if current_risk > target_risk_percent:
            multiplier += 0.05 # Add 5% more buffer and try again
            
    return multiplier

# --- 3. RUN THE SOLVER ---
print("Solving for 5% Risk across all cities...")
print("(This determines the 'Cost of Safety' for each market)")

# Run the solver for each row
df['Required_Multiplier'] = df.apply(
    lambda row: solve_for_safety(row['Mean_Demand'], row['Volatility']), axis=1
)

# Print the "Bill"
print("\n--- THE COST OF SAFETY (To reach 5% Risk) ---")
print(df[['City', 'Volatility', 'Required_Multiplier']])

# --- 4. VISUALIZE THE INEQUALITY ---
fig, ax = plt.subplots(figsize=(15, 10))
north_america.plot(ax=ax, color='#f0f0f0', edgecolor='white')

# We plot the REQUIRED MULTIPLIER (The Cost)
# Redder/Bigger dots = More Expensive to keep safe
df.plot(ax=ax, 
        column='Required_Multiplier', 
        cmap='Reds',           # Darker Red = Higher Cost
        legend=True, 
        markersize=df['Required_Multiplier'] * 300, # Size represents the stockpile bloat
        edgecolor='black',
        alpha=0.8)

for x, y, label, mult in zip(df.geometry.x, df.geometry.y, df['City'], df['Required_Multiplier']):
    ax.text(x + 1, y, f"{label}\nNeeds: {mult:.2f}x", fontsize=11, fontweight='bold')

plt.title("The Cost of Stability: How much Inventory (x Mean) is needed for 95% Service?")
plt.axis('off')
plt.show()
Solving for 5% Risk across all cities...
(This determines the 'Cost of Safety' for each market)

--- THE COST OF SAFETY (To reach 5% Risk) ---
          City  Volatility  Required_Multiplier
0  Los Angeles          60                 1.70
1     New York          40                 1.35
2       Dallas          10                 1.25
3      Chicago          20                 1.35
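
The solver's bill matches the closed-form safety-stock rule: to hold the weekly stockout risk at 5%, the target must sit at the 95th percentile of demand, i.e. a multiplier of roughly 1 + 1.645 x (std / mean); the solver then lands on the next 0.05 step above that. A short analytical sketch (assuming scipy):

from scipy.stats import norm

z = norm.ppf(0.95)   # ~1.645 for a 5% one-sided risk
for city, mean, std in [('Los Angeles', 150, 60), ('New York', 200, 40),
                        ('Dallas', 80, 10), ('Chicago', 100, 20)]:
    m = 1 + z * (std / mean)   # multiplier that puts TARGET at the 95th demand percentile
    print(f"{city:12s}: needs ~{m:.2f}x before rounding up to the next 0.05 step")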

import numpy as np
import matplotlib.pyplot as plt

# SYSTEM INPUTS (Los Angeles)
MEAN = 150
STD_DEV = 60
WEEKS = 52
SCENARIOS = 5000

# We will test a range of multipliers from 1.0 (Average) to 2.0 (Double)
multipliers = np.linspace(1.0, 2.0, 50)
risk_scores = []

print("Mapping the Risk Curve for Los Angeles...")

for m in multipliers:
    TARGET = MEAN * m
    
    # Vectorized Simulation
    demand = np.random.normal(MEAN, STD_DEV, (WEEKS, SCENARIOS))
    failures = demand > TARGET
    risk = (np.sum(failures) / (WEEKS * SCENARIOS)) * 100
    risk_scores.append(risk)

# PLOT THE CURVE
plt.figure(figsize=(10, 6))
plt.plot(multipliers, risk_scores, linewidth=3, color='blue', label='Stockout Risk')

# The 5% Target Line
plt.axhline(5, color='red', linestyle='--', label='Target Risk (5%)')

# Find the intersection (approximate)
crossing_point = multipliers[np.where(np.array(risk_scores) < 5)[0][0]]
plt.axvline(crossing_point, color='green', linestyle=':', label=f'Required: {crossing_point:.2f}x')

plt.title(f"The Price of Safety (Los Angeles)\nCV = {STD_DEV/MEAN:.2f}")
plt.xlabel("Inventory Multiplier (x Mean)")
plt.ylabel("Risk of Stockout (%)")
plt.legend()
plt.grid(True, alpha=0.3)
plt.show()

print(f"To get 5% risk, you must cross the Green Line at {crossing_point:.2f}x")
Mapping the Risk Curve for Los Angeles...

To get 5% risk, you must cross the Green Line at 1.67x
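
The crossing point reported above is simply the first multiplier on the 50-point grid that dips below 5%, so it slightly overshoots the true requirement. A finer estimate, without rerunning the simulation, comes from interpolating between the two neighbouring grid points; a sketch assuming multipliers and risk_scores from the run above:

risk = np.array(risk_scores)
idx = np.where(risk < 5)[0][0]        # first grid point below the 5% line
x0, x1 = multipliers[idx - 1], multipliers[idx]
y0, y1 = risk[idx - 1], risk[idx]
refined = x0 + (x1 - x0) * (y0 - 5) / (y0 - y1)   # linear interpolation to risk = 5%
print(f"Interpolated requirement: {refined:.3f}x (grid answer: {multipliers[idx]:.2f}x)")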