def totalJCost
Defined in IndisputableMonolith.Information.ShannonEntropy, on GitHub at line 137.
depends on
ProbDist, probJCost
used by
formal source
/-- The total J-cost of a distribution.
    This equals the Shannon entropy! -/
noncomputable def totalJCost {n : ℕ} (d : ProbDist n) : ℝ :=
  Finset.univ.sum fun i =>
    if h : d.probs i > 0 then
      d.probs i * probJCost (d.probs i) h (by
        have := d.normalized
        have hs := Finset.single_le_sum (fun j _ => d.nonneg j) (Finset.mem_univ i)
        simp at hs
        linarith)
    else 0
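A quick sanity check of this definition (a worked example, not from the source; it relies on probJCost p reducing to -log p, as the proof comments below indicate): for the uniform distribution with d.probs i = 1/n for every i,

  totalJCost(d) = \sum_{i=1}^{n} \tfrac{1}{n} \cdot \left(-\log \tfrac{1}{n}\right) = \log n,

which is the familiar Shannon entropy of a uniform distribution over n outcomes.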
/-- **THEOREM (Shannon = J-Cost)**: Shannon entropy equals total J-cost.
    This is the key connection! -/
theorem shannon_equals_jcost {n : ℕ} (d : ProbDist n) :
    shannonEntropy d = totalJCost d := by
  -- Both compute Σ p_i × (-log p_i), just with different notation
  -- shannonEntropy = -(Σ p*log(p)) and totalJCost = Σ p*(-log(p))
  -- These are equal since -(p*log(p)) = p*(-log(p))
  unfold shannonEntropy totalJCost probJCost
  -- Goal: -(Σ if p>0 then p*log(p) else 0) = Σ if p>0 then p*(-log(p)) else 0
  conv_lhs =>
    rw [← Finset.sum_neg_distrib]
  congr 1
  funext i
  by_cases hp : d.probs i > 0
  · simp only [hp, ↓reduceIte, dite_eq_ite, neg_mul, mul_neg, neg_neg]
  · simp only [hp, ↓reduceIte, dite_eq_ite, neg_zero]
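As a small usage sketch (not part of the source; it assumes only the declarations above are in scope), the identity can be applied in either direction, for example to rewrite a total J-cost as an entropy:

example {n : ℕ} (d : ProbDist n) : totalJCost d = shannonEntropy d :=
  -- flip the equality proved by shannon_equals_jcost
  (shannon_equals_jcost d).symm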
/-! ## Information Content -/

/-- Information content (surprisal) of an outcome.
    I(x) = -log(p(x)) = "how surprising is this outcome?" -/
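A worked example of the surprisal formula (not from the source; it assumes the natural logarithm, as computed by Real.log): a fair coin flip has p(heads) = 1/2, so

  I(heads) = -\log \tfrac{1}{2} = \log 2 \approx 0.693 \text{ nats},

while a certain outcome with p(x) = 1 carries I(x) = -\log 1 = 0, i.e. no surprise at all.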