def
definition
shannonEntropy
show as:
view math explainer →
open explainer
Read the cached plain-language explainer.
open lean source
IndisputableMonolith.Information.ShannonEntropy on GitHub at line 67.
browse module
All declarations in this module, on Recognition.
explainer page
depends on
used by
formal source
64
/-- Shannon entropy of a finite probability distribution `d` over `n`
outcomes: `H(d) = -∑ᵢ pᵢ * log pᵢ`, with the standard convention that a
zero-probability outcome contributes `0` (i.e. `0 * log 0 := 0`, enforced
by the `if` guard).  The natural logarithm is used; divide by `log 2` to
obtain the entropy in bits. -/
noncomputable def shannonEntropy {n : ℕ} (d : ProbDist n) : ℝ :=
  -(∑ i, if d.probs i > 0 then d.probs i * Real.log (d.probs i) else 0)
71
/-- **THEOREM**: Entropy is non-negative.

Proof sketch: every summand `pᵢ * log pᵢ` is ≤ 0 (since `0 < pᵢ ≤ 1`
forces `log pᵢ ≤ 0`), so the sum is ≤ 0 and its negation is ≥ 0. -/
theorem entropy_nonneg {n : ℕ} (d : ProbDist n) :
    shannonEntropy d ≥ 0 := by
  unfold shannonEntropy
  -- `neg_nonneg : 0 ≤ -a ↔ a ≤ 0`: reduce the goal to the sum being ≤ 0.
  simp only [neg_nonneg]
  -- It suffices that every summand is nonpositive.
  apply Finset.sum_nonpos
  intro i _
  by_cases h : d.probs i > 0
  · simp only [h, ↓reduceIte]
    -- `pᵢ ≤ 1`: it is a single nonnegative term of a sum that equals 1
    -- (`d.normalized`), bounded via `Finset.single_le_sum`.
    have hp : d.probs i ≤ 1 := by
      have := d.normalized
      have hs := Finset.single_le_sum (fun j _ => d.nonneg j) (Finset.mem_univ i)
      simp at hs
      linarith
    -- `log pᵢ ≤ 0` on `(0, 1]`.
    have hlog : Real.log (d.probs i) ≤ 0 := Real.log_nonpos (le_of_lt h) hp
    -- p * log(p) ≤ 0 for 0 < p ≤ 1
    apply mul_nonpos_of_nonneg_of_nonpos (le_of_lt h) hlog
  -- Zero-probability branch: the summand is literally `0`.
  · simp [h]
90
91/-- **THEOREM**: Maximum entropy is log(n) for uniform distribution. -/
92theorem max_entropy_uniform (n : ℕ) (hn : n > 0) :
93 shannonEntropy (uniform n hn) = Real.log n := by
94 -- H = -n × (1/n) × log(1/n) = -log(1/n) = log(n)
95 unfold shannonEntropy uniform
96 simp only
97 have hn_pos : (0 : ℝ) < n := Nat.cast_pos.mpr hn