{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Chapter 15 : Introduction to information theory"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Page no 687 prob no 15.1"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"collapsed": false
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Length = 1.30 4-ary digits\n",
"Entropy of source is, H = 1.21 4-ary units\n",
"Efficiency of code, N = 0.93 \n"
]
}
],
"source": [
"from math import log\n",
"# Six messages are given. For a 4-ary Huffman code one dummy message of\n",
"# probability 0 is added so the message count satisfies the r + k(r-1)\n",
"# condition. Probabilities: 0.3, 0.25, 0.15, 0.12, 0.1, 0.08 and dummy 0.\n",
"\n",
"p=[.3, .25, .15, .12, .1, .08, 0]# probabilities in descending order (incl. dummy)\n",
"l=[1, 1, 1, 2, 2, 2, 2]# 4-ary code length of each message, same order\n",
"n=len(p)# number of messages (was hard-coded to 5, which skipped two messages)\n",
"\n",
"# Average code length L = sum(p[i]*l[i]) over all messages\n",
"L=0\n",
"for i in range(0,n):\n",
"    L=L+p[i]*l[i]\n",
"\n",
"print \"Length = %0.2f \"%L,'4-ary digits'\n",
"\n",
"# Source entropy H = sum(p[i]*log(1/p[i])); zero-probability entries are\n",
"# skipped because lim p->0 of p*log(1/p) = 0.\n",
"H=0\n",
"for i in range(0,n):\n",
"    if p[i]>0:\n",
"        H=H+p[i]*log(1.0/p[i])\n",
"\n",
"H1=H/log(4)# convert from nats to 4-ary units\n",
"print \"Entropy of source is, H = %0.2f \"%H1,'4-ary units'\n",
"\n",
"# Code efficiency N = H/L\n",
"N=H1/L\n",
"print \"Efficiency of code, N = %0.2f \"%N"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Page no 688 Example no. 15.2"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {
"collapsed": false
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Length = 1.00 \n",
"Entropy of source is, H = 0.72 bit\n",
"Efficiency of code, N = 0.72 \n",
"Length = 0.78 \n",
"Efficiency of code, N = 0.93 \n",
"Length = 0.73 \n",
"Efficiency of code, N = 0.99 \n"
]
}
],
"source": [
"from __future__ import division\n",
"from math import log\n",
"# N=1\n",
"#Here we have given two messages with probabilities m1=0.8 and m2=0.2 . Therefore, Huffman code for the source is simply 0 and 1.\n",
"\n",
"#The length L of this code is calculated as\n",
"N=1#\n",
"p=[.8, .2]##enter probabilities in descending order\n",
"n=len(p)\n",
"l=[1, 1]##code length of individual message according to order\n",
"L=0#\n",
"for i in range(0,n):\n",
" L=L+(p[(i)]*l[(i)])#\n",
"\n",
"print \"Length = %0.2f \"%L\n",
"\n",
"# Entropy of source is calculated as\n",
"H=0#\n",
"for i in range(0,n):\n",
" H=H+(p[(i)]*log(1/p[(i)],2))\n",
"\n",
"print \"Entropy of source is, H = %0.2f bit\"%H\n",
"\n",
"# Efficiency of code is given as \n",
"N1=H/L#\n",
"print \"Efficiency of code, N = %0.2f \"%N1\n",
"\n",
"#for N=2\n",
"#There are four (2**N) combinations and their probabilities obtained by multiplying individuals probability.\n",
"#The length L of this code is calculated as\n",
"N=2#\n",
"p=[0.64, 0.16, 0.16, 0.04]##enter probabilities in descending order\n",
"n=len(p)#\n",
"l=[1 ,2 ,3 ,3]##code length of individual message according to order\n",
"L1=0#\n",
"for i in range(0,n):\n",
" L1=L1+(p[(i)]*l[(i)])#\n",
"\n",
"L=L1/N## word length per message\n",
"print \"Length = %0.2f \"%L\n",
"\n",
"# Efficiency of code is given as \n",
"N2=H/L#\n",
"print \"Efficiency of code, N = %0.2f \"%N2\n",
"\n",
"\n",
"#for N=3\n",
"#There are eight (2**N)combinations and their probabilities obtained by multiplying individuals probability\n",
"#The length L of this code is calculated as\n",
"N=3#\n",
"p=[.512, .128, .128, .128, .032, .032, .032, .008]##enter probabilities in descending order\n",
"n=len(p)#\n",
"l=[1, 3 ,3 ,3, 5, 5 ,5 ,5]##code length of individual message according to order\n",
"L1=0\n",
"for i in range(0,n):\n",
" L1=L1+(p[(i)]*l[(i)])#\n",
"\n",
"L=L1/N## word length per message\n",
"print \"Length = %0.2f \"%L\n",
"\n",
"# Efficiency of code is given as \n",
"N3=H/L#\n",
"print \"Efficiency of code, N = %0.2f \"%N3"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## page no 702 prob no 15.4"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {
"collapsed": false
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"entropy = 1.00 bits\n",
"entropy = 2.00 bits\n",
" if x and y have equal absolute entropies,their relative (differential) entropies must differ by 1 bit \n"
]
}
],
"source": [
"from __future__ import division\n",
"from math import log\n",
"from mpmath import quad\n",
"\n",
"\n",
"x0=(-1)#\n",
"x1=1##given\n",
"y0=(-2)#\n",
"y1=2##given\n",
"G=2##gain of amplifier\n",
"#the probbilities are given as P(x)=1/2 for |x|<1 & P(y)=1/4 for |y<2| otherwise P(x)=P(y)=0.\n",
"#P(x<1 & -x<1)=1/2#\n",
"#P(y<2 & -y<2)=1/4#\n",
"# hence entropies are given as\n",
"g1=(1./2)*log(2,2)#\n",
"g2=(1./4)*log(4,2)# \n",
"X=quad(lambda x:g1*1,[x0,x1])\n",
"Y=quad(lambda y:g2*1,[y0,y1])\n",
"print \"entropy = %0.2f bits\"%X\n",
"print \"entropy = %0.2f bits\"%Y\n",
"#Here the entropy of random variable 'y' is twice that of the 'x'.This results may come as a surprise,since a knowledge of 'x' uniquely determines 'y' and vice versa , since y=2x.Hence , the average uncertainty of x and y should be identical.\n",
"# The reference entropy R1 for x is -log dx ,and The reference entropy R2 for y is -log dy (in the limit as dx,dy->0 ).\n",
"# R1= lim (dx->0) -log dx\n",
"#R2= lim (dy->0) -log dy\n",
"#and R1-R2 = lim(dx,dy->0) log(dx/dy) = log (dy/dx) = log2 2 =1 bit\n",
"#Therefore,the reference entropy of x is higher than the reference entropy for y. Hence we conclude that \n",
"print \" if x and y have equal absolute entropies,their relative (differential) entropies must differ by 1 bit \""
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 2",
"language": "python",
"name": "python2"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 2
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython2",
"version": "2.7.9"
}
},
"nbformat": 4,
"nbformat_minor": 0
}