scripts init

Jinghao Shi
2017-04-03 12:52:21 -04:00
parent 9edf1899bd
commit 20a33eb560
31 changed files with 4729 additions and 0 deletions


@ -0,0 +1,27 @@
"""
CommPy
================================================
Contents
--------
Subpackages
-----------
::
channelcoding --- Channel Coding Algorithms [*]
"""
#from channelcoding import *
from commpy.filters import *
from commpy.modulation import *
from commpy.impairments import *
from commpy.sequences import *
from commpy.channels import *
try:
from numpy.testing import Tester
test = Tester().test
except:
pass


@ -0,0 +1,76 @@
"""
============================================
Channel Coding (:mod:`commpy.channelcoding`)
============================================
.. module:: commpy.channelcoding
Galois Fields
=============
.. autosummary::
:toctree: generated/
GF -- Class representing a Galois Field object.
Algebraic Codes
===============
.. autosummary::
:toctree: generated/
cyclic_code_genpoly -- Generate a cyclic code generator polynomial.
Convolutional Codes
===================
.. autosummary::
:toctree: generated/
Trellis -- Class representing convolutional code trellis.
conv_encode -- Convolutional Encoder.
viterbi_decode -- Convolutional Decoder using the Viterbi algorithm.
Turbo Codes
===========
.. autosummary::
:toctree: generated/
turbo_encode -- Turbo Encoder.
map_decode -- Convolutional Code decoder using MAP algorithm.
turbo_decode -- Turbo Decoder.
LDPC Codes
==========
.. autosummary::
:toctree: generated/
get_ldpc_code_params -- Extract parameters from LDPC code design file.
ldpc_bp_decode -- LDPC Code Decoder using Belief propagation.
Interleavers and De-interleavers
================================
.. autosummary::
:toctree: generated/
RandInterlv -- Random Interleaver.
"""
from commpy.channelcoding.convcode import Trellis, conv_encode, viterbi_decode
from commpy.channelcoding.interleavers import *
from commpy.channelcoding.turbo import turbo_encode, map_decode, turbo_decode
from commpy.channelcoding.ldpc import get_ldpc_code_params, ldpc_bp_decode
from commpy.channelcoding.gfields import *
from commpy.channelcoding.algcode import *
try:
from numpy.testing import Tester
test = Tester().test
except:
pass
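# A minimal usage sketch for the names re-exported above (a rate-1/2 code with
# G(D) = [1+D^2, 1+D+D^2]; see commpy.channelcoding.convcode for the full API):
#
#     >>> from numpy import array
#     >>> from numpy.random import randint
#     >>> from commpy.channelcoding import Trellis, conv_encode, viterbi_decode
#     >>> trellis = Trellis(array([2]), array([[0o5, 0o7]]))
#     >>> message_bits = randint(0, 2, 100)
#     >>> coded_bits = conv_encode(message_bits, trellis)
#     >>> decoded_bits = viterbi_decode(coded_bits.astype(float), trellis, tb_depth=15)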


@ -0,0 +1,73 @@
# Authors: Veeresh Taranalli <veeresht@gmail.com>
# License: BSD 3-Clause
from fractions import gcd
from numpy import array, arange, concatenate, convolve
from commpy.channelcoding.gfields import GF, polymultiply, poly_to_string
from commpy.utilities import dec2bitarray, bitarray2dec
__all__ = ['cyclic_code_genpoly']
def cyclic_code_genpoly(n, k):
"""
Generate all possible generator polynomials for an (n, k)-cyclic code.
Parameters
----------
n : int
Code blocklength of the cyclic code.
k : int
Information blocklength of the cyclic code.
Returns
-------
poly_list : 1D ndarray of ints
A list of generator polynomials (represented as integers) for the (n, k)-cyclic code.
"""
if n%2 == 0:
raise ValueError("n cannot be an even number")
for m in arange(1, 18):
if (2**m-1)%n == 0:
break
x_gf = GF(arange(1, 2**m), m)
coset_fields = x_gf.cosets()
coset_leaders = array([])
minpol_degrees = array([])
for field in coset_fields:
coset_leaders = concatenate((coset_leaders, array([field.elements[0]])))
minpol_degrees = concatenate((minpol_degrees, array([len(field.elements)])))
y_gf = GF(coset_leaders, m)
minpol_list = y_gf.minpolys()
idx_list = arange(1, len(minpol_list))
poly_list = array([])
for i in range(1, 2**len(minpol_list)):
i_array = dec2bitarray(i, len(minpol_list))
subset_array = minpol_degrees[i_array == 1]
if int(subset_array.sum()) == (n-k):
poly_set = minpol_list[i_array == 1]
gpoly = 1
for poly in poly_set:
gpoly_array = dec2bitarray(gpoly, 2**m)
poly_array = dec2bitarray(poly, 2**m)
gpoly = bitarray2dec(convolve(gpoly_array, poly_array) % 2)
poly_list = concatenate((poly_list, array([gpoly])))
return poly_list.astype(int)
if __name__ == "__main__":
genpolys = cyclic_code_genpoly(31, 21)
for poly in genpolys:
print(poly_to_string(poly))
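# For reference, tests/test_algcode.py expects this search to return 3 generator
# polynomials for a (15, 4) cyclic code and 15 for the (31, 21) code used above,
# each of degree n - k.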


@ -0,0 +1,573 @@
# Authors: Veeresh Taranalli <veeresht@gmail.com>
# License: BSD 3-Clause
""" Algorithms for Convolutional Codes """
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import PatchCollection
import matplotlib.patches as mpatches
from commpy.utilities import dec2bitarray, bitarray2dec, hamming_dist, euclid_dist
#from commpy.channelcoding.acstb import acs_traceback
__all__ = ['Trellis', 'conv_encode', 'viterbi_decode']
class Trellis:
"""
Class defining a Trellis corresponding to a k/n - rate convolutional code.
Parameters
----------
memory : 1D ndarray of ints
Number of memory elements per input of the convolutional encoder.
g_matrix : 2D ndarray of ints (octal representation)
Generator matrix G(D) of the convolutional encoder. Each element of
G(D) represents a polynomial.
feedback : int, optional
Feedback polynomial of the convolutional encoder. Default value is 0.
code_type : {'default', 'rsc'}, optional
Use 'rsc' to generate a recursive systematic convolutional code.
If 'rsc' is specified, then the first 'k x k' sub-matrix of
G(D) must represent an identity matrix along with a non-zero
feedback polynomial.
Attributes
----------
k : int
Size of the smallest block of input bits that can be encoded using
the convolutional code.
n : int
Size of the smallest block of output bits generated using
the convolutional code.
total_memory : int
Total number of delay elements needed to implement the convolutional
encoder.
number_states : int
Number of states in the convolutional code trellis.
number_inputs : int
Number of branches from each state in the convolutional code trellis.
next_state_table : 2D ndarray of ints
Table representing the state transition matrix of the
convolutional code trellis. Rows represent current states and
columns represent current inputs in decimal. Elements represent the
corresponding next states in decimal.
output_table : 2D ndarray of ints
Table representing the output matrix of the convolutional code trellis.
Rows represent current states and columns represent current inputs in
decimal. Elements represent corresponding outputs in decimal.
Examples
--------
>>> from numpy import array
>>> import commpy.channelcoding.convcode as cc
>>> memory = array([2])
>>> g_matrix = array([[0o5, 0o7]]) # G(D) = [1+D^2, 1+D+D^2]
>>> trellis = cc.Trellis(memory, g_matrix)
>>> print trellis.k
1
>>> print trellis.n
2
>>> print trellis.total_memory
2
>>> print trellis.number_states
4
>>> print trellis.number_inputs
2
>>> print trellis.next_state_table
[[0 2]
[0 2]
[1 3]
[1 3]]
>>> print trellis.output_table
[[0 3]
[3 0]
[1 2]
[2 1]]
"""
def __init__(self, memory, g_matrix, feedback = 0, code_type = 'default'):
[self.k, self.n] = g_matrix.shape
if code_type == 'rsc':
for i in range(self.k):
g_matrix[i][i] = feedback
self.total_memory = memory.sum()
self.number_states = pow(2, self.total_memory)
self.number_inputs = pow(2, self.k)
self.next_state_table = np.zeros([self.number_states,
self.number_inputs], 'int')
self.output_table = np.zeros([self.number_states,
self.number_inputs], 'int')
# Compute the entries in the next state table and the output table
for current_state in range(self.number_states):
for current_input in range(self.number_inputs):
outbits = np.zeros(self.n, 'int')
# Compute the values in the output_table
for r in range(self.n):
output_generator_array = np.zeros(self.k, 'int')
shift_register = dec2bitarray(current_state,
self.total_memory)
for l in range(self.k):
# Convert the number representing a polynomial into a
# bit array
generator_array = dec2bitarray(g_matrix[l][r],
memory[l]+1)
# Loop over M delay elements of the shift register
# to compute their contribution to the r-th output
for i in range(memory[l]):
outbits[r] = (outbits[r] + \
(shift_register[i+l]*generator_array[i+1])) % 2
output_generator_array[l] = generator_array[0]
if l == 0:
feedback_array = (dec2bitarray(feedback, memory[l]) * shift_register[0:memory[l]]).sum()
shift_register[1:memory[l]] = \
shift_register[0:memory[l]-1]
shift_register[0] = (dec2bitarray(current_input,
self.k)[0] + feedback_array) % 2
else:
feedback_array = (dec2bitarray(feedback, memory[l]) *
shift_register[l+memory[l-1]-1:l+memory[l-1]+memory[l]-1]).sum()
shift_register[l+memory[l-1]:l+memory[l-1]+memory[l]-1] = \
shift_register[l+memory[l-1]-1:l+memory[l-1]+memory[l]-2]
shift_register[l+memory[l-1]-1] = \
(dec2bitarray(current_input, self.k)[l] + feedback_array) % 2
# Compute the contribution of the current_input to output
outbits[r] = (outbits[r] + \
(np.sum(dec2bitarray(current_input, self.k) * \
output_generator_array + feedback_array) % 2)) % 2
# Update the output_table using the computed output value
self.output_table[current_state][current_input] = \
bitarray2dec(outbits)
# Update the next_state_table using the new state of
# the shift register
self.next_state_table[current_state][current_input] = \
bitarray2dec(shift_register)
def _generate_grid(self, trellis_length):
""" Private method """
grid = np.mgrid[0.12:0.22*trellis_length:(trellis_length+1)*(0+1j),
0.1:0.1+self.number_states*0.1:self.number_states*(0+1j)].reshape(2, -1)
return grid
def _generate_states(self, trellis_length, grid, state_order, state_radius, font):
""" Private method """
state_patches = []
for state_count in range(self.number_states * trellis_length):
state_patch = mpatches.Circle(grid[:,state_count], state_radius,
color="#003399", ec="#cccccc")
state_patches.append(state_patch)
plt.text(grid[0, state_count], grid[1, state_count]-0.02,
str(state_order[state_count % self.number_states]),
ha="center", family=font, size=20, color="#ffffff")
return state_patches
def _generate_edges(self, trellis_length, grid, state_order, state_radius, edge_colors):
""" Private method """
edge_patches = []
for current_time_index in range(trellis_length-1):
grid_subset = grid[:,self.number_states * current_time_index:]
for state_count_1 in range(self.number_states):
input_count = 0
for state_count_2 in range(self.number_states):
dx = grid_subset[0, state_count_2+self.number_states] - grid_subset[0,state_count_1] - 2*state_radius
dy = grid_subset[1, state_count_2+self.number_states] - grid_subset[1,state_count_1]
if np.count_nonzero(self.next_state_table[state_order[state_count_1],:] == state_order[state_count_2]):
found_index = np.where(self.next_state_table[state_order[state_count_1],:] ==
state_order[state_count_2])
edge_patch = mpatches.FancyArrow(grid_subset[0,state_count_1]+state_radius,
grid_subset[1,state_count_1], dx, dy, width=0.005,
length_includes_head = True, color = edge_colors[found_index[0][0]])
edge_patches.append(edge_patch)
input_count = input_count + 1
return edge_patches
def _generate_labels(self, grid, state_order, state_radius, font):
""" Private method """
for state_count in range(self.number_states):
for input_count in range(self.number_inputs):
edge_label = str(input_count) + "/" + str(
self.output_table[state_order[state_count], input_count])
plt.text(grid[0, state_count]-1.5*state_radius,
grid[1, state_count]+state_radius*(1-input_count-0.7),
edge_label, ha="center", family=font, size=14)
def visualize(self, trellis_length = 2, state_order = None,
state_radius = 0.04, edge_colors = None):
""" Plot the trellis diagram.
Parameters
----------
trellis_length : int, optional
Specifies the number of time steps in the trellis diagram.
Default value is 2.
state_order : list of ints, optional
Specifies the order in the which the states of the trellis
are to be displayed starting from the top in the plot.
Default order is [0,...,number_states-1]
state_radius : float, optional
Radius of each state (circle) in the plot.
Default value is 0.04
edge_colors : list of hex color codes, optional
A list of length equal to the number_inputs,
containing color codes that represent the edge corresponding
to the input.
"""
if edge_colors is None:
edge_colors = ["#9E1BE0", "#06D65D"]
if state_order is None:
state_order = range(self.number_states)
font = "sans-serif"
fig = plt.figure()
ax = plt.axes([0,0,1,1])
trellis_patches = []
state_order.reverse()
trellis_grid = self._generate_grid(trellis_length)
state_patches = self._generate_states(trellis_length, trellis_grid,
state_order, state_radius, font)
edge_patches = self._generate_edges(trellis_length, trellis_grid,
state_order, state_radius,
edge_colors)
self._generate_labels(trellis_grid, state_order, state_radius, font)
trellis_patches.extend(state_patches)
trellis_patches.extend(edge_patches)
collection = PatchCollection(trellis_patches, match_original=True)
ax.add_collection(collection)
ax.set_xticks([])
ax.set_yticks([])
#plt.legend([edge_patches[0], edge_patches[1]], ["1-input", "0-input"])
plt.show()
def conv_encode(message_bits, trellis, code_type = 'default', puncture_matrix=None):
"""
Encode bits using a convolutional code.
Parameters
----------
message_bits : 1D ndarray containing {0, 1}
Stream of bits to be convolutionally encoded.
trellis : Trellis object
Trellis representation of the convolutional code with which the input
bits are to be encoded.
code_type : {'default', 'rsc'}, optional
Use 'rsc' when encoding with a recursive systematic convolutional code.
puncture_matrix : 2D ndarray of ints, optional
Puncturing pattern for the encoder output. Default is no puncturing.
Returns
-------
coded_bits : 1D ndarray containing {0, 1}
Encoded bit stream.
"""
k = trellis.k
n = trellis.n
total_memory = trellis.total_memory
rate = float(k)/n
if puncture_matrix is None:
puncture_matrix = np.ones((trellis.k, trellis.n))
number_message_bits = np.size(message_bits)
# Initialize an array to contain the message bits plus the truncation zeros
if code_type == 'default':
inbits = np.zeros(number_message_bits + total_memory + total_memory % k,
'int')
number_inbits = number_message_bits + total_memory + total_memory % k
# Pad the input bits with zeros to terminate the trellis
inbits[0:number_message_bits] = message_bits
number_outbits = int(number_inbits/rate)
else:
inbits = message_bits
number_inbits = number_message_bits
number_outbits = int((number_inbits + total_memory)/rate)
outbits = np.zeros(number_outbits, 'int')
p_outbits = np.zeros(int(number_outbits*
puncture_matrix[0:].sum()/np.size(puncture_matrix, 1)), 'int')
next_state_table = trellis.next_state_table
output_table = trellis.output_table
# Encoding process - Each iteration of the loop represents one clock cycle
current_state = 0
j = 0
for i in range(int(number_inbits/k)): # Loop through all input bits
current_input = bitarray2dec(inbits[i*k:(i+1)*k])
current_output = output_table[current_state][current_input]
outbits[j*n:(j+1)*n] = dec2bitarray(current_output, n)
current_state = next_state_table[current_state][current_input]
j += 1
if code_type == 'rsc':
term_bits = dec2bitarray(current_state, trellis.total_memory)
term_bits = term_bits[::-1]
for i in range(trellis.total_memory):
current_input = bitarray2dec(term_bits[i*k:(i+1)*k])
current_output = output_table[current_state][current_input]
outbits[j*n:(j+1)*n] = dec2bitarray(current_output, n)
current_state = next_state_table[current_state][current_input]
j += 1
j = 0
for i in range(number_outbits):
if puncture_matrix[0][i % np.size(puncture_matrix, 1)] == 1:
p_outbits[j] = outbits[i]
j = j + 1
return p_outbits
# Find all (row, col) positions of search_value in inarray; the positions are
# written into index_array and the number of matches found is returned.
def _where_c(inarray, rows, cols, search_value, index_array):
#cdef int i, j,
number_found = 0
for i in range(rows):
for j in range(cols):
if inarray[i, j] == search_value:
index_array[number_found, 0] = i
index_array[number_found, 1] = j
number_found += 1
return number_found
# One Add-Compare-Select (ACS) step of the Viterbi decoder for time instant t,
# followed by a traceback once the traceback depth has been reached.
def _acs_traceback(r_codeword, trellis, decoding_type,
path_metrics, paths, decoded_symbols,
decoded_bits, tb_count, t, count,
tb_depth, current_number_states):
#cdef int state_num, i, j, number_previous_states, previous_state, \
# previous_input, i_codeword, number_found, min_idx, \
# current_state, dec_symbol
k = trellis.k
n = trellis.n
number_states = trellis.number_states
number_inputs = trellis.number_inputs
branch_metric = 0.0
next_state_table = trellis.next_state_table
output_table = trellis.output_table
pmetrics = np.empty(number_inputs)
i_codeword_array = np.empty(n, 'int')
index_array = np.empty([number_states, 2], 'int')
decoded_bitarray = np.empty(k, 'int')
# Loop over all the current states (Time instant: t)
for state_num in range(current_number_states):
# Using the next state table find the previous states and inputs
# leading into the current state (Trellis)
number_found = _where_c(next_state_table, number_states, number_inputs, state_num, index_array)
# Loop over all the previous states (Time instant: t-1)
for i in range(number_found):
previous_state = index_array[i, 0]
previous_input = index_array[i, 1]
# Using the output table, find the ideal codeword
i_codeword = output_table[previous_state, previous_input]
#dec2bitarray_c(i_codeword, n, i_codeword_array)
i_codeword_array = dec2bitarray(i_codeword, n)
# Compute Branch Metrics
if decoding_type == 'hard':
#branch_metric = hamming_dist_c(r_codeword.astype(int), i_codeword_array.astype(int), n)
branch_metric = hamming_dist(r_codeword.astype(int), i_codeword_array.astype(int))
elif decoding_type == 'soft':
pass
elif decoding_type == 'unquantized':
i_codeword_array = 2*i_codeword_array - 1
branch_metric = euclid_dist(r_codeword, i_codeword_array)
else:
pass
# ADD operation: Add the branch metric to the
# accumulated path metric and store it in the temporary array
pmetrics[i] = path_metrics[previous_state, 0] + branch_metric
# COMPARE and SELECT operations
# Compare and Select the minimum accumulated path metric
path_metrics[state_num, 1] = pmetrics.min()
# Store the previous state corresponding to the minimum
# accumulated path metric
min_idx = pmetrics.argmin()
paths[state_num, tb_count] = index_array[min_idx, 0]
# Store the previous input corresponding to the minimum
# accumulated path metric
decoded_symbols[state_num, tb_count] = index_array[min_idx, 1]
if t >= tb_depth - 1:
current_state = path_metrics[:,1].argmin()
# Traceback Loop
for j in reversed(range(1, tb_depth)):
dec_symbol = decoded_symbols[current_state, j]
previous_state = paths[current_state, j]
decoded_bitarray = dec2bitarray(dec_symbol, k)
decoded_bits[(t-tb_depth-1)+(j+1)*k+count:(t-tb_depth-1)+(j+2)*k+count] = \
decoded_bitarray
current_state = previous_state
paths[:,0:tb_depth-1] = paths[:,1:]
decoded_symbols[:,0:tb_depth-1] = decoded_symbols[:,1:]
def viterbi_decode(coded_bits, trellis, tb_depth=None, decoding_type='hard'):
"""
Decodes a stream of convolutionally encoded bits using the Viterbi Algorithm
Parameters
----------
coded_bits : 1D ndarray
Stream of convolutionally encoded bits which are to be decoded.
trellis : Trellis object
Trellis representation of the convolutional code with which the
input bits were encoded.
tb_depth : int, optional
Traceback depth, typically set to about 5 times the total memory of
the encoder. Defaults to 5*total_memory.
decoding_type : str {'hard', 'unquantized'}
The type of decoding to be used.
'hard' option is used for hard inputs (bits) to the decoder, e.g., BSC channel.
'unquantized' option is used for soft inputs (real numbers) to the decoder, e.g., BAWGN channel.
Returns
-------
decoded_bits : 1D ndarray
Decoded bit stream.
References
----------
.. [1] Todd K. Moon. Error Correction Coding: Mathematical Methods and
Algorithms. John Wiley and Sons, 2005.
"""
# k = Rows in G(D), n = columns in G(D)
k = trellis.k
n = trellis.n
rate = float(k)/n
total_memory = trellis.total_memory
number_states = trellis.number_states
number_inputs = trellis.number_inputs
if tb_depth is None:
tb_depth = 5*total_memory
next_state_table = trellis.next_state_table
output_table = trellis.output_table
# Number of message bits after decoding
L = int(len(coded_bits)*rate)
path_metrics = np.empty([number_states, 2])
path_metrics[:, :] = 1000000
path_metrics[0][0] = 0
paths = np.empty([number_states, tb_depth], 'int')
paths[:, :] = 1000000
paths[0][0] = 0
decoded_symbols = np.zeros([number_states, tb_depth], 'int')
decoded_bits = np.zeros(L+tb_depth+k, 'int')
r_codeword = np.zeros(n, 'int')
tb_count = 1
count = 0
current_number_states = number_states
for t in range(1, int((L+total_memory+total_memory%k)/k) + 1):
# Get the received codeword corresponding to t
if t <= L:
r_codeword = coded_bits[(t-1)*n:t*n]
else:
if decoding_type == 'hard':
r_codeword[:] = 0
elif decoding_type == 'soft':
pass
elif decoding_type == 'unquantized':
r_codeword[:] = 0
r_codeword = 2*r_codeword - 1
else:
pass
_acs_traceback(r_codeword, trellis, decoding_type, path_metrics, paths,
decoded_symbols, decoded_bits, tb_count, t, count, tb_depth,
current_number_states)
if t >= tb_depth - 1:
tb_count = tb_depth - 1
count = count + k - 1
else:
tb_count = tb_count + 1
# Path metrics (at t-1) = Path metrics (at t)
path_metrics[:, 0] = path_metrics[:, 1]
# Force all the paths back to '0' state at the end of decoding
if t == (L+total_memory+total_memory%k)/k:
current_number_states = 1
return decoded_bits[0:len(decoded_bits)-tb_depth-1]
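if __name__ == "__main__":
    # Minimal round-trip sketch (mirrors tests/test_convcode.py): encode random
    # bits with the G(D) = [1+D^2, 1+D+D^2] code and recover them with hard-
    # decision Viterbi decoding. The last total_memory decoded bits correspond
    # to the trellis termination and are dropped before the comparison.
    from numpy.random import randint
    memory = np.array([2])
    g_matrix = np.array([[0o5, 0o7]])
    trellis = Trellis(memory, g_matrix)
    message_bits = randint(0, 2, 1000)
    coded_bits = conv_encode(message_bits, trellis)
    decoded_bits = viterbi_decode(coded_bits.astype(float), trellis, tb_depth=15)
    print(np.array_equal(decoded_bits[:-2], message_bits))   # expected: True
    # trellis.visualize() would additionally plot the trellis diagram.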


@ -0,0 +1,148 @@
96 48
3 6
3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3
6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
10 30 40
5 32 45
16 18 39
12 22 38
15 19 47
2 17 34
9 24 42
1 29 33
4 27 36
3 26 35
11 31 43
7 21 44
8 20 48
14 23 46
6 28 37
13 25 41
14 32 43
5 23 37
2 31 36
1 28 34
7 25 47
10 21 33
15 30 35
16 26 48
3 22 46
12 20 41
8 18 38
4 19 45
6 24 40
9 27 39
13 17 42
11 29 44
8 24 34
6 25 36
9 19 43
1 20 46
14 27 42
7 22 39
13 18 35
4 26 40
16 29 38
15 21 48
11 23 45
3 17 47
5 28 44
12 32 33
2 30 41
10 31 37
10 18 36
4 23 44
9 29 40
2 27 38
8 30 42
12 28 43
11 20 37
1 19 35
15 31 39
16 32 41
5 26 33
3 25 45
13 21 34
14 24 48
7 17 46
6 22 47
7 27 40
11 18 33
2 32 35
10 28 47
5 24 41
12 25 37
3 19 39
14 31 44
16 30 34
13 20 38
9 22 36
6 17 45
4 21 42
15 29 46
8 26 43
1 23 48
1 25 42
15 22 40
8 21 41
9 18 47
6 27 43
11 30 46
7 31 35
5 20 36
14 17 38
16 28 45
4 32 37
13 23 33
12 26 44
3 29 48
2 24 39
10 19 34
8 20 36 56 80 81
6 19 47 52 67 95
10 25 44 60 71 94
9 28 40 50 77 91
2 18 45 59 69 88
15 29 34 64 76 85
12 21 38 63 65 87
13 27 33 53 79 83
7 30 35 51 75 84
1 22 48 49 68 96
11 32 43 55 66 86
4 26 46 54 70 93
16 31 39 61 74 92
14 17 37 62 72 89
5 23 42 57 78 82
3 24 41 58 73 90
6 31 44 63 76 89
3 27 39 49 66 84
5 28 35 56 71 96
13 26 36 55 74 88
12 22 42 61 77 83
4 25 38 64 75 82
14 18 43 50 80 92
7 29 33 62 69 95
16 21 34 60 70 81
10 24 40 59 79 93
9 30 37 52 65 85
15 20 45 54 68 90
8 32 41 51 78 94
1 23 47 53 73 86
11 19 48 57 72 87
2 17 46 58 67 91
8 22 46 59 66 92
6 20 33 61 73 96
10 23 39 56 67 87
9 19 34 49 75 88
15 18 48 55 70 91
4 27 41 52 74 89
3 30 38 57 71 95
1 29 40 51 65 82
16 26 47 58 69 83
7 31 37 53 77 81
11 17 35 54 79 85
12 32 45 50 72 93
2 28 43 60 76 90
14 25 36 63 78 86
5 21 44 64 68 84
13 24 42 62 80 94


@ -0,0 +1,148 @@
96 48
3 6
3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3
6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
47 4 21
33 38 31
11 1 33
3 48 37
42 9 36
17 22 7
48 15 13
40 28 47
22 42 5
28 33 30
27 18 19
2 34 10
38 41 27
18 7 32
16 32 45
26 24 1
25 16 22
35 25 34
37 2 11
21 3 39
34 21 28
12 13 6
1 39 38
9 8 12
44 12 48
29 14 9
31 29 26
5 46 14
36 6 24
46 23 3
45 30 4
24 11 8
23 10 42
7 35 43
32 19 41
19 20 25
15 47 46
39 31 2
13 43 20
43 40 15
8 5 35
4 26 44
6 37 17
10 45 18
20 27 29
30 17 16
41 36 23
14 44 40
7 31 42
25 23 21
22 34 41
42 3 19
40 35 27
21 19 17
4 8 28
35 45 31
2 28 32
37 30 9
38 40 30
34 36 13
33 46 10
32 12 40
18 41 11
17 1 2
45 39 29
9 48 4
47 11 34
19 29 24
44 17 5
15 2 3
16 21 33
11 20 44
20 9 47
23 47 38
24 16 12
41 24 37
39 5 43
6 43 23
31 10 16
48 33 35
28 18 48
8 42 18
36 32 8
14 6 25
29 15 36
46 38 26
5 4 6
27 44 22
26 22 45
43 27 1
10 25 39
12 14 7
13 7 46
30 13 14
3 26 20
1 37 15
23 96 3 64 16 90
12 57 19 70 38 64
4 95 20 52 30 70
42 55 1 87 31 66
28 87 41 77 9 69
43 78 29 84 22 87
34 49 14 93 6 92
41 82 24 55 32 83
24 66 5 73 26 58
44 91 33 79 12 61
3 72 32 67 19 63
22 92 25 62 24 75
39 93 22 94 7 60
48 84 26 92 28 94
37 70 7 85 40 96
15 71 17 75 46 79
6 64 46 69 43 54
14 63 11 81 44 82
36 68 35 54 11 52
45 73 36 72 39 95
20 54 21 71 1 50
9 51 6 89 17 88
33 74 30 50 47 78
32 75 16 76 29 68
17 50 18 91 36 84
16 89 42 95 27 86
11 88 45 90 13 53
10 81 8 57 21 55
26 85 27 68 45 65
46 94 31 58 10 59
27 79 38 49 2 56
35 62 15 83 14 57
2 61 10 80 3 71
21 60 12 51 18 67
18 56 34 53 41 80
29 83 47 60 5 85
19 58 43 96 4 76
13 59 2 86 23 74
38 77 23 65 20 91
8 53 40 59 48 62
47 76 13 63 35 51
5 52 9 82 33 49
40 90 39 78 34 77
25 69 48 88 42 72
31 65 44 56 15 89
30 86 28 61 37 93
1 67 37 74 8 73
7 80 4 66 25 81


@ -0,0 +1,196 @@
# Authors: Veeresh Taranalli <veeresht@gmail.com>
# License: BSD 3-Clause
""" Galois Fields """
from fractions import gcd
from numpy import array, zeros, arange, convolve, ndarray, concatenate
from itertools import *
from commpy.utilities import dec2bitarray, bitarray2dec
__all__ = ['GF', 'polydivide', 'polymultiply', 'poly_to_string']
class GF:
"""
Defines a binary Galois field of order m, containing x,
where x can be a single element or a list of elements within the field.
Parameters
----------
x : int or 1D ndarray of ints
Represents the Galois field element(s).
m : int
Specifies the order of the Galois Field.
Returns
-------
x : GF object
A Galois Field GF(2\ :sup:`m`) object.
Examples
--------
>>> from numpy import arange
>>> from commpy.channelcoding.gfields import GF
>>> x = arange(16)
>>> m = 4
>>> x = GF(x, m)
>>> print x.elements
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
>>> print x.prim_poly
19
"""
# Initialization
def __init__(self, x, m):
self.m = m
primpoly_array = array([0, 3, 7, 11, 19, 37, 67, 137, 285, 529, 1033,
2053, 4179, 8219, 17475, 32771, 69643])
self.prim_poly = primpoly_array[self.m]
if type(x) is int and x >= 0 and x < pow(2, m):
self.elements = array([x])
elif type(x) is ndarray and len(x) >= 1:
self.elements = x
# Overloading addition operator for Galois Field
def __add__(self, x):
if len(self.elements) == len(x.elements):
return GF(self.elements ^ x.elements, self.m)
else:
raise ValueError("The arguments should have the same number of elements")
# Overloading multiplication operator for Galois Field
def __mul__(self, x):
if len(x.elements) == len(self.elements):
prod_elements = arange(len(self.elements))
for i in range(len(self.elements)):
prod_elements[i] = polymultiply(self.elements[i], x.elements[i], self.m, self.prim_poly)
return GF(prod_elements, self.m)
else:
raise ValueError("Two sets of elements cannot be multiplied")
def power_to_tuple(self):
"""
Convert Galois field elements from power form to tuple form representation.
"""
y = zeros(len(self.elements))
for idx, i in enumerate(self.elements):
if 2**i < 2**self.m:
y[idx] = 2**i
else:
y[idx] = polydivide(2**i, self.prim_poly)
return GF(y, self.m)
def tuple_to_power(self):
"""
Convert Galois field elements from tuple form to power form representation.
"""
y = zeros(len(self.elements))
for idx, i in enumerate(self.elements):
if i != 0:
init_state = 1
cur_state = 1
power = 0
while cur_state != i:
cur_state = ((cur_state << 1) & (2**self.m-1)) ^ (-((cur_state & 2**(self.m-1)) >> (self.m - 1)) &
(self.prim_poly & (2**self.m-1)))
power+=1
y[idx] = power
else:
y[idx] = 0
return GF(y, self.m)
def order(self):
"""
Compute the orders of the Galois field elements.
"""
orders = zeros(len(self.elements))
power_gf = self.tuple_to_power()
for idx, i in enumerate(power_gf.elements):
orders[idx] = (2**self.m - 1)/(gcd(i, 2**self.m-1))
return orders
def cosets(self):
"""
Compute the cyclotomic cosets of the Galois field.
"""
coset_list = []
x = self.tuple_to_power().elements
mark_list = zeros(len(x))
coset_count = 1
for idx in range(len(x)):
if mark_list[idx] == 0:
a = x[idx]
mark_list[idx] = coset_count
i = 1
while (a*(2**i) % (2**self.m-1)) != a:
for idx2 in range(len(x)):
if (mark_list[idx2] == 0) and (x[idx2] == a*(2**i)%(2**self.m-1)):
mark_list[idx2] = coset_count
i+=1
coset_count+=1
for counts in range(1, coset_count):
coset_list.append(GF(self.elements[mark_list==counts], self.m))
return coset_list
def minpolys(self):
"""
Compute the minimal polynomials for all elements of the Galois field.
"""
minpol_list = array([])
full_gf = GF(arange(2**self.m), self.m)
full_cosets = full_gf.cosets()
for x in self.elements:
for i in range(len(full_cosets)):
if x in full_cosets[i].elements:
t = array([1, full_cosets[i].elements[0]])[::-1]
for root in full_cosets[i].elements[1:]:
t2 = concatenate((zeros(len(t)-1), array([1, root]), zeros(len(t)-1)))
prod_poly = array([])
for n in range(len(t2)-len(t)+1):
root_sum = 0
for k in range(len(t)):
root_sum = root_sum ^ polymultiply(int(t[k]), int(t2[n+k]), self.m, self.prim_poly)
prod_poly = concatenate((prod_poly, array([root_sum])))
t = prod_poly[::-1]
minpol_list = concatenate((minpol_list, array([bitarray2dec(t[::-1])])))
return minpol_list.astype(int)
# Divide two polynomials and return the remainder
def polydivide(x, y):
r = y
while len(bin(r)) >= len(bin(y)):
shift_count = len(bin(x)) - len(bin(y))
if shift_count > 0:
d = y << shift_count
else:
d = y
x = x ^ d
r = x
return r
# Multiply two polynomials in GF(2^m), reducing modulo the primitive polynomial
def polymultiply(x, y, m, prim_poly):
x_array = dec2bitarray(x, m)
y_array = dec2bitarray(y, m)
prod = bitarray2dec(convolve(x_array, y_array) % 2)
return polydivide(prod, prim_poly)
# Represent an integer-encoded polynomial as a human-readable string
def poly_to_string(x):
i = 0
polystr = ""
while x != 0:
y = x%2
x = x >> 1
if y == 1:
polystr = polystr + "x^" + str(i) + " + "
i+=1
return polystr[:-2]
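if __name__ == "__main__":
    # Usage sketch; the values below follow the class docstring and the unit
    # tests. Build GF(2^4), inspect its primitive polynomial and compute element
    # orders and minimal polynomials.
    x = GF(arange(16), 4)
    print(x.elements)                    # 0, 1, ..., 15
    print(poly_to_string(x.prim_poly))   # primitive polynomial 19 -> x^0 + x^1 + x^4
    print(GF(arange(1, 16), 4).order())  # element orders all divide 2**4 - 1 = 15
    print(x.minpolys())                  # minimal polynomial (as an integer) per element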


@ -0,0 +1,84 @@
# Authors: Veeresh Taranalli <veeresht@gmail.com>
# License: BSD 3-Clause
""" Interleavers and De-interleavers """
from numpy import array, arange, zeros
from numpy.random import mtrand
__all__ = ['RandInterlv']
class _Interleaver:
def interlv(self, in_array):
""" Interleave input array using the specific interleaver.
Parameters
----------
in_array : 1D ndarray of ints
Input data to be interleaved.
Returns
-------
out_array : 1D ndarray of ints
Interleaved output data.
"""
out_array = array(map(lambda x: in_array[x], self.p_array))
return out_array
def deinterlv(self, in_array):
""" De-interleave input array using the specific interleaver.
Parameters
----------
in_array : 1D ndarray of ints
Input data to be de-interleaved.
Returns
-------
out_array : 1D ndarray of ints
De-interleaved output data.
"""
out_array = zeros(len(in_array), in_array.dtype)
for index, element in enumerate(self.p_array):
out_array[element] = in_array[index]
return out_array
class RandInterlv(_Interleaver):
""" Random Interleaver.
Parameters
----------
length : int
Length of the interleaver.
seed : int
Seed to initialize the random number generator
which generates the random permutation for
interleaving.
Returns
-------
random_interleaver : RandInterlv object
A random interleaver object.
Note
----
The random number generator is the
RandomState object from NumPy,
which uses the Mersenne Twister algorithm.
"""
def __init__(self, length, seed):
rand_gen = mtrand.RandomState(seed)
self.p_array = rand_gen.permutation(arange(length))
#class SRandInterlv(_Interleaver):
#class QPPInterlv(_Interleaver):
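if __name__ == "__main__":
    # Round-trip sketch: de-interleaving an interleaved sequence returns the
    # original data. The length (10) and seed (1) below are arbitrary.
    data = arange(10)
    interleaver = RandInterlv(10, seed=1)
    shuffled = interleaver.interlv(data)
    print(interleaver.deinterlv(shuffled))   # expected: [0 1 2 3 4 5 6 7 8 9]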


@ -0,0 +1,237 @@
# Authors: Veeresh Taranalli <veeresht@gmail.com>
# License: BSD 3-Clause
""" LDPC Codes """
import numpy as np
__all__ = ['get_ldpc_code_params', 'ldpc_bp_decode']
MAX_POS_LLR = 38.0
MIN_NEG_LLR = -38.0
def get_ldpc_code_params(ldpc_design_filename):
"""
Extract parameters from LDPC code design file.
Parameters
----------
ldpc_design_filename : string
Filename of the LDPC code design file.
Returns
-------
ldpc_code_params : dictionary
Parameters of the LDPC code.
"""
ldpc_design_file = open(ldpc_design_filename)
ldpc_code_params = {}
[n_vnodes, n_cnodes] = [int(x) for x in ldpc_design_file.readline().split(' ')]
[max_vnode_deg, max_cnode_deg] = [int(x) for x in ldpc_design_file.readline().split(' ')]
vnode_deg_list = np.array([int(x) for x in ldpc_design_file.readline().split(' ')[:-1]], np.int32)
cnode_deg_list = np.array([int(x) for x in ldpc_design_file.readline().split(' ')[:-1]], np.int32)
cnode_adj_list = -np.ones([n_cnodes, max_cnode_deg], int)
vnode_adj_list = -np.ones([n_vnodes, max_vnode_deg], int)
for vnode_idx in range(n_vnodes):
vnode_adj_list[vnode_idx, 0:vnode_deg_list[vnode_idx]] = \
np.array([int(x)-1 for x in ldpc_design_file.readline().split('\t')])
for cnode_idx in range(n_cnodes):
cnode_adj_list[cnode_idx, 0:cnode_deg_list[cnode_idx]] = \
np.array([int(x)-1 for x in ldpc_design_file.readline().split('\t')])
cnode_vnode_map = -np.ones([n_cnodes, max_cnode_deg], int)
vnode_cnode_map = -np.ones([n_vnodes, max_vnode_deg], int)
cnode_list = np.arange(n_cnodes)
vnode_list = np.arange(n_vnodes)
for cnode in range(n_cnodes):
for i, vnode in enumerate(cnode_adj_list[cnode, 0:cnode_deg_list[cnode]]):
cnode_vnode_map[cnode, i] = cnode_list[np.where(vnode_adj_list[vnode, :] == cnode)]
for vnode in range(n_vnodes):
for i, cnode in enumerate(vnode_adj_list[vnode, 0:vnode_deg_list[vnode]]):
vnode_cnode_map[vnode, i] = vnode_list[np.where(cnode_adj_list[cnode, :] == vnode)]
cnode_adj_list_1d = cnode_adj_list.flatten().astype(np.int32)
vnode_adj_list_1d = vnode_adj_list.flatten().astype(np.int32)
cnode_vnode_map_1d = cnode_vnode_map.flatten().astype(np.int32)
vnode_cnode_map_1d = vnode_cnode_map.flatten().astype(np.int32)
pmat = np.zeros([n_cnodes, n_vnodes], int)
for cnode_idx in range(n_cnodes):
pmat[cnode_idx, cnode_adj_list[cnode_idx, :]] = 1
ldpc_code_params['n_vnodes'] = n_vnodes
ldpc_code_params['n_cnodes'] = n_cnodes
ldpc_code_params['max_cnode_deg'] = max_cnode_deg
ldpc_code_params['max_vnode_deg'] = max_vnode_deg
ldpc_code_params['cnode_adj_list'] = cnode_adj_list_1d
ldpc_code_params['cnode_vnode_map'] = cnode_vnode_map_1d
ldpc_code_params['vnode_adj_list'] = vnode_adj_list_1d
ldpc_code_params['vnode_cnode_map'] = vnode_cnode_map_1d
ldpc_code_params['cnode_deg_list'] = cnode_deg_list
ldpc_code_params['vnode_deg_list'] = vnode_deg_list
ldpc_design_file.close()
return ldpc_code_params
# Clip an LLR value to [MIN_NEG_LLR, MAX_POS_LLR] to avoid numerical overflow
def _limit_llr(in_llr):
out_llr = in_llr
if in_llr > MAX_POS_LLR:
out_llr = MAX_POS_LLR
if in_llr < MIN_NEG_LLR:
out_llr = MIN_NEG_LLR
return out_llr
# Sum-product (SPA) check-node update: combine the incoming variable-node
# messages in the tanh domain (excluding each edge's own message) to form the
# outgoing check-node messages.
def sum_product_update(cnode_idx, cnode_adj_list, cnode_deg_list, cnode_msgs,
vnode_msgs, cnode_vnode_map, max_cnode_deg, max_vnode_deg):
start_idx = cnode_idx*max_cnode_deg
offset = cnode_deg_list[cnode_idx]
vnode_list = cnode_adj_list[start_idx:start_idx+offset]
vnode_list_msgs_tanh = np.tanh(vnode_msgs[vnode_list*max_vnode_deg +
cnode_vnode_map[start_idx:start_idx+offset]]/2.0)
msg_prod = np.prod(vnode_list_msgs_tanh)
# Compute messages on outgoing edges using the incoming message product
cnode_msgs[start_idx:start_idx+offset]= 2.0*np.arctanh(msg_prod/vnode_list_msgs_tanh)
# Min-sum (MSA) check-node update: each outgoing message is the product of the
# signs and the minimum magnitude of the other incoming messages.
def min_sum_update(cnode_idx, cnode_adj_list, cnode_deg_list, cnode_msgs,
vnode_msgs, cnode_vnode_map, max_cnode_deg, max_vnode_deg):
start_idx = cnode_idx*max_cnode_deg
offset = cnode_deg_list[cnode_idx]
vnode_list = cnode_adj_list[start_idx:start_idx+offset]
vnode_list_msgs = vnode_msgs[vnode_list*max_vnode_deg +
cnode_vnode_map[start_idx:start_idx+offset]]
vnode_list_msgs = np.ma.array(vnode_list_msgs, mask=False)
# Compute messages on outgoing edges using the incoming messages
for i in range(start_idx, start_idx+offset):
vnode_list_msgs.mask[i-start_idx] = True
cnode_msgs[i] = np.prod(np.sign(vnode_list_msgs))*np.min(np.abs(vnode_list_msgs))
vnode_list_msgs.mask[i-start_idx] = False
#print cnode_msgs[i]
def ldpc_bp_decode(llr_vec, ldpc_code_params, decoder_algorithm, n_iters):
"""
LDPC Decoder using Belief Propagation (BP).
Parameters
----------
llr_vec : 1D array of float
Received codeword LLR values from the channel.
ldpc_code_params : dictionary
Parameters of the LDPC code.
decoder_algorithm: string
Specify the decoder algorithm type.
SPA for Sum-Product Algorithm
MSA for Min-Sum Algorithm
n_iters : int
Max. number of iterations of decoding to be done.
Returns
-------
dec_word : 1D array of 0's and 1's
The codeword after decoding.
out_llrs : 1D array of float
LLR values corresponding to the decoded output.
"""
n_cnodes = ldpc_code_params['n_cnodes']
n_vnodes = ldpc_code_params['n_vnodes']
max_cnode_deg = ldpc_code_params['max_cnode_deg']
max_vnode_deg = ldpc_code_params['max_vnode_deg']
cnode_adj_list = ldpc_code_params['cnode_adj_list']
cnode_vnode_map = ldpc_code_params['cnode_vnode_map']
vnode_adj_list = ldpc_code_params['vnode_adj_list']
vnode_cnode_map = ldpc_code_params['vnode_cnode_map']
cnode_deg_list = ldpc_code_params['cnode_deg_list']
vnode_deg_list = ldpc_code_params['vnode_deg_list']
dec_word = np.zeros(n_vnodes, int)
out_llrs = np.zeros(n_vnodes)
cnode_msgs = np.zeros(n_cnodes*max_cnode_deg)
vnode_msgs = np.zeros(n_vnodes*max_vnode_deg)
_limit_llr_v = np.vectorize(_limit_llr)
if decoder_algorithm == 'SPA':
check_node_update = sum_product_update
elif decoder_algorithm == 'MSA':
check_node_update = min_sum_update
else:
raise NameError('Please input a valid decoder_algorithm string.')
# Initialize vnode messages with the LLR values received
for vnode_idx in range(n_vnodes):
start_idx = vnode_idx*max_vnode_deg
offset = vnode_deg_list[vnode_idx]
vnode_msgs[start_idx : start_idx+offset] = llr_vec[vnode_idx]
# Main loop of Belief Propagation (BP) decoding iterations
for iter_cnt in range(n_iters):
continue_flag = 0
# Check Node Update
for cnode_idx in range(n_cnodes):
check_node_update(cnode_idx, cnode_adj_list, cnode_deg_list, cnode_msgs,
vnode_msgs, cnode_vnode_map, max_cnode_deg, max_vnode_deg)
# Variable Node Update
for vnode_idx in range(n_vnodes):
# Compute sum of all incoming messages at the variable node
start_idx = vnode_idx*max_vnode_deg
offset = vnode_deg_list[vnode_idx]
cnode_list = vnode_adj_list[start_idx:start_idx+offset]
cnode_list_msgs = cnode_msgs[cnode_list*max_cnode_deg + vnode_cnode_map[start_idx:start_idx+offset]]
msg_sum = np.sum(cnode_list_msgs)
# Compute messages on outgoing edges using the incoming message sum
vnode_msgs[start_idx:start_idx+offset] = _limit_llr_v(llr_vec[vnode_idx] + msg_sum -
cnode_list_msgs)
# Update output LLRs and decoded word
out_llrs[vnode_idx] = llr_vec[vnode_idx] + msg_sum
if out_llrs[vnode_idx] > 0:
dec_word[vnode_idx] = 0
else:
dec_word[vnode_idx] = 1
# Check whether all parity checks are satisfied (early termination)
for cnode_idx in range(n_cnodes):
p_sum = 0
for i in range(cnode_deg_list[cnode_idx]):
p_sum ^= dec_word[cnode_adj_list[cnode_idx*max_cnode_deg + i]]
if p_sum != 0:
continue_flag = 1
break
# Stop iterations
if continue_flag == 0:
break
return dec_word, out_llrs
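if __name__ == "__main__":
    # Decoding sketch modelled on tests/test_ldpc.py: BPSK-modulate the all-zeros
    # codeword of a (96, 48) Gallager code, add AWGN at Eb/N0 = 2.5 dB and decode
    # with the sum-product algorithm. The design-file path below is an assumption
    # and should point to a valid LDPC design file.
    from numpy.random import randn
    ldpc_params = get_ldpc_code_params('designs/ldpc/gallager/96.33.964.txt')
    n, rate, ebno = 96, 0.5, 2.5
    noise_std = 1 / np.sqrt((10 ** (ebno / 10.0)) * rate * 2)
    tx_codeword = np.zeros(n, int)
    rx_word = 1 - 2 * tx_codeword + noise_std * randn(n)
    rx_llrs = 2.0 * rx_word / (noise_std ** 2)
    dec_word, out_llrs = ldpc_bp_decode(rx_llrs, ldpc_params, 'SPA', 100)
    print(dec_word)   # mostly (often all) zeros at this SNR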


@ -0,0 +1,21 @@
# Authors: Veeresh Taranalli <veeresht@gmail.com>
# License: BSD 3-Clause
from numpy import array
from numpy.testing import assert_array_equal
from commpy.channelcoding.algcode import cyclic_code_genpoly
class TestAlgebraicCoding(object):
def test_cyclic_code_gen_poly(self):
code_lengths = array([15, 31])
code_dims = array([4, 21])
desired_genpolys = array([[2479, 3171, 3929],
[1653, 1667, 1503, 1207, 1787, 1561, 1903,
1219, 1137, 2013, 1453, 1897, 1975, 1395, 1547]])
count = 0
for n, k in zip(code_lengths, code_dims):
genpolys = cyclic_code_genpoly(n, k)
assert_array_equal(genpolys, desired_genpolys[count])
count += 1


@ -0,0 +1,87 @@
# Authors: Veeresh Taranalli <veeresht@gmail.com>
# License: BSD 3-Clause
from numpy import array
from numpy.random import randint
from numpy.testing import assert_array_equal
from commpy.channelcoding.convcode import Trellis, conv_encode, viterbi_decode
class TestConvCode(object):
@classmethod
def setup_class(cls):
# Convolutional Code 1: G(D) = [1+D^2, 1+D+D^2]
memory = array([2])
g_matrix = array([[0o5, 0o7]])
cls.code_type_1 = 'default'
cls.trellis_1 = Trellis(memory, g_matrix, 0, cls.code_type_1)
cls.desired_next_state_table_1 = array([[0, 2],
[0, 2],
[1, 3],
[1, 3]])
cls.desired_output_table_1 = array([[0, 3],
[3, 0],
[1, 2],
[2, 1]])
# Convolutional Code 2: G(D) = [1 1+D+D^2/1+D]
memory = array([2])
g_matrix = array([[0o1, 0o7]])
feedback = 0o5
cls.code_type_2 = 'rsc'
cls.trellis_2 = Trellis(memory, g_matrix, feedback, cls.code_type_2)
cls.desired_next_state_table_2 = array([[0, 2],
[2, 0],
[1, 3],
[3, 1]])
cls.desired_output_table_2 = array([[0, 3],
[0, 3],
[1, 2],
[1, 2]])
@classmethod
def teardown_class(cls):
pass
def test_next_state_table(self):
assert_array_equal(self.trellis_1.next_state_table, self.desired_next_state_table_1)
assert_array_equal(self.trellis_2.next_state_table, self.desired_next_state_table_2)
def test_output_table(self):
assert_array_equal(self.trellis_1.output_table, self.desired_output_table_1)
assert_array_equal(self.trellis_2.output_table, self.desired_output_table_2)
def test_conv_encode(self):
pass
def test_viterbi_decode(self):
pass
def test_conv_encode_viterbi_decode(self):
niters = 10
blocklength = 1000
for i in range(niters):
msg = randint(0, 2, blocklength)
coded_bits = conv_encode(msg, self.trellis_1)
decoded_bits = viterbi_decode(coded_bits.astype(float), self.trellis_1, 15)
assert_array_equal(decoded_bits[:-2], msg)
coded_bits = conv_encode(msg, self.trellis_1)
coded_syms = 2.0*coded_bits - 1
decoded_bits = viterbi_decode(coded_syms, self.trellis_1, 15, 'unquantized')
assert_array_equal(decoded_bits[:-2], msg)
coded_bits = conv_encode(msg, self.trellis_2)
decoded_bits = viterbi_decode(coded_bits.astype(float), self.trellis_2, 15)
assert_array_equal(decoded_bits[:-2], msg)
coded_bits = conv_encode(msg, self.trellis_2)
coded_syms = 2.0*coded_bits - 1
decoded_bits = viterbi_decode(coded_syms, self.trellis_2, 15, 'unquantized')
assert_array_equal(decoded_bits[:-2], msg)


@ -0,0 +1,68 @@
# Authors: Veeresh Taranalli <veeresht@gmail.com>
# License: BSD 3 clause
from numpy import array, ones_like, arange
from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_, assert_equal
from commpy.channelcoding.gfields import GF
class TestGaloisFields(object):
def test_closure(self):
for m in arange(1, 9):
x = GF(arange(2**m), m)
for a in x.elements:
for b in x.elements:
assert_((GF(array([a]), m) + GF(array([b]), m)).elements[0] in x.elements)
assert_((GF(array([a]), m) * GF(array([b]), m)).elements[0] in x.elements)
def test_addition(self):
m = 3
x = GF(arange(2**m), m)
y = GF(array([6, 4, 3, 1, 2, 0, 5, 7]), m)
z = GF(array([6, 5, 1, 2, 6, 5, 3, 0]), m)
assert_array_equal((x+y).elements, z.elements)
def test_multiplication(self):
m = 3
x = GF(array([7, 6, 5, 4, 3, 2, 1, 0]), m)
y = GF(array([6, 4, 3, 1, 2, 0, 5, 7]), m)
z = GF(array([4, 5, 4, 4, 6, 0, 5, 0]), m)
assert_array_equal((x*y).elements, z.elements)
def test_tuple_form(self):
m = 3
x = GF(arange(0, 2**m-1), m)
y = x.power_to_tuple()
z = GF(array([1, 2, 4, 3, 6, 7, 5]), m)
assert_array_equal(y.elements, z.elements)
def test_power_form(self):
m = 3
x = GF(arange(1, 2**m), m)
y = x.tuple_to_power()
z = GF(array([0, 1, 3, 2, 6, 4, 5]), m)
assert_array_equal(y.elements, z.elements)
m = 4
x = GF(arange(1, 2**m), m)
y = x.tuple_to_power()
z = GF(array([0, 1, 4, 2, 8, 5, 10, 3, 14, 9, 7, 6, 13, 11, 12]), m)
assert_array_equal(y.elements, z.elements)
def test_order(self):
m = 4
x = GF(arange(1, 2**m), m)
y = x.order()
z = array([1, 15, 15, 15, 15, 3, 3, 5, 15, 5, 15, 5, 15, 15, 5])
assert_array_equal(y, z)
def test_minpols(self):
m = 4
x = GF(arange(2**m), m)
z = array([2, 3, 19, 19, 19, 19, 7, 7, 31, 25, 31, 25, 31, 25, 25, 31])
assert_array_equal(x.minpolys(), z)
m = 6
x = GF(array([2, 8, 32, 6, 24, 35, 10, 40, 59, 41, 14, 37]), m)
z = array([67, 87, 103, 73, 13, 109, 91, 117, 7, 115, 11, 97])
assert_array_equal(x.minpolys(), z)


@ -0,0 +1,62 @@
# Authors: Veeresh Taranalli <veeresht@gmail.com>
# License: BSD 3-Clause
from numpy import array, sqrt, zeros
from numpy.random import randn
from numpy.testing import assert_allclose
from commpy.channelcoding.ldpc import get_ldpc_code_params, ldpc_bp_decode
from commpy.utilities import hamming_dist
import os
from nose.plugins.attrib import attr
@attr('slow')
class TestLDPCCode(object):
@classmethod
def setup_class(cls):
dir = os.path.dirname(__file__)
ldpc_design_file_1 = os.path.join(dir, '../designs/ldpc/gallager/96.33.964.txt')
#ldpc_design_file_1 = "../designs/ldpc/gallager/96.33.964.txt"
cls.ldpc_code_params = get_ldpc_code_params(ldpc_design_file_1)
@classmethod
def teardown_class(cls):
pass
def test_ldpc_bp_decode(self):
N = 96
k = 48
rate = 0.5
Es = 1.0
snr_list = array([2.0, 2.5])
niters = 10000000
tx_codeword = zeros(N, int)
ldpcbp_iters = 100
fer_array_ref = array([200.0/1000, 200.0/2000])
fer_array_test = zeros(len(snr_list))
for idx, ebno in enumerate(snr_list):
noise_std = 1/sqrt((10**(ebno/10.0))*rate*2/Es)
fer_cnt_bp = 0
for iter_cnt in range(niters):
awgn_array = noise_std * randn(N)
rx_word = 1-(2*tx_codeword) + awgn_array
rx_llrs = 2.0*rx_word/(noise_std**2)
[dec_word, out_llrs] = ldpc_bp_decode(rx_llrs, self.ldpc_code_params, 'SPA',
ldpcbp_iters)
num_bit_errors = hamming_dist(tx_codeword, dec_word)
if num_bit_errors > 0:
fer_cnt_bp += 1
if fer_cnt_bp >= 200:
fer_array_test[idx] = float(fer_cnt_bp)/(iter_cnt+1)
break
assert_allclose(fer_array_test, fer_array_ref, rtol=2e-1, atol=0)


@ -0,0 +1,332 @@
# Authors: Veeresh Taranalli <veeresht@gmail.com>
# License: BSD 3-Clause
""" Turbo Codes """
from numpy import array, append, zeros, exp, pi, log, empty
from commpy.channelcoding import Trellis, conv_encode
from commpy.utilities import dec2bitarray, bitarray2dec
#from commpy.channelcoding.map_c import backward_recursion, forward_recursion_decoding
def turbo_encode(msg_bits, trellis1, trellis2, interleaver):
""" Turbo Encoder.
Encode Bits using a parallel concatenated rate-1/3
turbo code consisting of two rate-1/2 systematic
convolutional component codes.
Parameters
----------
msg_bits : 1D ndarray containing {0, 1}
Stream of bits to be turbo encoded.
trellis1 : Trellis object
Trellis representation of the
first code in the parallel concatenation.
trellis2 : Trellis object
Trellis representation of the
second code in the parallel concatenation.
interleaver : Interleaver object
Interleaver used in the turbo code.
Returns
-------
[sys_stream, non_sys_stream1, non_sys_stream2] : list of 1D ndarrays
Encoded bit streams corresponding
to the systematic output
and the two non-systematic
outputs from the two component codes.
"""
stream = conv_encode(msg_bits, trellis1, 'rsc')
sys_stream = stream[::2]
non_sys_stream_1 = stream[1::2]
interlv_msg_bits = interleaver.interlv(sys_stream)
puncture_matrix = array([[0, 1]])
non_sys_stream_2 = conv_encode(interlv_msg_bits, trellis2, 'rsc', puncture_matrix)
sys_stream = sys_stream[0:-trellis1.total_memory]
non_sys_stream_1 = non_sys_stream_1[0:-trellis1.total_memory]
non_sys_stream_2 = non_sys_stream_2[0:-trellis2.total_memory]
return [sys_stream, non_sys_stream_1, non_sys_stream_2]
def _compute_branch_prob(code_bit_0, code_bit_1, rx_symbol_0, rx_symbol_1,
noise_variance):
#cdef np.float64_t code_symbol_0, code_symbol_1, branch_prob, x, y
code_symbol_0 = 2*code_bit_0 - 1
code_symbol_1 = 2*code_bit_1 - 1
x = rx_symbol_0 - code_symbol_0
y = rx_symbol_1 - code_symbol_1
# Normalized branch transition probability
branch_prob = exp(-(x*x + y*y)/(2*noise_variance))
return branch_prob
# Backward (beta) recursion of the MAP/BCJR algorithm, also storing the branch
# transition probabilities for reuse in the forward pass.
def _backward_recursion(trellis, msg_length, noise_variance,
sys_symbols, non_sys_symbols, branch_probs,
priors, b_state_metrics):
n = trellis.n
number_states = trellis.number_states
number_inputs = trellis.number_inputs
codeword_array = empty(n, 'int')
next_state_table = trellis.next_state_table
output_table = trellis.output_table
# Backward recursion
for reverse_time_index in reversed(xrange(1, msg_length+1)):
for current_state in xrange(number_states):
for current_input in xrange(number_inputs):
next_state = next_state_table[current_state, current_input]
code_symbol = output_table[current_state, current_input]
codeword_array = dec2bitarray(code_symbol, n)
parity_bit = codeword_array[1]
msg_bit = codeword_array[0]
rx_symbol_0 = sys_symbols[reverse_time_index-1]
rx_symbol_1 = non_sys_symbols[reverse_time_index-1]
branch_prob = _compute_branch_prob(msg_bit, parity_bit,
rx_symbol_0, rx_symbol_1,
noise_variance)
branch_probs[current_input, current_state, reverse_time_index-1] = branch_prob
b_state_metrics[current_state, reverse_time_index-1] += \
(b_state_metrics[next_state, reverse_time_index] * branch_prob *
priors[current_input, reverse_time_index-1])
b_state_metrics[:,reverse_time_index-1] /= \
b_state_metrics[:,reverse_time_index-1].sum()
# Forward (alpha) recursion of the MAP/BCJR algorithm combined with computation
# of the a-posteriori LLRs (and, in 'decode' mode, the hard decisions).
def _forward_recursion_decoding(trellis, mode, msg_length, noise_variance,
sys_symbols, non_sys_symbols, b_state_metrics,
f_state_metrics, branch_probs, app, L_int,
priors, L_ext, decoded_bits):
n = trellis.n
number_states = trellis.number_states
number_inputs = trellis.number_inputs
codeword_array = empty(n, 'int')
next_state_table = trellis.next_state_table
output_table = trellis.output_table
# Forward Recursion
for time_index in xrange(1, msg_length+1):
app[:] = 0
for current_state in xrange(number_states):
for current_input in xrange(number_inputs):
next_state = next_state_table[current_state, current_input]
branch_prob = branch_probs[current_input, current_state, time_index-1]
# Compute the forward state metrics
f_state_metrics[next_state, 1] += (f_state_metrics[current_state, 0] *
branch_prob *
priors[current_input, time_index-1])
# Compute APP
app[current_input] += (f_state_metrics[current_state, 0] *
branch_prob *
b_state_metrics[next_state, time_index])
lappr = L_int[time_index-1] + log(app[1]/app[0])
L_ext[time_index-1] = lappr
if mode == 'decode':
if lappr > 0:
decoded_bits[time_index-1] = 1
else:
decoded_bits[time_index-1] = 0
# Normalization of the forward state metrics
f_state_metrics[:,1] = f_state_metrics[:,1]/f_state_metrics[:,1].sum()
f_state_metrics[:,0] = f_state_metrics[:,1]
f_state_metrics[:,1] = 0.0
def map_decode(sys_symbols, non_sys_symbols, trellis, noise_variance, L_int, mode='decode'):
""" Maximum a-posteriori probability (MAP) decoder.
Decodes a stream of convolutionally encoded
(rate 1/2) bits using the MAP algorithm.
Parameters
----------
sys_symbols : 1D ndarray
Received symbols corresponding to
the systematic (first output) bits in
the codeword.
non_sys_symbols : 1D ndarray
Received symbols corresponding to the non-systematic
(second output) bits in the codeword.
trellis : Trellis object
Trellis representation of the convolutional code.
noise_variance : float
Variance (power) of the AWGN channel.
L_int : 1D ndarray
Array representing the initial intrinsic
information for all received
symbols.
Typically all zeros,
corresponding to equal prior
probabilities of bits 0 and 1.
mode : str{'decode', 'compute'}, optional
The mode in which the MAP decoder is used.
'decode' mode returns the decoded bits
along with the extrinsic information.
'compute' mode returns only the
extrinsic information.
Returns
-------
[L_ext, decoded_bits] : list of two 1D ndarrays
The first element of the list is the extrinsic information.
The second element of the list is the decoded bits.
"""
k = trellis.k
n = trellis.n
rate = float(k)/n
number_states = trellis.number_states
number_inputs = trellis.number_inputs
msg_length = len(sys_symbols)
# Initialize forward state metrics (alpha)
f_state_metrics = zeros([number_states, 2])
f_state_metrics[0][0] = 1
#print f_state_metrics
# Initialize backward state metrics (beta)
b_state_metrics = zeros([number_states, msg_length+1])
b_state_metrics[:,msg_length] = 1
# Initialize branch transition probabilities (gamma)
branch_probs = zeros([number_inputs, number_states, msg_length+1])
app = zeros(number_inputs)
lappr = 0
decoded_bits = zeros(msg_length, 'int')
L_ext = zeros(msg_length)
priors = empty([2, msg_length])
priors[0,:] = 1/(1 + exp(L_int))
priors[1,:] = 1 - priors[0,:]
# Backward recursion
_backward_recursion(trellis, msg_length, noise_variance, sys_symbols,
non_sys_symbols, branch_probs, priors, b_state_metrics)
# Forward recursion
_forward_recursion_decoding(trellis, mode, msg_length, noise_variance, sys_symbols,
non_sys_symbols, b_state_metrics, f_state_metrics,
branch_probs, app, L_int, priors, L_ext, decoded_bits)
return [L_ext, decoded_bits]
def turbo_decode(sys_symbols, non_sys_symbols_1, non_sys_symbols_2, trellis,
noise_variance, number_iterations, interleaver, L_int = None):
""" Turbo Decoder.
Decodes a stream of convolutionally encoded
(rate 1/3) bits using the BCJR algorithm.
Parameters
----------
sys_symbols : 1D ndarray
Received symbols corresponding to
the systematic (first output) bits in the codeword.
non_sys_symbols_1 : 1D ndarray
Received symbols corresponding to
the first parity bits in the codeword.
non_sys_symbols_2 : 1D ndarray
Received symbols corresponding to the
second parity bits in the codeword.
trellis : Trellis object
Trellis representation of the convolutional codes
used in the Turbo code.
noise_variance : float
Variance (power) of the AWGN channel.
number_iterations : int
Number of the iterations of the
BCJR algorithm used in turbo decoding.
interleaver : Interleaver object.
Interleaver used in the turbo code.
L_int : 1D ndarray
Array representing the initial intrinsic
information for all received
symbols.
Typically all zeros,
corresponding to equal prior
probabilities of bits 0 and 1.
Returns
-------
decoded_bits : 1D ndarray of ints containing {0, 1}
Decoded bit stream.
"""
if L_int is None:
L_int = zeros(len(sys_symbols))
L_int_1 = L_int
# Interleave systematic symbols for input to second decoder
sys_symbols_i = interleaver.interlv(sys_symbols)
for iteration_count in xrange(number_iterations):
# MAP Decoder - 1
[L_ext_1, decoded_bits] = map_decode(sys_symbols, non_sys_symbols_1,
trellis, noise_variance, L_int_1, 'compute')
L_ext_1 = L_ext_1 - L_int_1
L_int_2 = interleaver.interlv(L_ext_1)
if iteration_count == number_iterations - 1:
mode = 'decode'
else:
mode = 'compute'
# MAP Decoder - 2
[L_2, decoded_bits] = map_decode(sys_symbols_i, non_sys_symbols_2,
trellis, noise_variance, L_int_2, mode)
L_ext_2 = L_2 - L_int_2
L_int_1 = interleaver.deinterlv(L_ext_2)
decoded_bits = interleaver.deinterlv(decoded_bits)
return decoded_bits
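if __name__ == "__main__":
    # End-to-end sketch of the turbo encode/decode API, using the recursive
    # systematic component code from tests/test_convcode.py and an unquantized
    # AWGN-style channel. The block length, seed, noise variance and iteration
    # count are illustrative assumptions.
    from numpy import sqrt
    from numpy.random import randint, randn
    from commpy.channelcoding.interleavers import RandInterlv
    msg_length = 1000
    trellis = Trellis(array([2]), array([[0o1, 0o7]]), feedback=0o5, code_type='rsc')
    interleaver = RandInterlv(msg_length, seed=0)
    msg_bits = randint(0, 2, msg_length)
    sys_stream, par_1, par_2 = turbo_encode(msg_bits, trellis, trellis, interleaver)
    noise_variance = 0.5
    sys_r = (2.0 * sys_stream - 1) + sqrt(noise_variance) * randn(msg_length)
    par_1_r = (2.0 * par_1 - 1) + sqrt(noise_variance) * randn(msg_length)
    par_2_r = (2.0 * par_2 - 1) + sqrt(noise_variance) * randn(msg_length)
    decoded_bits = turbo_decode(sys_r, par_1_r, par_2_r, trellis,
                                noise_variance, 10, interleaver)
    print("bit errors: %d / %d" % ((decoded_bits != msg_bits).sum(), msg_length))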

scripts/commpy/channels.py Normal file

@ -0,0 +1,175 @@
# Authors: Veeresh Taranalli <veeresht@gmail.com>
# License: BSD 3-Clause
"""
============================================
Channel Models (:mod:`commpy.channels`)
============================================
.. autosummary::
:toctree: generated/
bec -- Binary Erasure Channel.
bsc -- Binary Symmetric Channel.
awgn -- Additive White Gaussian Noise Channel.
"""
from numpy import complex, sum, pi, arange, array, size, shape, real, sqrt
from numpy import matrix, sqrt, sum, zeros, concatenate, sinc
from numpy.random import randn, seed, random
#from scipy.special import gamma, jn
#from scipy.signal import hamming, convolve, resample
#from scipy.fftpack import ifft, fftshift, fftfreq
#from scipy.interpolate import interp1d
__all__=['bec', 'bsc', 'awgn']
def bec(input_bits, p_e):
"""
Binary Erasure Channel.
Parameters
----------
input_bits : 1D ndarray containing {0, 1}
Input array of bits to the channel.
p_e : float in [0, 1]
Erasure probability of the channel.
Returns
-------
output_bits : 1D ndarray containing {0, 1}
Output bits from the channel.
"""
output_bits = input_bits.copy()
output_bits[random(len(output_bits)) <= p_e] = -1
return output_bits
def bsc(input_bits, p_t):
"""
Binary Symmetric Channel.
Parameters
----------
input_bits : 1D ndarray containing {0, 1}
Input array of bits to the channel.
p_t : float in [0, 1]
Transition/Error probability of the channel.
Returns
-------
output_bits : 1D ndarray containing {0, 1}
Output bits from the channel.
"""
output_bits = input_bits.copy()
flip_locs = (random(len(output_bits)) <= p_t)
output_bits[flip_locs] = 1 ^ output_bits[flip_locs]
return output_bits
def awgn(input_signal, snr_dB, rate=1.0):
"""
Additive White Gaussian Noise (AWGN) Channel.
Parameters
----------
input_signal : 1D ndarray of floats
Input signal to the channel.
snr_dB : float
Output SNR required in dB.
rate : float
Rate of the FEC code used, if any; otherwise 1.
Returns
-------
output_signal : 1D ndarray of floats
Output signal from the channel with the specified SNR.
"""
avg_energy = sum(abs(input_signal) * abs(input_signal))/len(input_signal)
snr_linear = 10**(snr_dB/10.0)
noise_variance = avg_energy/(2*rate*snr_linear)
if input_signal.dtype == complex:
noise = (sqrt(noise_variance) * randn(len(input_signal))) + (sqrt(noise_variance) * randn(len(input_signal))*1j)
else:
noise = sqrt(2*noise_variance) * randn(len(input_signal))
output_signal = input_signal + noise
return output_signal
# =============================================================================
# Incomplete code to implement fading channels
# =============================================================================
#def doppler_jakes(max_doppler, filter_length):
# fs = 32.0*max_doppler
# ts = 1/fs
# m = arange(0, filter_length/2)
# Generate the Jakes Doppler Spectrum impulse response h[m]
# h_jakes_left = (gamma(3.0/4) *
# pow((max_doppler/(pi*abs((m-(filter_length/2))*ts))), 0.25) *
# jn(0.25, 2*pi*max_doppler*abs((m-(filter_length/2))*ts)))
# h_jakes_center = array([(gamma(3.0/4)/gamma(5.0/4)) * pow(max_doppler, 0.5)])
# h_jakes = concatenate((h_jakes_left[0:filter_length/2-1],
# h_jakes_center, h_jakes_left[::-1]))
# h_jakes = h_jakes*hamming(filter_length)
# h_jakes = h_jakes/(sum(h_jakes**2)**0.5)
# -----------------------------------------------------------------------------
# jakes_psd_right = (1/(pi*max_doppler*(1-(freqs/max_doppler)**2)**0.5))**0.5
# zero_pad = zeros([(fft_size-filter_length)/2, ])
# jakes_psd = concatenate((zero_pad, jakes_psd_right[::-1],
# jakes_psd_right, zero_pad))
#print size(jakes_psd)
# jakes_impulse = real(fftshift(ifft(jakes_psd, fft_size)))
# h_jakes = jakes_impulse[(fft_size-filter_length)/2 + 1 : (fft_size-filter_length)/2 + filter_length + 1]
# h_jakes = h_jakes*hamming(filter_length)
# h_jakes = h_jakes/(sum(h_jakes**2)**0.5)
# -----------------------------------------------------------------------------
# return h_jakes
#def rayleigh_channel(ts_input, max_doppler, block_length, path_gains,
# path_delays):
# fs_input = 1.0/ts_input
# fs_channel = 32.0*max_doppler
# ts_channel = 1.0/fs_channel
# interp_factor = fs_input/fs_channel
# channel_length = block_length/interp_factor
# n1 = -10
# n2 = 10
# filter_length = 1024
# Generate the Jakes Doppler Spectrum impulse response h[m]
# h_jakes = doppler_jakes(max_doppler, filter_length)
# Generate the complex Gaussian Random Process
# g_var = 0.5
# gain_process = zeros([len(path_gains), block_length], dtype=complex)
# delay_process = zeros([n2+1-n1, len(path_delays)])
# for k in xrange(len(path_gains)):
# g = (g_var**0.5) * (randn(channel_length) + 1j*randn(channel_length))
# g_filt = convolve(g, h_jakes, mode='same')
# g_filt_interp = resample(g_filt, block_length)
# gain_process[k,:] = pow(10, (path_gains[k]/10.0)) * g_filt_interp
# delay_process[:,k] = sinc((path_delays[k]/ts_input) - arange(n1, n2+1))
#channel_matrix = 0
# channel_matrix = matrix(delay_process)*matrix(gain_process)
# return channel_matrix, gain_process, h_jakes

@ -0,0 +1,57 @@
# Authors: Veeresh Taranalli <veeresht@gmail.com>
# License: BSD 3-Clause
import numpy as np
import commpy.channelcoding.convcode as cc
from commpy.utilities import *
# =============================================================================
# Example showing the encoding and decoding of convolutional codes
# =============================================================================
# G(D) corresponding to the convolutional encoder
generator_matrix = np.array([[05, 07]])
#generator_matrix = np.array([[03, 00, 02], [07, 04, 06]])
# Number of delay elements in the convolutional encoder
M = np.array([2])
# Create trellis data structure
trellis = cc.Trellis(M, generator_matrix)
# Traceback depth of the decoder
tb_depth = 5*(M.sum() + 1)
for i in range(10):
# Generate random message bits to be encoded
message_bits = np.random.randint(0, 2, 1000)
# Encode message bits
coded_bits = cc.conv_encode(message_bits, trellis)
# Introduce bit errors (channel)
#coded_bits[4] = 0
#coded_bits[7] = 0
# Decode the received bits
decoded_bits = cc.viterbi_decode(coded_bits.astype(float), trellis, tb_depth)
num_bit_errors = hamming_dist(message_bits, decoded_bits[:-M])
#num_bit_errors = 1
if num_bit_errors !=0:
#print num_bit_errors, "Bit Errors found!"
#print message_bits
#print decoded_bits[tb_depth+3:]
#print decoded_bits
break
else:
print "No Bit Errors :)"
#print "==== Message Bits ==="
#print message_bits
#print "==== Coded Bits ====="
#print coded_bits
#print "==== Decoded Bits ==="
#print decoded_bits[tb_depth:]

scripts/commpy/filters.py Normal file
@ -0,0 +1,187 @@
# Authors: Veeresh Taranalli <veeresht@gmail.com>
# License: BSD 3-Clause
"""
=============================================
Pulse Shaping Filters (:mod:`commpy.filters`)
=============================================
.. autosummary::
:toctree: generated/
rcosfilter -- Raised Cosine (RC) Filter.
rrcosfilter -- Root Raised Cosine (RRC) Filter.
gaussianfilter -- Gaussian Filter.
rectfilter -- Rectangular Filter.
"""
import numpy as np
__all__=['rcosfilter', 'rrcosfilter', 'gaussianfilter', 'rectfilter']
def rcosfilter(N, alpha, Ts, Fs):
"""
Generates a raised cosine (RC) filter (FIR) impulse response.
Parameters
----------
N : int
Length of the filter in samples.
alpha : float
Roll off factor (Valid values are [0, 1]).
Ts : float
Symbol period in seconds.
Fs : float
Sampling Rate in Hz.
Returns
-------
time_idx : 1-D ndarray (float)
Array containing the time indices, in seconds, for the impulse response.
h_rc : 1-D ndarray (float)
Impulse response of the raised cosine filter.
"""
T_delta = 1/float(Fs)
time_idx = ((np.arange(N)-N/2))*T_delta
sample_num = np.arange(N)
h_rc = np.zeros(N, dtype=float)
for x in sample_num:
t = (x-N/2)*T_delta
if t == 0.0:
h_rc[x] = 1.0
elif alpha != 0 and t == Ts/(2*alpha):
h_rc[x] = (np.pi/4)*(np.sin(np.pi*t/Ts)/(np.pi*t/Ts))
elif alpha != 0 and t == -Ts/(2*alpha):
h_rc[x] = (np.pi/4)*(np.sin(np.pi*t/Ts)/(np.pi*t/Ts))
else:
h_rc[x] = (np.sin(np.pi*t/Ts)/(np.pi*t/Ts))* \
(np.cos(np.pi*alpha*t/Ts)/(1-(((2*alpha*t)/Ts)*((2*alpha*t)/Ts))))
return time_idx, h_rc
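# Usage sketch (illustrative; parameters chosen arbitrarily): a 101-tap raised
# cosine pulse for a 1 kBaud symbol rate sampled at 8 kHz. The returned time
# axis is centred on t = 0, where h_rc peaks at 1.0.
#   >>> time_idx, h_rc = rcosfilter(101, 0.35, 1.0/1000, 8000.0)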
def rrcosfilter(N, alpha, Ts, Fs):
"""
Generates a root raised cosine (RRC) filter (FIR) impulse response.
Parameters
----------
N : int
Length of the filter in samples.
alpha : float
Roll off factor (Valid values are [0, 1]).
Ts : float
Symbol period in seconds.
Fs : float
Sampling Rate in Hz.
Returns
-------
time_idx : 1-D ndarray of floats
Array containing the time indices, in seconds, for
the impulse response.
h_rrc : 1-D ndarray of floats
Impulse response of the root raised cosine filter.
"""
T_delta = 1/float(Fs)
time_idx = ((np.arange(N)-N/2))*T_delta
sample_num = np.arange(N)
h_rrc = np.zeros(N, dtype=float)
for x in sample_num:
t = (x-N/2)*T_delta
if t == 0.0:
h_rrc[x] = 1.0 - alpha + (4*alpha/np.pi)
elif alpha != 0 and t == Ts/(4*alpha):
h_rrc[x] = (alpha/np.sqrt(2))*(((1+2/np.pi)* \
(np.sin(np.pi/(4*alpha)))) + ((1-2/np.pi)*(np.cos(np.pi/(4*alpha)))))
elif alpha != 0 and t == -Ts/(4*alpha):
h_rrc[x] = (alpha/np.sqrt(2))*(((1+2/np.pi)* \
(np.sin(np.pi/(4*alpha)))) + ((1-2/np.pi)*(np.cos(np.pi/(4*alpha)))))
else:
h_rrc[x] = (np.sin(np.pi*t*(1-alpha)/Ts) + \
4*alpha*(t/Ts)*np.cos(np.pi*t*(1+alpha)/Ts))/ \
(np.pi*t*(1-(4*alpha*t/Ts)*(4*alpha*t/Ts))/Ts)
return time_idx, h_rrc
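# Usage sketch (illustrative): using the same RRC pulse as transmit and receive
# (matched) filter makes the overall response approximate a raised cosine,
# which is the usual reason for choosing rrcosfilter over rcosfilter.
#   >>> import numpy as np
#   >>> t, h_rrc = rrcosfilter(101, 0.35, 1.0/1000, 8000.0)
#   >>> h_overall = np.convolve(h_rrc, h_rrc)   # close in shape to rcosfilter's output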
def gaussianfilter(N, alpha, Ts, Fs):
"""
Generates a Gaussian filter (FIR) impulse response.
Parameters
----------
N : int
Length of the filter in samples.
alpha : float
Roll off factor (Valid values are [0, 1]).
Ts : float
Symbol period in seconds.
Fs : float
Sampling Rate in Hz.
Returns
-------
time_idx : 1-D ndarray of floats
Array containing the time indices for the impulse response.
h_gaussian : 1-D ndarray of floats
Impulse response of the Gaussian filter.
"""
T_delta = 1/float(Fs)
time_idx = ((np.arange(N)-N/2))*T_delta
h_gaussian = (np.sqrt(np.pi)/alpha)*np.exp(-((np.pi*time_idx/alpha)*(np.pi*time_idx/alpha)))
return time_idx, h_gaussian
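# Usage sketch (illustrative; parameters chosen arbitrarily): alpha sets the
# pulse width, since the response decays as exp(-(pi*t/alpha)**2).
#   >>> t, h_g = gaussianfilter(101, 0.5, 1.0/1000, 8000.0)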
def rectfilter(N, Ts, Fs):
"""
Generates a rectangular filter (FIR) impulse response.
Parameters
----------
N : int
Length of the filter in samples.
Ts : float
Symbol period in seconds.
Fs : float
Sampling Rate in Hz.
Returns
-------
time_idx : 1-D ndarray of floats
Array containing the time indices for the impulse response.
h_rect : 1-D ndarray of floats
Impulse response of the rectangular filter.
"""
h_rect = np.ones(N)
T_delta = 1/float(Fs)
time_idx = ((np.arange(N)-N/2))*T_delta
return time_idx, h_rect
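# Usage sketch (illustrative): a flat 16-sample pulse, useful as the baseline
# "no pulse shaping" case.
#   >>> t, h_rect = rectfilter(16, 1.0/1000, 8000.0)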

@ -0,0 +1,43 @@
# Authors: Veeresh Taranalli <veeresht@gmail.com>
# License: BSD 3-Clause
"""
============================================
Impairments (:mod:`commpy.impairments`)
============================================
.. autosummary::
:toctree: generated/
add_frequency_offset -- Add frequency offset impairment.
"""
from numpy import exp, pi, arange
__all__ = ['add_frequency_offset']
def add_frequency_offset(waveform, Fs, delta_f):
"""
Add frequency offset impairment to input signal.
Parameters
----------
waveform : 1D ndarray of floats or complex floats
Input signal.
Fs : float
Sampling frequency (in Hz).
delta_f : float
Frequency offset (in Hz).
Returns
-------
output_waveform : 1D ndarray of complex floats
Output signal with frequency offset.
"""
output_waveform = waveform*exp(1j*2*pi*(delta_f/Fs)*arange(len(waveform)))
return output_waveform
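# Usage sketch (illustrative; values chosen arbitrarily): rotate a complex
# baseband signal by a 100 Hz carrier offset at a 1 MHz sampling rate; the
# phase advances by 2*pi*100/1e6 radians per sample.
#   >>> from numpy import ones
#   >>> shifted = add_frequency_offset(ones(8, dtype=complex), 1e6, 100.0)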

@ -0,0 +1,194 @@
# Authors: Veeresh Taranalli <veeresht@gmail.com>
# License: BSD 3-Clause
"""
==================================================
Modulation Demodulation (:mod:`commpy.modulation`)
==================================================
.. autosummary::
:toctree: generated/
PSKModem -- Phase Shift Keying (PSK) Modem.
QAMModem -- Quadrature Amplitude Modulation (QAM) Modem.
mimo_ml -- MIMO Maximum Likelihood (ML) Detection.
"""
from numpy import arange, array, zeros, pi, cos, sin, sqrt, log2, argmin, \
hstack, repeat, tile, dot, sum, shape, concatenate, exp, log
from itertools import product
from commpy.utilities import bitarray2dec, dec2bitarray
from numpy.fft import fft, ifft
__all__ = ['PSKModem', 'QAMModem', 'mimo_ml']
class Modem:
def modulate(self, input_bits):
""" Modulate (map) an array of bits to constellation symbols.
Parameters
----------
input_bits : 1D ndarray of ints
Input bits to be modulated (mapped).
Returns
-------
baseband_symbols : 1D ndarray of complex floats
Modulated complex symbols.
"""
index_list = map(lambda i: bitarray2dec(input_bits[i:i+self.num_bits_symbol]), \
xrange(0, len(input_bits), self.num_bits_symbol))
baseband_symbols = self.constellation[index_list]
return baseband_symbols
def demodulate(self, input_symbols, demod_type, noise_var = 0):
""" Demodulate (map) a set of constellation symbols to corresponding bits.
Supports hard-decision (bit) and soft-decision (LLR) demodulation.
Parameters
----------
input_symbols : 1D ndarray of complex floats
Input symbols to be demodulated.
demod_type : string
'hard' for hard decision output (bits)
'soft' for soft decision output (LLRs)
noise_var : float
AWGN variance. Needs to be specified only if demod_type is 'soft'
Returns
-------
demod_bits : 1D ndarray of ints or floats
Demodulated bits ('hard') or log-likelihood ratios ('soft').
"""
if demod_type == 'hard':
index_list = map(lambda i: argmin(abs(input_symbols[i] - self.constellation)), \
xrange(0, len(input_symbols)))
demod_bits = hstack(map(lambda i: dec2bitarray(i, self.num_bits_symbol),
index_list))
elif demod_type == 'soft':
demod_bits = zeros(len(input_symbols) * self.num_bits_symbol)
for i in arange(len(input_symbols)):
current_symbol = input_symbols[i]
for bit_index in arange(self.num_bits_symbol):
llr_num = 0
llr_den = 0
for const_index in self.symbol_mapping:
if (const_index >> bit_index) & 1:
llr_num = llr_num + exp((-abs(current_symbol - self.constellation[const_index])**2)/noise_var)
else:
llr_den = llr_den + exp((-abs(current_symbol - self.constellation[const_index])**2)/noise_var)
demod_bits[i*self.num_bits_symbol + self.num_bits_symbol - 1 - bit_index] = log(llr_num/llr_den)
else:
raise ValueError("demod_type must be 'hard' or 'soft'")
return demod_bits
class PSKModem(Modem):
""" Creates a Phase Shift Keying (PSK) Modem object. """
def _constellation_symbol(self, i):
return cos(2*pi*(i-1)/self.m) + sin(2*pi*(i-1)/self.m)*(0+1j)
def __init__(self, m):
""" Creates a Phase Shift Keying (PSK) Modem object.
Parameters
----------
m : int
Size of the PSK constellation.
"""
self.m = m
self.num_bits_symbol = int(log2(self.m))
self.symbol_mapping = arange(self.m)
self.constellation = array(map(self._constellation_symbol,
self.symbol_mapping))
class QAMModem(Modem):
""" Creates a Quadrature Amplitude Modulation (QAM) Modem object."""
def _constellation_symbol(self, i):
return (2*i[0]-1) + (2*i[1]-1)*(1j)
def __init__(self, m):
""" Creates a Quadrature Amplitude Modulation (QAM) Modem object.
Parameters
----------
m : int
Size of the QAM constellation.
"""
self.m = m
self.num_bits_symbol = int(log2(self.m))
self.symbol_mapping = arange(self.m)
mapping_array = arange(1, sqrt(self.m)+1) - (sqrt(self.m)/2)
self.constellation = array(map(self._constellation_symbol,
list(product(mapping_array, repeat=2))))
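# Usage sketch (illustrative): a noiseless QPSK round trip recovers the
# transmitted bits with hard-decision demodulation.
#   >>> from numpy import array
#   >>> modem = PSKModem(4)
#   >>> tx_bits = array([0, 1, 1, 0, 0, 0, 1, 1])
#   >>> symbols = modem.modulate(tx_bits)
#   >>> rx_bits = modem.demodulate(symbols, 'hard')   # equal to tx_bits here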
def ofdm_tx(x, nfft, nsc, cp_length):
""" OFDM Transmit signal generation """
nfft = int(nfft)
nsc = int(nsc)
cp_length = int(cp_length)
ofdm_tx_signal = array([])
for i in xrange(0, shape(x)[1]):
symbols = x[:,i]
ofdm_sym_freq = zeros(nfft, dtype=complex)
ofdm_sym_freq[1:(nsc/2)+1] = symbols[nsc/2:]
ofdm_sym_freq[-(nsc/2):] = symbols[0:nsc/2]
ofdm_sym_time = ifft(ofdm_sym_freq)
cp = ofdm_sym_time[-cp_length:]
ofdm_tx_signal = concatenate((ofdm_tx_signal, cp, ofdm_sym_time))
return ofdm_tx_signal
def ofdm_rx(y, nfft, nsc, cp_length):
""" OFDM Receive Signal Processing """
num_ofdm_symbols = int(len(y)/(nfft + cp_length))
x_hat = zeros([nsc, num_ofdm_symbols], dtype=complex)
for i in xrange(0, num_ofdm_symbols):
ofdm_symbol = y[i*nfft + (i+1)*cp_length:(i+1)*(nfft + cp_length)]
symbols_freq = fft(ofdm_symbol)
x_hat[:,i] = concatenate((symbols_freq[-nsc/2:], symbols_freq[1:(nsc/2)+1]))
return x_hat
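# Usage sketch (illustrative; parameters chosen arbitrarily): a 64-point FFT
# with 52 occupied subcarriers and a cyclic prefix of 16 samples; over an ideal
# channel ofdm_rx inverts ofdm_tx up to numerical precision.
#   >>> import numpy as np
#   >>> x = np.random.randn(52, 4) + 1j*np.random.randn(52, 4)
#   >>> x_hat = ofdm_rx(ofdm_tx(x, 64, 52, 16), 64, 52, 16)
#   >>> np.allclose(x, x_hat)
#   True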
def mimo_ml(y, h, constellation):
""" MIMO ML Detection.
Parameters
----------
y : 1D ndarray of complex floats
Received complex symbols (shape: num_receive_antennas x 1)
h : 2D ndarray of complex floats
Channel Matrix (shape: num_receive_antennas x num_transmit_antennas)
constellation : 1D ndarray of complex floats
Constellation used to modulate the symbols
Returns
-------
x_r : 1D ndarray of complex floats
Maximum-likelihood estimate of the transmitted symbol vector.
"""
m = len(constellation)
x_ideal = array([tile(constellation, m), repeat(constellation, m)])
y_vector = tile(y, m*m)
min_idx = argmin(sum(abs(y_vector - dot(h, x_ideal)), axis=0))
x_r = x_ideal[:, min_idx]
return x_r
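# Usage sketch (illustrative): 2x2 spatial multiplexing with a BPSK
# constellation; with an identity channel and no noise the detector returns
# the transmitted pair [1, -1].
#   >>> import numpy as np
#   >>> constellation = np.array([1.0 + 0j, -1.0 + 0j])
#   >>> h = np.eye(2)
#   >>> y = np.array([[1.0 + 0j], [-1.0 + 0j]])   # column vector, as in the docstring
#   >>> x_hat = mimo_ml(y, h, constellation)      # -> [ 1.+0.j, -1.+0.j]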

@ -0,0 +1,84 @@
# Authors: Veeresh Taranalli <veeresht@gmail.com>
# License: BSD 3-Clause
"""
==================================================
Sequences (:mod:`commpy.sequences`)
==================================================
.. autosummary::
:toctree: generated/
pnsequence -- PN Sequence Generator.
zcsequence -- Zadoff-Chu (ZC) Sequence Generator.
"""
__all__ = ['pnsequence', 'zcsequence']
from numpy import array, empty, zeros, roll, exp, pi, arange
def pnsequence(pn_order, pn_seed, pn_mask, seq_length):
"""
Generate a PN (Pseudo-Noise) sequence using a Linear Feedback Shift Register (LFSR).
Parameters
----------
pn_order : int
Number of delay elements used in the LFSR.
pn_seed : string containing 0's and 1's
Seed for the initialization of the LFSR delay elements.
The length of this string must be equal to 'pn_order'.
pn_mask : string containing 0's and 1's
Mask representing which delay elements contribute to the feedback
in the LFSR. The length of this string must be equal to 'pn_order'.
seq_length : int
Length of the PN sequence to be generated. Usually (2^pn_order - 1)
Returns
-------
pnseq : 1D ndarray of ints
PN sequence generated.
"""
# Sanity check: the seed and mask strings must each contain pn_order symbols
if len(pn_seed) != pn_order or len(pn_mask) != pn_order:
raise ValueError("pn_seed and pn_mask must both be of length pn_order")
pnseq = zeros(seq_length)
# Initialize shift register with the pn_seed
sr = array(map(lambda i: int(pn_seed[i]), xrange(0, len(pn_seed))))
for i in xrange(seq_length):
new_bit = 0
for j in xrange(pn_order):
if int(pn_mask[j]) == 1:
new_bit = new_bit ^ sr[j]
pnseq[i] = sr[pn_order-1]
sr = roll(sr, 1)
sr[0] = new_bit
return pnseq.astype(int)
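# Usage sketch (illustrative): with these taps the feedback polynomial
# (x^3 + x + 1) is primitive, so the output is a maximal-length sequence that
# repeats with period 2**3 - 1 = 7 for any non-zero seed.
#   >>> pnsequence(3, '101', '011', 14)   # the second 7 bits repeat the first 7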
def zcsequence(u, seq_length):
"""
Generate a Zadoff-Chu (ZC) sequence.
Parameters
----------
u : int
Root index of the ZC sequence.
seq_length : int
Length of the sequence to be generated. Usually a prime number.
Returns
-------
zcseq : 1D ndarray of complex floats
ZC sequence generated.
"""
zcseq = exp((-1j * pi * u * arange(seq_length) * (arange(seq_length)+1)) / seq_length)
return zcseq
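# Usage sketch (illustrative): ZC sequences have constant magnitude, so every
# generated sample lies on the unit circle.
#   >>> import numpy as np
#   >>> z = zcsequence(1, 63)
#   >>> np.allclose(np.abs(z), 1.0)
#   True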

scripts/commpy/utilities.py Normal file
@ -0,0 +1,144 @@
# Authors: Veeresh Taranalli <veeresht@gmail.com>
# License: BSD 3-Clause
"""
============================================
Utilities (:mod:`commpy.utilities`)
============================================
.. autosummary::
:toctree: generated/
dec2bitarray -- Integer to binary (bit array).
bitarray2dec -- Binary (bit array) to integer.
hamming_dist -- Hamming distance.
euclid_dist -- Squared Euclidean distance.
upsample -- Upsample by an integral factor (zero insertion).
"""
import numpy as np
__all__ = ['dec2bitarray', 'bitarray2dec', 'hamming_dist', 'euclid_dist', 'upsample']
def dec2bitarray(in_number, bit_width):
"""
Converts a positive integer to a NumPy array of the specified size containing
bits (0 and 1).
Parameters
----------
in_number : int
Positive integer to be converted to a bit array.
bit_width : int
Size of the output bit array.
Returns
-------
bitarray : 1D ndarray of ints
Array containing the binary representation of the input decimal.
"""
binary_string = bin(in_number)
length = len(binary_string)
bitarray = np.zeros(bit_width, 'int')
for i in range(length-2):
bitarray[bit_width-i-1] = int(binary_string[length-i-1])
return bitarray
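# Usage sketch (illustrative): 13 is 0b1101, zero-padded on the left to the
# requested bit_width.
#   >>> dec2bitarray(13, 6)
#   array([0, 0, 1, 1, 0, 1])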
def bitarray2dec(in_bitarray):
"""
Converts an input NumPy array of bits (0 and 1) to a decimal integer.
Parameters
----------
in_bitarray : 1D ndarray of ints
Input NumPy array of bits.
Returns
-------
number : int
Integer representation of input bit array.
"""
number = 0
for i in range(len(in_bitarray)):
number = number + in_bitarray[i]*pow(2, len(in_bitarray)-1-i)
return number
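# Usage sketch (illustrative): the inverse of dec2bitarray.
#   >>> import numpy as np
#   >>> bitarray2dec(np.array([0, 0, 1, 1, 0, 1]))
#   13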
def hamming_dist(in_bitarray_1, in_bitarray_2):
"""
Computes the Hamming distance between two NumPy arrays of bits (0 and 1).
Parameters
----------
in_bit_array_1 : 1D ndarray of ints
NumPy array of bits.
in_bit_array_2 : 1D ndarray of ints
NumPy array of bits.
Returns
-------
distance : int
Hamming distance between input bit arrays.
"""
distance = 0
for i, j in zip(in_bitarray_1, in_bitarray_2):
# Jinghao: 2016-10-19: handle "don't care" bits
if i in [0, 1] and j in [0, 1] and i != j:
distance += 1
return distance
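# Usage sketch (illustrative): entries outside {0, 1} act as "don't care"
# positions and are never counted as errors.
#   >>> import numpy as np
#   >>> hamming_dist(np.array([0, 1, -1, 1]), np.array([1, 1, 0, 0]))
#   2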
def euclid_dist(in_array1, in_array2):
"""
Computes the squared Euclidean distance between two NumPy arrays.
Parameters
----------
in_array1 : 1D ndarray of floats
NumPy array of real values.
in_array2 : 1D ndarray of floats
NumPy array of real values.
Returns
-------
distance : float
Squared Euclidean distance between two input arrays.
"""
distance = ((in_array1 - in_array2)*(in_array1 - in_array2)).sum()
return distance
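# Usage sketch (illustrative): note this is the squared distance, not its root.
#   >>> import numpy as np
#   >>> euclid_dist(np.array([1.0, 2.0]), np.array([0.0, 0.0]))
#   5.0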
def upsample(x, n):
"""
Upsample the input array by a factor of n.
Adds n-1 zeros between consecutive samples of x.
Parameters
----------
x : 1D ndarray
Input array.
n : int
Upsampling factor
Returns
-------
y : 1D ndarray
Output upsampled array.
"""
y = np.empty(len(x)*n, dtype=complex)
y[0::n] = x
zero_array = np.zeros(len(x), dtype=complex)
for i in range(1, n):
y[i::n] = zero_array
return y
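# Usage sketch (illustrative): upsampling by 3 inserts two zeros after every
# input sample, giving [1, 0, 0, 2, 0, 0, 3, 0, 0] as complex values.
#   >>> import numpy as np
#   >>> y = upsample(np.array([1, 2, 3]), 3)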