scripts init

This commit is contained in:
Jinghao Shi 2017-04-03 12:52:21 -04:00
parent 9edf1899bd
commit 20a33eb560
31 changed files with 4729 additions and 0 deletions

66
scripts/bin_to_mem.py Executable file
View File

@ -0,0 +1,66 @@
#!/usr/bin/env python
"""
Convert a binary data file (generated by rx_samples_to_file) to a memory text
file that can be read by Verilog's $readmemh.
"""
import argparse
import os
import time
import sys
CHUNK_SIZE = 2**20
def le_bin_str_to_signed_short(b):
    """Decode a 2-byte little-endian buffer as a signed 16-bit integer.

    Works on both Python 2 ``str`` and Python 3 ``bytes`` input
    (``bytearray`` yields ints either way).

    Fixes two defects of the original:
      * ``ord(b[1])`` fails on Python 3, where indexing bytes gives an int;
      * the sign test used ``v > (1 << 15)``, so 0x8000 (which must decode
        to -32768) was wrongly returned as +32768.
    """
    data = bytearray(b)
    v = (data[1] << 8) | data[0]
    if v >= (1 << 15):
        v -= 1 << 16
    return v
def signed_short_to_hex_str(n):
    """Return *n* as a 4-digit lowercase hex string (16-bit two's complement)."""
    # Reduce modulo 2^16 so negative values wrap to their two's-complement
    # bit pattern, then zero-pad to four hex digits.
    wrapped = n % (1 << 16)
    return '%04x' % wrapped
def main():
    """Parse arguments and convert a binary I/Q sample file to a text file
    readable by Verilog's $readmemh (one line per sample: IIII QQQQ hex).

    Fixes relative to the original:
      * ``time.clock()`` (removed in Python 3.8) replaced by ``time.time()``;
      * no longer shadows the builtins ``input`` and ``bytes``;
      * output opened in text mode (the original wrote ``str`` to a file
        opened ``'wb'``, which fails under Python 3);
      * integer division ``//`` keeps the scaled samples as ints so the
        '%04x' formatting in signed_short_to_hex_str keeps working;
      * guards against division by zero when the first chunk finishes
        within the clock resolution.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('file', help="Binary file.")
    parser.add_argument('--out', help="Output file.")
    parser.add_argument('--scale', type=int, default=1)
    args = parser.parse_args()
    if args.out is None:
        args.out = '%s.txt' % (os.path.splitext(args.file)[0])
    begin = time.time()
    byte_count = 0
    total_bytes = os.path.getsize(args.file)
    with open(args.file, 'rb') as fin:
        with open(args.out, 'w', buffering=2**26) as fout:
            while True:
                chunk = fin.read(CHUNK_SIZE)
                if len(chunk) == 0:
                    break
                # Each sample is 4 bytes: 16-bit little-endian I then Q.
                for i in range(0, len(chunk), 4):
                    I = le_bin_str_to_signed_short(chunk[i:i+2])//args.scale
                    Q = le_bin_str_to_signed_short(chunk[i+2:i+4])//args.scale
                    fout.write('%s%s\n' % (signed_short_to_hex_str(I),
                                           signed_short_to_hex_str(Q)))
                byte_count += len(chunk)
                elapsed = time.time() - begin
                # Avoid ZeroDivisionError if the clock has not advanced yet.
                speed = byte_count / elapsed if elapsed > 0 else float('inf')
                eta = (total_bytes - byte_count)/speed
                progress = '%d / %d B\tSpeed: %.1f B/s\t Elapsed: %d s\tETA: %d s' %\
                        (byte_count>>20, total_bytes, speed, int(elapsed), int(eta))
                sys.stdout.write('\r%s' % (progress))
                sys.stdout.flush()
    sys.stdout.write('\n')


if __name__ == '__main__':
    main()

View File

@ -0,0 +1,27 @@
"""
CommPy
================================================
Contents
--------
Subpackages
-----------
::
channelcoding --- Channel Coding Algorithms [*]
"""
#from channelcoding import *
from commpy.filters import *
from commpy.modulation import *
from commpy.impairments import *
from commpy.sequences import *
from commpy.channels import *
# Expose numpy's test runner when available. Older numpy versions provide
# numpy.testing.Tester; newer ones removed it, in which case we simply do
# not define `test`. Narrowed from a bare `except:` (which would also
# swallow KeyboardInterrupt/SystemExit) to ImportError.
try:
    from numpy.testing import Tester
    test = Tester().test
except ImportError:
    pass

View File

@ -0,0 +1,76 @@
"""
============================================
Channel Coding (:mod:`commpy.channelcoding`)
============================================
.. module:: commpy.channelcoding
Galois Fields
=============
.. autosummary::
:toctree: generated/
GF -- Class representing a Galois Field object.
Algebraic Codes
===============
.. autosummary::
:toctree: generated/
cyclic_code_genpoly -- Generate a cylic code generator polynomial.
Convolutional Codes
===================
.. autosummary::
:toctree: generated/
Trellis -- Class representing convolutional code trellis.
conv_encode -- Convolutional Encoder.
viterbi_decode -- Convolutional Decoder using the Viterbi algorithm.
Turbo Codes
===========
.. autosummary::
:toctree: generated/
turbo_encode -- Turbo Encoder.
map_decode -- Convolutional Code decoder using MAP algorithm.
turbo_decode -- Turbo Decoder.
LDPC Codes
==========
.. autosummary::
:toctree: generated/
get_ldpc_code_params -- Extract parameters from LDPC code design file.
ldpc_bp_decode -- LDPC Code Decoder using Belief propagation.
Interleavers and De-interleavers
================================
.. autosummary::
:toctree: generated/
RandInterlv -- Random Interleaver.
"""
from commpy.channelcoding.convcode import Trellis, conv_encode, viterbi_decode
from commpy.channelcoding.interleavers import *
from commpy.channelcoding.turbo import turbo_encode, map_decode, turbo_decode
from commpy.channelcoding.ldpc import get_ldpc_code_params, ldpc_bp_decode
from commpy.channelcoding.gfields import *
from commpy.channelcoding.algcode import *
# Expose numpy's test runner when available. Older numpy versions provide
# numpy.testing.Tester; newer ones removed it, in which case we simply do
# not define `test`. Narrowed from a bare `except:` (which would also
# swallow KeyboardInterrupt/SystemExit) to ImportError.
try:
    from numpy.testing import Tester
    test = Tester().test
except ImportError:
    pass

View File

@ -0,0 +1,73 @@
# Authors: Veeresh Taranalli <veeresht@gmail.com>
# License: BSD 3-Clause
from fractions import gcd
from numpy import array, arange, concatenate, convolve
from commpy.channelcoding.gfields import GF, polymultiply, poly_to_string
from commpy.utilities import dec2bitarray, bitarray2dec
__all__ = ['cyclic_code_genpoly']
def cyclic_code_genpoly(n, k):
    """
    Generate all possible generator polynomials for a (n, k)-cyclic code.

    Parameters
    ----------
    n : int
        Code blocklength of the cyclic code.
    k : int
        Information blocklength of the cyclic code.

    Returns
    -------
    poly_list : 1D ndarray of ints
        A list of generator polynomials (represented as integers) for the (n, k)-cyclic code.

    Raises
    ------
    ValueError
        If n is even (cyclic codes here require odd blocklength).
    """
    if n%2 == 0:
        raise ValueError("n cannot be an even number")
    # Find the smallest m such that n divides 2^m - 1, i.e. GF(2^m)
    # contains a primitive n-th root of unity.
    # NOTE(review): if no m <= 17 satisfies this, the loop silently falls
    # through with m == 17 — confirm intended range.
    for m in arange(1, 18):
        if (2**m-1)%n == 0:
            break
    # Partition the nonzero field elements into cyclotomic cosets; each
    # coset leader's minimal polynomial has degree == coset size.
    x_gf = GF(arange(1, 2**m), m)
    coset_fields = x_gf.cosets()
    coset_leaders = array([])
    minpol_degrees = array([])
    for field in coset_fields:
        coset_leaders = concatenate((coset_leaders, array([field.elements[0]])))
        minpol_degrees = concatenate((minpol_degrees, array([len(field.elements)])))
    y_gf = GF(coset_leaders, m)
    minpol_list = y_gf.minpolys()
    idx_list = arange(1, len(minpol_list))
    poly_list = array([])
    # Enumerate every non-empty subset of minimal polynomials (bitmask i);
    # a subset whose degrees sum to n-k yields one generator polynomial,
    # obtained as the GF(2) product (convolution mod 2) of its members.
    for i in range(1, 2**len(minpol_list)):
        i_array = dec2bitarray(i, len(minpol_list))
        subset_array = minpol_degrees[i_array == 1]
        if int(subset_array.sum()) == (n-k):
            poly_set = minpol_list[i_array == 1]
            gpoly = 1
            for poly in poly_set:
                gpoly_array = dec2bitarray(gpoly, 2**m)
                poly_array = dec2bitarray(poly, 2**m)
                gpoly = bitarray2dec(convolve(gpoly_array, poly_array) % 2)
            poly_list = concatenate((poly_list, array([gpoly])))
    return poly_list.astype(int)
if __name__ == "__main__":
    # Quick demo: print every generator polynomial of the (31, 21) cyclic code.
    for genpoly in cyclic_code_genpoly(31, 21):
        print(poly_to_string(genpoly))

View File

@ -0,0 +1,573 @@
# Authors: Veeresh Taranalli <veeresht@gmail.com>
# License: BSD 3-Clause
""" Algorithms for Convolutional Codes """
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import PatchCollection
import matplotlib.patches as mpatches
from commpy.utilities import dec2bitarray, bitarray2dec, hamming_dist, euclid_dist
#from commpy.channelcoding.acstb import acs_traceback
__all__ = ['Trellis', 'conv_encode', 'viterbi_decode']
class Trellis:
    """
    Class defining a Trellis corresponding to a k/n - rate convolutional code.

    Parameters
    ----------
    memory : 1D ndarray of ints
        Number of memory elements per input of the convolutional encoder.
    g_matrix : 2D ndarray of ints (octal representation)
        Generator matrix G(D) of the convolutional encoder. Each element of
        G(D) represents a polynomial.
    feedback : int, optional
        Feedback polynomial of the convolutional encoder. Default value is 00.
    code_type : {'default', 'rsc'}, optional
        Use 'rsc' to generate a recursive systematic convolutional code.
        If 'rsc' is specified, then the first 'k x k' sub-matrix of
        G(D) must represent a identity matrix along with a non-zero
        feedback polynomial.

    Attributes
    ----------
    k : int
        Size of the smallest block of input bits that can be encoded using
        the convolutional code.
    n : int
        Size of the smallest block of output bits generated using
        the convolutional code.
    total_memory : int
        Total number of delay elements needed to implement the convolutional
        encoder.
    number_states : int
        Number of states in the convolutional code trellis.
    number_inputs : int
        Number of branches from each state in the convolutional code trellis.
    next_state_table : 2D ndarray of ints
        Table representing the state transition matrix of the
        convolutional code trellis. Rows represent current states and
        columns represent current inputs in decimal. Elements represent the
        corresponding next states in decimal.
    output_table : 2D ndarray of ints
        Table representing the output matrix of the convolutional code trellis.
        Rows represent current states and columns represent current inputs in
        decimal. Elements represent corresponding outputs in decimal.

    Examples
    --------
    >>> from numpy import array
    >>> import commpy.channelcoding.convcode as cc
    >>> memory = array([2])
    >>> g_matrix = array([[05, 07]]) # G(D) = [1+D^2, 1+D+D^2]
    >>> trellis = cc.Trellis(memory, g_matrix)
    >>> print trellis.k
    1
    >>> print trellis.n
    2
    >>> print trellis.total_memory
    2
    >>> print trellis.number_states
    4
    >>> print trellis.number_inputs
    2
    >>> print trellis.next_state_table
    [[0 2]
     [0 2]
     [1 3]
     [1 3]]
    >>> print trellis.output_table
    [[0 3]
     [3 0]
     [1 2]
     [2 1]]
    """
    def __init__(self, memory, g_matrix, feedback = 0, code_type = 'default'):
        [self.k, self.n] = g_matrix.shape
        if code_type == 'rsc':
            # For an RSC code the diagonal of G(D) carries the feedback
            # polynomial instead of the systematic identity entries.
            for i in range(self.k):
                g_matrix[i][i] = feedback
        self.total_memory = memory.sum()
        self.number_states = pow(2, self.total_memory)
        self.number_inputs = pow(2, self.k)
        self.next_state_table = np.zeros([self.number_states,
                                          self.number_inputs], 'int')
        self.output_table = np.zeros([self.number_states,
                                      self.number_inputs], 'int')
        # Compute the entries in the next state table and the output table
        # by simulating the encoder's shift register for every
        # (state, input) pair.
        for current_state in range(self.number_states):
            for current_input in range(self.number_inputs):
                outbits = np.zeros(self.n, 'int')
                # Compute the values in the output_table
                for r in range(self.n):
                    output_generator_array = np.zeros(self.k, 'int')
                    shift_register = dec2bitarray(current_state,
                                                  self.total_memory)
                    for l in range(self.k):
                        # Convert the number representing a polynomial into a
                        # bit array
                        generator_array = dec2bitarray(g_matrix[l][r],
                                                       memory[l]+1)
                        # Loop over M delay elements of the shift register
                        # to compute their contribution to the r-th output
                        for i in range(memory[l]):
                            outbits[r] = (outbits[r] + \
                                (shift_register[i+l]*generator_array[i+1])) % 2
                        output_generator_array[l] = generator_array[0]
                        # Clock the shift register for input l (feedback
                        # XOR-ed into the first stage of that input's
                        # register segment).
                        if l == 0:
                            feedback_array = (dec2bitarray(feedback, memory[l]) * shift_register[0:memory[l]]).sum()
                            shift_register[1:memory[l]] = \
                                shift_register[0:memory[l]-1]
                            shift_register[0] = (dec2bitarray(current_input,
                                self.k)[0] + feedback_array) % 2
                        else:
                            feedback_array = (dec2bitarray(feedback, memory[l]) *
                                shift_register[l+memory[l-1]-1:l+memory[l-1]+memory[l]-1]).sum()
                            shift_register[l+memory[l-1]:l+memory[l-1]+memory[l]-1] = \
                                shift_register[l+memory[l-1]-1:l+memory[l-1]+memory[l]-2]
                            shift_register[l+memory[l-1]-1] = \
                                (dec2bitarray(current_input, self.k)[l] + feedback_array) % 2
                    # Compute the contribution of the current_input to output
                    outbits[r] = (outbits[r] + \
                        (np.sum(dec2bitarray(current_input, self.k) * \
                        output_generator_array + feedback_array) % 2)) % 2
                # Update the output_table using the computed output value
                self.output_table[current_state][current_input] = \
                    bitarray2dec(outbits)
                # Update the next_state_table using the new state of
                # the shift register
                self.next_state_table[current_state][current_input] = \
                    bitarray2dec(shift_register)

    def _generate_grid(self, trellis_length):
        """ Private method: grid of (x, y) centres for the state circles
        at each of the trellis_length time steps. """
        grid = np.mgrid[0.12:0.22*trellis_length:(trellis_length+1)*(0+1j),
                        0.1:0.1+self.number_states*0.1:self.number_states*(0+1j)].reshape(2, -1)
        return grid

    def _generate_states(self, trellis_length, grid, state_order, state_radius, font):
        """ Private method: draw one circle per state per time step and
        label it with the state number. """
        state_patches = []
        for state_count in range(self.number_states * trellis_length):
            state_patch = mpatches.Circle(grid[:,state_count], state_radius,
                                          color="#003399", ec="#cccccc")
            state_patches.append(state_patch)
            plt.text(grid[0, state_count], grid[1, state_count]-0.02,
                     str(state_order[state_count % self.number_states]),
                     ha="center", family=font, size=20, color="#ffffff")
        return state_patches

    def _generate_edges(self, trellis_length, grid, state_order, state_radius, edge_colors):
        """ Private method: draw an arrow for every valid state transition,
        colored by the input symbol that causes it. """
        edge_patches = []
        for current_time_index in range(trellis_length-1):
            grid_subset = grid[:,self.number_states * current_time_index:]
            for state_count_1 in range(self.number_states):
                input_count = 0
                for state_count_2 in range(self.number_states):
                    dx = grid_subset[0, state_count_2+self.number_states] - grid_subset[0,state_count_1] - 2*state_radius
                    dy = grid_subset[1, state_count_2+self.number_states] - grid_subset[1,state_count_1]
                    # Only draw an edge if some input takes state_1 to state_2.
                    if np.count_nonzero(self.next_state_table[state_order[state_count_1],:] == state_order[state_count_2]):
                        found_index = np.where(self.next_state_table[state_order[state_count_1],:] ==
                                               state_order[state_count_2])
                        edge_patch = mpatches.FancyArrow(grid_subset[0,state_count_1]+state_radius,
                                grid_subset[1,state_count_1], dx, dy, width=0.005,
                                length_includes_head = True, color = edge_colors[found_index[0][0]])
                        edge_patches.append(edge_patch)
                        input_count = input_count + 1
        return edge_patches

    def _generate_labels(self, grid, state_order, state_radius, font):
        """ Private method: write the 'input/output' label beside each
        state's outgoing edges. """
        for state_count in range(self.number_states):
            for input_count in range(self.number_inputs):
                edge_label = str(input_count) + "/" + str(
                        self.output_table[state_order[state_count], input_count])
                plt.text(grid[0, state_count]-1.5*state_radius,
                         grid[1, state_count]+state_radius*(1-input_count-0.7),
                         edge_label, ha="center", family=font, size=14)

    def visualize(self, trellis_length = 2, state_order = None,
                  state_radius = 0.04, edge_colors = None):
        """ Plot the trellis diagram.

        Parameters
        ----------
        trellis_length : int, optional
            Specifies the number of time steps in the trellis diagram.
            Default value is 2.
        state_order : list of ints, optional
            Specifies the order in the which the states of the trellis
            are to be displayed starting from the top in the plot.
            Default order is [0,...,number_states-1]
        state_radius : float, optional
            Radius of each state (circle) in the plot.
            Default value is 0.04
        edge_colors : list of hex color codes, optional
            A list of length equal to the number_inputs,
            containing color codes that represent the edge corresponding
            to the input.
        """
        if edge_colors is None:
            edge_colors = ["#9E1BE0", "#06D65D"]
        if state_order is None:
            state_order = range(self.number_states)
        font = "sans-serif"
        fig = plt.figure()
        ax = plt.axes([0,0,1,1])
        trellis_patches = []
        # NOTE(review): `range` objects have no .reverse() under Python 3;
        # this assumes Python 2 where range() returns a list — confirm.
        state_order.reverse()
        trellis_grid = self._generate_grid(trellis_length)
        state_patches = self._generate_states(trellis_length, trellis_grid,
                                              state_order, state_radius, font)
        edge_patches = self._generate_edges(trellis_length, trellis_grid,
                                            state_order, state_radius,
                                            edge_colors)
        self._generate_labels(trellis_grid, state_order, state_radius, font)
        trellis_patches.extend(state_patches)
        trellis_patches.extend(edge_patches)
        collection = PatchCollection(trellis_patches, match_original=True)
        ax.add_collection(collection)
        ax.set_xticks([])
        ax.set_yticks([])
        #plt.legend([edge_patches[0], edge_patches[1]], ["1-input", "0-input"])
        plt.show()
def conv_encode(message_bits, trellis, code_type = 'default', puncture_matrix=None):
    """
    Encode bits using a convolutional code.

    Parameters
    ----------
    message_bits : 1D ndarray containing {0, 1}
        Stream of bits to be convolutionally encoded.
    trellis : Trellis object
        Trellis representation of the convolutional code used for encoding.
    code_type : {'default', 'rsc'}, optional
        'default' zero-pads the message to terminate the trellis;
        'rsc' appends tail bits that drive a recursive systematic
        encoder back to the all-zero state.
    puncture_matrix : 2D ndarray containing {0, 1}, optional
        Puncturing pattern; ones mark output bit positions that are kept.
        Defaults to all ones (no puncturing).

    Returns
    -------
    coded_bits : 1D ndarray containing {0, 1}
        Encoded (and possibly punctured) bit stream.
    """
    k = trellis.k
    n = trellis.n
    total_memory = trellis.total_memory
    rate = float(k)/n
    if puncture_matrix is None:
        puncture_matrix = np.ones((trellis.k, trellis.n))
    number_message_bits = np.size(message_bits)
    # Initialize an array to contain the message bits plus the truncation zeros
    if code_type == 'default':
        inbits = np.zeros(number_message_bits + total_memory + total_memory % k,
                          'int')
        number_inbits = number_message_bits + total_memory + total_memory % k
        # Pad the input bits with M zeros (L-th terminated truncation)
        inbits[0:number_message_bits] = message_bits
        number_outbits = int(number_inbits/rate)
    else:
        inbits = message_bits
        number_inbits = number_message_bits
        number_outbits = int((number_inbits + total_memory)/rate)
    outbits = np.zeros(number_outbits, 'int')
    # NOTE(review): puncture_matrix[0:] selects the whole matrix, not row 0,
    # so for k > 1 this over-allocates p_outbits — verify intended.
    p_outbits = np.zeros(int(number_outbits*
        puncture_matrix[0:].sum()/np.size(puncture_matrix, 1)), 'int')
    next_state_table = trellis.next_state_table
    output_table = trellis.output_table
    # Encoding process - Each iteration of the loop represents one clock cycle
    current_state = 0
    j = 0
    for i in range(int(number_inbits/k)): # Loop through all input bits
        current_input = bitarray2dec(inbits[i*k:(i+1)*k])
        current_output = output_table[current_state][current_input]
        outbits[j*n:(j+1)*n] = dec2bitarray(current_output, n)
        current_state = next_state_table[current_state][current_input]
        j += 1
    if code_type == 'rsc':
        # Tail-biting termination: feed the bit-reversed current state back
        # in so the recursive encoder returns to the all-zero state.
        term_bits = dec2bitarray(current_state, trellis.total_memory)
        term_bits = term_bits[::-1]
        for i in range(trellis.total_memory):
            current_input = bitarray2dec(term_bits[i*k:(i+1)*k])
            current_output = output_table[current_state][current_input]
            outbits[j*n:(j+1)*n] = dec2bitarray(current_output, n)
            current_state = next_state_table[current_state][current_input]
            j += 1
    # Apply the puncturing pattern, cycling through the first row of the
    # puncture matrix; only positions marked 1 are emitted.
    j = 0
    for i in range(number_outbits):
        if puncture_matrix[0][i % np.size(puncture_matrix, 1)] == 1:
            p_outbits[j] = outbits[i]
            j = j + 1
    return p_outbits
def _where_c(inarray, rows, cols, search_value, index_array):
#cdef int i, j,
number_found = 0
for i in range(rows):
for j in range(cols):
if inarray[i, j] == search_value:
index_array[number_found, 0] = i
index_array[number_found, 1] = j
number_found += 1
return number_found
def _acs_traceback(r_codeword, trellis, decoding_type,
                   path_metrics, paths, decoded_symbols,
                   decoded_bits, tb_count, t, count,
                   tb_depth, current_number_states):
    """
    One Add-Compare-Select (ACS) step of the Viterbi algorithm plus,
    once the traceback depth is reached, a traceback pass that writes
    decoded bits.

    path_metrics, paths, decoded_symbols and decoded_bits are all
    updated in place; nothing is returned. Columns 0/1 of path_metrics
    hold the metrics at time t-1 / t respectively.
    """
    #cdef int state_num, i, j, number_previous_states, previous_state, \
    #        previous_input, i_codeword, number_found, min_idx, \
    #        current_state, dec_symbol
    k = trellis.k
    n = trellis.n
    number_states = trellis.number_states
    number_inputs = trellis.number_inputs
    branch_metric = 0.0
    next_state_table = trellis.next_state_table
    output_table = trellis.output_table
    pmetrics = np.empty(number_inputs)
    i_codeword_array = np.empty(n, 'int')
    index_array = np.empty([number_states, 2], 'int')
    decoded_bitarray = np.empty(k, 'int')
    # Loop over all the current states (Time instant: t)
    for state_num in range(current_number_states):
        # Using the next state table find the previous states and inputs
        # leading into the current state (Trellis)
        number_found = _where_c(next_state_table, number_states, number_inputs, state_num, index_array)
        # Loop over all the previous states (Time instant: t-1)
        for i in range(number_found):
            previous_state = index_array[i, 0]
            previous_input = index_array[i, 1]
            # Using the output table, find the ideal codeword
            i_codeword = output_table[previous_state, previous_input]
            #dec2bitarray_c(i_codeword, n, i_codeword_array)
            i_codeword_array = dec2bitarray(i_codeword, n)
            # Compute Branch Metrics
            if decoding_type == 'hard':
                #branch_metric = hamming_dist_c(r_codeword.astype(int), i_codeword_array.astype(int), n)
                branch_metric = hamming_dist(r_codeword.astype(int), i_codeword_array.astype(int))
            elif decoding_type == 'soft':
                pass
            elif decoding_type == 'unquantized':
                # Map {0,1} codeword bits to {-1,+1} before the Euclidean
                # distance against the unquantized channel output.
                i_codeword_array = 2*i_codeword_array - 1
                branch_metric = euclid_dist(r_codeword, i_codeword_array)
            else:
                pass
            # ADD operation: Add the branch metric to the
            # accumulated path metric and store it in the temporary array
            pmetrics[i] = path_metrics[previous_state, 0] + branch_metric
        # COMPARE and SELECT operations
        # Compare and Select the minimum accumulated path metric
        path_metrics[state_num, 1] = pmetrics.min()
        # Store the previous state corresponding to the minimum
        # accumulated path metric
        min_idx = pmetrics.argmin()
        paths[state_num, tb_count] = index_array[min_idx, 0]
        # Store the previous input corresponding to the minimum
        # accumulated path metric
        decoded_symbols[state_num, tb_count] = index_array[min_idx, 1]
    if t >= tb_depth - 1:
        # Start the traceback from the state with the best metric and walk
        # the survivor path backwards, emitting the stored input symbols.
        current_state = path_metrics[:,1].argmin()
        # Traceback Loop
        for j in reversed(range(1, tb_depth)):
            dec_symbol = decoded_symbols[current_state, j]
            previous_state = paths[current_state, j]
            decoded_bitarray = dec2bitarray(dec_symbol, k)
            decoded_bits[(t-tb_depth-1)+(j+1)*k+count:(t-tb_depth-1)+(j+2)*k+count] = \
                    decoded_bitarray
            current_state = previous_state
        # Slide the traceback window one step to the left.
        paths[:,0:tb_depth-1] = paths[:,1:]
        decoded_symbols[:,0:tb_depth-1] = decoded_symbols[:,1:]
def viterbi_decode(coded_bits, trellis, tb_depth=None, decoding_type='hard'):
    """
    Decodes a stream of convolutionally encoded bits using the Viterbi Algorithm

    Parameters
    ----------
    coded_bits : 1D ndarray
        Stream of convolutionally encoded bits which are to be decoded.
    trellis : Trellis object
        Trellis representation of the convolutional code used for encoding.
    tb_depth : int, optional
        Traceback depth (typically set to about 5 times the encoder
        memory). Defaults to 5 * trellis.total_memory.
    decoding_type : str {'hard', 'unquantized'}
        The type of decoding to be used.
        'hard' option is used for hard inputs (bits) to the decoder, e.g., BSC channel.
        'unquantized' option is used for soft inputs (real numbers) to the decoder, e.g., BAWGN channel.

    Returns
    -------
    decoded_bits : 1D ndarray
        Decoded bit stream.

    References
    ----------
    .. [1] Todd K. Moon. Error Correction Coding: Mathematical Methods and
        Algorithms. John Wiley and Sons, 2005.
    """
    # k = Rows in G(D), n = columns in G(D)
    k = trellis.k
    n = trellis.n
    rate = float(k)/n
    total_memory = trellis.total_memory
    number_states = trellis.number_states
    number_inputs = trellis.number_inputs
    if tb_depth is None:
        tb_depth = 5*total_memory
    next_state_table = trellis.next_state_table
    output_table = trellis.output_table
    # Number of message bits after decoding
    L = int(len(coded_bits)*rate)
    # 1000000 acts as "infinity" so only the all-zero start state is viable.
    path_metrics = np.empty([number_states, 2])
    path_metrics[:, :] = 1000000
    path_metrics[0][0] = 0
    paths = np.empty([number_states, tb_depth], 'int')
    paths[:, :] = 1000000
    paths[0][0] = 0
    decoded_symbols = np.zeros([number_states, tb_depth], 'int')
    decoded_bits = np.zeros(L+tb_depth+k, 'int')
    r_codeword = np.zeros(n, 'int')
    tb_count = 1
    count = 0
    current_number_states = number_states
    for t in range(1, int((L+total_memory+total_memory%k)/k) + 1):
        # Get the received codeword corresponding to t
        if t <= L:
            r_codeword = coded_bits[(t-1)*n:t*n]
        else:
            # Past the message: feed the termination (all-zero) codeword,
            # mapped to {-1,+1} for unquantized decoding.
            if decoding_type == 'hard':
                r_codeword[:] = 0
            elif decoding_type == 'soft':
                pass
            elif decoding_type == 'unquantized':
                r_codeword[:] = 0
                r_codeword = 2*r_codeword - 1
            else:
                pass
        _acs_traceback(r_codeword, trellis, decoding_type, path_metrics, paths,
                decoded_symbols, decoded_bits, tb_count, t, count, tb_depth,
                current_number_states)
        if t >= tb_depth - 1:
            tb_count = tb_depth - 1
            count = count + k - 1
        else:
            tb_count = tb_count + 1
        # Path metrics (at t-1) = Path metrics (at t)
        path_metrics[:, 0] = path_metrics[:, 1]
        # Force all the paths back to '0' state at the end of decoding
        if t == (L+total_memory+total_memory%k)/k:
            current_number_states = 1
    # Strip the tail produced by the traceback window.
    return decoded_bits[0:len(decoded_bits)-tb_depth-1]

View File

@ -0,0 +1,148 @@
96 48
3 6
3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3
6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
10 30 40
5 32 45
16 18 39
12 22 38
15 19 47
2 17 34
9 24 42
1 29 33
4 27 36
3 26 35
11 31 43
7 21 44
8 20 48
14 23 46
6 28 37
13 25 41
14 32 43
5 23 37
2 31 36
1 28 34
7 25 47
10 21 33
15 30 35
16 26 48
3 22 46
12 20 41
8 18 38
4 19 45
6 24 40
9 27 39
13 17 42
11 29 44
8 24 34
6 25 36
9 19 43
1 20 46
14 27 42
7 22 39
13 18 35
4 26 40
16 29 38
15 21 48
11 23 45
3 17 47
5 28 44
12 32 33
2 30 41
10 31 37
10 18 36
4 23 44
9 29 40
2 27 38
8 30 42
12 28 43
11 20 37
1 19 35
15 31 39
16 32 41
5 26 33
3 25 45
13 21 34
14 24 48
7 17 46
6 22 47
7 27 40
11 18 33
2 32 35
10 28 47
5 24 41
12 25 37
3 19 39
14 31 44
16 30 34
13 20 38
9 22 36
6 17 45
4 21 42
15 29 46
8 26 43
1 23 48
1 25 42
15 22 40
8 21 41
9 18 47
6 27 43
11 30 46
7 31 35
5 20 36
14 17 38
16 28 45
4 32 37
13 23 33
12 26 44
3 29 48
2 24 39
10 19 34
8 20 36 56 80 81
6 19 47 52 67 95
10 25 44 60 71 94
9 28 40 50 77 91
2 18 45 59 69 88
15 29 34 64 76 85
12 21 38 63 65 87
13 27 33 53 79 83
7 30 35 51 75 84
1 22 48 49 68 96
11 32 43 55 66 86
4 26 46 54 70 93
16 31 39 61 74 92
14 17 37 62 72 89
5 23 42 57 78 82
3 24 41 58 73 90
6 31 44 63 76 89
3 27 39 49 66 84
5 28 35 56 71 96
13 26 36 55 74 88
12 22 42 61 77 83
4 25 38 64 75 82
14 18 43 50 80 92
7 29 33 62 69 95
16 21 34 60 70 81
10 24 40 59 79 93
9 30 37 52 65 85
15 20 45 54 68 90
8 32 41 51 78 94
1 23 47 53 73 86
11 19 48 57 72 87
2 17 46 58 67 91
8 22 46 59 66 92
6 20 33 61 73 96
10 23 39 56 67 87
9 19 34 49 75 88
15 18 48 55 70 91
4 27 41 52 74 89
3 30 38 57 71 95
1 29 40 51 65 82
16 26 47 58 69 83
7 31 37 53 77 81
11 17 35 54 79 85
12 32 45 50 72 93
2 28 43 60 76 90
14 25 36 63 78 86
5 21 44 64 68 84
13 24 42 62 80 94

View File

@ -0,0 +1,148 @@
96 48
3 6
3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3
6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
47 4 21
33 38 31
11 1 33
3 48 37
42 9 36
17 22 7
48 15 13
40 28 47
22 42 5
28 33 30
27 18 19
2 34 10
38 41 27
18 7 32
16 32 45
26 24 1
25 16 22
35 25 34
37 2 11
21 3 39
34 21 28
12 13 6
1 39 38
9 8 12
44 12 48
29 14 9
31 29 26
5 46 14
36 6 24
46 23 3
45 30 4
24 11 8
23 10 42
7 35 43
32 19 41
19 20 25
15 47 46
39 31 2
13 43 20
43 40 15
8 5 35
4 26 44
6 37 17
10 45 18
20 27 29
30 17 16
41 36 23
14 44 40
7 31 42
25 23 21
22 34 41
42 3 19
40 35 27
21 19 17
4 8 28
35 45 31
2 28 32
37 30 9
38 40 30
34 36 13
33 46 10
32 12 40
18 41 11
17 1 2
45 39 29
9 48 4
47 11 34
19 29 24
44 17 5
15 2 3
16 21 33
11 20 44
20 9 47
23 47 38
24 16 12
41 24 37
39 5 43
6 43 23
31 10 16
48 33 35
28 18 48
8 42 18
36 32 8
14 6 25
29 15 36
46 38 26
5 4 6
27 44 22
26 22 45
43 27 1
10 25 39
12 14 7
13 7 46
30 13 14
3 26 20
1 37 15
23 96 3 64 16 90
12 57 19 70 38 64
4 95 20 52 30 70
42 55 1 87 31 66
28 87 41 77 9 69
43 78 29 84 22 87
34 49 14 93 6 92
41 82 24 55 32 83
24 66 5 73 26 58
44 91 33 79 12 61
3 72 32 67 19 63
22 92 25 62 24 75
39 93 22 94 7 60
48 84 26 92 28 94
37 70 7 85 40 96
15 71 17 75 46 79
6 64 46 69 43 54
14 63 11 81 44 82
36 68 35 54 11 52
45 73 36 72 39 95
20 54 21 71 1 50
9 51 6 89 17 88
33 74 30 50 47 78
32 75 16 76 29 68
17 50 18 91 36 84
16 89 42 95 27 86
11 88 45 90 13 53
10 81 8 57 21 55
26 85 27 68 45 65
46 94 31 58 10 59
27 79 38 49 2 56
35 62 15 83 14 57
2 61 10 80 3 71
21 60 12 51 18 67
18 56 34 53 41 80
29 83 47 60 5 85
19 58 43 96 4 76
13 59 2 86 23 74
38 77 23 65 20 91
8 53 40 59 48 62
47 76 13 63 35 51
5 52 9 82 33 49
40 90 39 78 34 77
25 69 48 88 42 72
31 65 44 56 15 89
30 86 28 61 37 93
1 67 37 74 8 73
7 80 4 66 25 81

View File

@ -0,0 +1,196 @@
# Authors: Veeresh Taranalli <veeresht@gmail.com>
# License: BSD 3-Clause
""" Galois Fields """
from fractions import gcd
from numpy import array, zeros, arange, convolve, ndarray, concatenate
from itertools import *
from commpy.utilities import dec2bitarray, bitarray2dec
__all__ = ['GF', 'polydivide', 'polymultiply', 'poly_to_string']
class GF:
    """
    Defines a Binary Galois Field of order m, containing n,
    where n can be a single element or a list of elements within the field.

    Parameters
    ----------
    n : int
        Represents the Galois field element(s).
    m : int
        Specifies the order of the Galois Field.

    Returns
    -------
    x : int
        A Galois Field GF(2\ :sup:`m`) object.

    Examples
    --------
    >>> from numpy import arange
    >>> from gfields import GF
    >>> x = arange(16)
    >>> m = 4
    >>> x = GF(x, m)
    >>> print x.elements
    [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
    >>> print x.prim_poly
    19
    """
    # Initialization
    def __init__(self, x, m):
        self.m = m
        # Primitive polynomials (as integers) indexed by the field order m.
        primpoly_array = array([0, 3, 7, 11, 19, 37, 67, 137, 285, 529, 1033,
                                2053, 4179, 8219, 17475, 32771, 69643])
        self.prim_poly = primpoly_array[self.m]
        # Accept either a single in-range int or a non-empty ndarray of
        # elements. NOTE(review): any other input leaves `elements` unset.
        if type(x) is int and x >= 0 and x < pow(2, m):
            self.elements = array([x])
        elif type(x) is ndarray and len(x) >= 1:
            self.elements = x

    # Overloading addition operator for Galois Field
    def __add__(self, x):
        # GF(2^m) addition is bitwise XOR, applied element-wise.
        if len(self.elements) == len(x.elements):
            return GF(self.elements ^ x.elements, self.m)
        else:
            raise ValueError("The arguments should have the same number of elements")

    # Overloading multiplication operator for Galois Field
    def __mul__(self, x):
        # Element-wise polynomial multiplication modulo the primitive polynomial.
        if len(x.elements) == len(self.elements):
            prod_elements = arange(len(self.elements))
            for i in range(len(self.elements)):
                prod_elements[i] = polymultiply(self.elements[i], x.elements[i], self.m, self.prim_poly)
            return GF(prod_elements, self.m)
        else:
            raise ValueError("Two sets of elements cannot be multiplied")

    def power_to_tuple(self):
        """
        Convert Galois field elements from power form to tuple form representation.
        """
        y = zeros(len(self.elements))
        for idx, i in enumerate(self.elements):
            # alpha^i is 2^i when it fits in the field, otherwise reduce
            # modulo the primitive polynomial.
            if 2**i < 2**self.m:
                y[idx] = 2**i
            else:
                y[idx] = polydivide(2**i, self.prim_poly)
        return GF(y, self.m)

    def tuple_to_power(self):
        """
        Convert Galois field elements from tuple form to power form representation.
        """
        y = zeros(len(self.elements))
        for idx, i in enumerate(self.elements):
            if i != 0:
                init_state = 1
                cur_state = 1
                power = 0
                # Step an LFSR seeded at alpha^0 = 1 until it reaches the
                # element; the number of steps is its discrete logarithm.
                while cur_state != i:
                    cur_state = ((cur_state << 1) & (2**self.m-1)) ^ (-((cur_state & 2**(self.m-1)) >> (self.m - 1)) &
                            (self.prim_poly & (2**self.m-1)))
                    power+=1
                y[idx] = power
            else:
                y[idx] = 0
        return GF(y, self.m)

    def order(self):
        """
        Compute the orders of the Galois field elements.
        """
        orders = zeros(len(self.elements))
        power_gf = self.tuple_to_power()
        for idx, i in enumerate(power_gf.elements):
            # NOTE(review): relies on fractions.gcd (removed in Python 3.9)
            # accepting float arguments; math.gcd would reject them.
            orders[idx] = (2**self.m - 1)/(gcd(i, 2**self.m-1))
        return orders

    def cosets(self):
        """
        Compute the cyclotomic cosets of the Galois field.
        """
        coset_list = []
        x = self.tuple_to_power().elements
        mark_list = zeros(len(x))
        coset_count = 1
        # Mark each element with the id of the coset {a, 2a, 4a, ...} mod
        # (2^m - 1) it belongs to, then group elements by that mark.
        for idx in range(len(x)):
            if mark_list[idx] == 0:
                a = x[idx]
                mark_list[idx] = coset_count
                i = 1
                while (a*(2**i) % (2**self.m-1)) != a:
                    for idx2 in range(len(x)):
                        if (mark_list[idx2] == 0) and (x[idx2] == a*(2**i)%(2**self.m-1)):
                            mark_list[idx2] = coset_count
                    i+=1
                coset_count+=1
        for counts in range(1, coset_count):
            coset_list.append(GF(self.elements[mark_list==counts], self.m))
        return coset_list

    def minpolys(self):
        """
        Compute the minimal polynomials for all elements of the Galois field.
        """
        minpol_list = array([])
        full_gf = GF(arange(2**self.m), self.m)
        full_cosets = full_gf.cosets()
        for x in self.elements:
            for i in range(len(full_cosets)):
                if x in full_cosets[i].elements:
                    # Multiply out (X - root) over every conjugate in the
                    # element's coset; t holds coefficients with the
                    # highest degree first (hence the [::-1] flips).
                    t = array([1, full_cosets[i].elements[0]])[::-1]
                    for root in full_cosets[i].elements[1:]:
                        t2 = concatenate((zeros(len(t)-1), array([1, root]), zeros(len(t)-1)))
                        prod_poly = array([])
                        # Convolution of t with (X - root), with each
                        # coefficient product reduced in GF(2^m).
                        for n in range(len(t2)-len(t)+1):
                            root_sum = 0
                            for k in range(len(t)):
                                root_sum = root_sum ^ polymultiply(int(t[k]), int(t2[n+k]), self.m, self.prim_poly)
                            prod_poly = concatenate((prod_poly, array([root_sum])))
                        t = prod_poly[::-1]
                    minpol_list = concatenate((minpol_list, array([bitarray2dec(t[::-1])])))
        return minpol_list.astype(int)
def polydivide(x, y):
    """
    Return the remainder of GF(2) polynomial division x mod y.

    Polynomials are represented as integers whose bits are the binary
    coefficients (bit i = coefficient of X^i).

    Fixes the original implementation, which compared ``len(bin(r))``
    against a stale copy ``r = y`` and looped forever for ``y == 1``
    (and needed a redundant extra XOR pass when deg(x) <= deg(y)).
    """
    # Repeatedly cancel the leading bit of x by XOR-ing in y shifted up
    # to align with it; stops as soon as deg(x) < deg(y).
    while x.bit_length() >= y.bit_length():
        x ^= y << (x.bit_length() - y.bit_length())
    return x
def polymultiply(x, y, m, prim_poly):
    """Multiply two GF(2^m) elements x and y modulo prim_poly."""
    # Polynomial product over GF(2): convolve the coefficient bit arrays
    # and reduce each coefficient mod 2, then reduce the result by the
    # primitive polynomial to land back inside the field.
    coeffs_x = dec2bitarray(x, m)
    coeffs_y = dec2bitarray(y, m)
    raw_product = bitarray2dec(convolve(coeffs_x, coeffs_y) % 2)
    return polydivide(raw_product, prim_poly)
def poly_to_string(x):
    """
    Return a human-readable string for the GF(2) polynomial `x`.

    Bit i of `x` is the coefficient of x^i, e.g. 5 -> 'x^0 + x^2'.
    Returns the empty string for x == 0.
    """
    terms = []
    i = 0
    # Walk the bits from LSB (degree 0) upwards.
    while x != 0:
        if x % 2 == 1:
            terms.append("x^" + str(i))
        x = x >> 1
        i += 1
    # Joining the terms fixes the original's dangling separator: it sliced
    # off only 2 of the 3 trailing ' + ' characters, leaving a stray space.
    return " + ".join(terms)

View File

@ -0,0 +1,84 @@
# Authors: Veeresh Taranalli <veeresht@gmail.com>
# License: BSD 3-Clause
""" Interleavers and De-interleavers """
from numpy import array, arange, zeros
from numpy.random import mtrand
__all__ = ['RandInterlv']
class _Interleaver:
def interlv(self, in_array):
""" Interleave input array using the specific interleaver.
Parameters
----------
in_array : 1D ndarray of ints
Input data to be interleaved.
Returns
-------
out_array : 1D ndarray of ints
Interleaved output data.
"""
out_array = array(map(lambda x: in_array[x], self.p_array))
return out_array
def deinterlv(self, in_array):
""" De-interleave input array using the specific interleaver.
Parameters
----------
in_array : 1D ndarray of ints
Input data to be de-interleaved.
Returns
-------
out_array : 1D ndarray of ints
De-interleaved output data.
"""
out_array = zeros(len(in_array), in_array.dtype)
for index, element in enumerate(self.p_array):
out_array[element] = in_array[index]
return out_array
class RandInterlv(_Interleaver):
    """ Random Interleaver.

    Parameters
    ----------
    length : int
        Length of the interleaver.

    seed : int
        Seed for the random number generator that produces the
        random permutation used for interleaving.

    Returns
    -------
    random_interleaver : RandInterlv object
        A random interleaver object.

    Note
    ----
    The random number generator is NumPy's RandomState object,
    which uses the Mersenne Twister algorithm; the same seed
    therefore always yields the same permutation.
    """
    def __init__(self, length, seed):
        # A seeded generator makes the permutation reproducible.
        self.p_array = mtrand.RandomState(seed).permutation(arange(length))
#class SRandInterlv(_Interleaver):
#class QPPInterlv(_Interleaver):

View File

@ -0,0 +1,237 @@
# Authors: Veeresh Taranalli <veeresht@gmail.com>
# License: BSD 3-Clause
""" LDPC Codes """
import numpy as np
__all__ = ['get_ldpc_code_params, ldpc_bp_decode']
MAX_POS_LLR = 38.0
MIN_NEG_LLR = -38.0
def get_ldpc_code_params(ldpc_design_filename):
    """
    Extract parameters from LDPC code design file.

    The design file layout is:
      line 1: "<n_vnodes> <n_cnodes>"
      line 2: "<max_vnode_deg> <max_cnode_deg>"
      line 3: variable node degrees (space separated, trailing separator)
      line 4: check node degrees (space separated, trailing separator)
      next n_vnodes lines: 1-indexed check nodes adjacent to each
          variable node (tab separated)
      next n_cnodes lines: 1-indexed variable nodes adjacent to each
          check node (tab separated)

    Parameters
    ----------
    ldpc_design_filename : string
        Filename of the LDPC code design file.

    Returns
    -------
    ldpc_code_params : dictionary
        Parameters of the LDPC code with keys: n_vnodes, n_cnodes,
        max_cnode_deg, max_vnode_deg, cnode_adj_list, cnode_vnode_map,
        vnode_adj_list, vnode_cnode_map, cnode_deg_list, vnode_deg_list.
    """
    ldpc_code_params = {}
    # 'with' guarantees the file is closed even if parsing fails (the
    # original left the handle open on any exception).
    with open(ldpc_design_filename) as ldpc_design_file:
        [n_vnodes, n_cnodes] = [int(x) for x in ldpc_design_file.readline().split(' ')]
        [max_vnode_deg, max_cnode_deg] = [int(x) for x in ldpc_design_file.readline().split(' ')]
        # [:-1] drops the trailing separator token present in design files.
        vnode_deg_list = np.array([int(x) for x in ldpc_design_file.readline().split(' ')[:-1]], np.int32)
        cnode_deg_list = np.array([int(x) for x in ldpc_design_file.readline().split(' ')[:-1]], np.int32)

        # Adjacency tables padded with -1 up to the maximum degree; node
        # indices are 1-based in the file, stored 0-based here.
        cnode_adj_list = -np.ones([n_cnodes, max_cnode_deg], int)
        vnode_adj_list = -np.ones([n_vnodes, max_vnode_deg], int)

        for vnode_idx in range(n_vnodes):
            vnode_adj_list[vnode_idx, 0:vnode_deg_list[vnode_idx]] = \
                np.array([int(x)-1 for x in ldpc_design_file.readline().split('\t')])

        for cnode_idx in range(n_cnodes):
            cnode_adj_list[cnode_idx, 0:cnode_deg_list[cnode_idx]] = \
                np.array([int(x)-1 for x in ldpc_design_file.readline().split('\t')])

    # cnode_vnode_map[c, i]: position of check node c within the adjacency
    # row of its i-th neighbouring variable node (and vice versa for
    # vnode_cnode_map). These let the BP decoder address the opposite-
    # direction message slot. np.where(...)[0][0] yields the scalar index
    # directly; the original's arange-array indexing assigned a size-1
    # array to a scalar slot, which newer numpy rejects.
    cnode_vnode_map = -np.ones([n_cnodes, max_cnode_deg], int)
    vnode_cnode_map = -np.ones([n_vnodes, max_vnode_deg], int)

    for cnode in range(n_cnodes):
        for i, vnode in enumerate(cnode_adj_list[cnode, 0:cnode_deg_list[cnode]]):
            cnode_vnode_map[cnode, i] = np.where(vnode_adj_list[vnode, :] == cnode)[0][0]

    for vnode in range(n_vnodes):
        for i, cnode in enumerate(vnode_adj_list[vnode, 0:vnode_deg_list[vnode]]):
            vnode_cnode_map[vnode, i] = np.where(cnode_adj_list[cnode, :] == vnode)[0][0]

    # Flattened (1D) views are what the decoder kernels index into.
    cnode_adj_list_1d = cnode_adj_list.flatten().astype(np.int32)
    vnode_adj_list_1d = vnode_adj_list.flatten().astype(np.int32)
    cnode_vnode_map_1d = cnode_vnode_map.flatten().astype(np.int32)
    vnode_cnode_map_1d = vnode_cnode_map.flatten().astype(np.int32)

    # NOTE: the original also built a dense parity-check matrix 'pmat'
    # here, but it was never stored or returned (and its fancy index
    # wrote through the -1 padding into the last column); removed.

    ldpc_code_params['n_vnodes'] = n_vnodes
    ldpc_code_params['n_cnodes'] = n_cnodes
    ldpc_code_params['max_cnode_deg'] = max_cnode_deg
    ldpc_code_params['max_vnode_deg'] = max_vnode_deg
    ldpc_code_params['cnode_adj_list'] = cnode_adj_list_1d
    ldpc_code_params['cnode_vnode_map'] = cnode_vnode_map_1d
    ldpc_code_params['vnode_adj_list'] = vnode_adj_list_1d
    ldpc_code_params['vnode_cnode_map'] = vnode_cnode_map_1d
    ldpc_code_params['cnode_deg_list'] = cnode_deg_list
    ldpc_code_params['vnode_deg_list'] = vnode_deg_list

    return ldpc_code_params
def _limit_llr(in_llr):
    """Saturate a single LLR value to the range
    [MIN_NEG_LLR, MAX_POS_LLR]."""
    return max(MIN_NEG_LLR, min(MAX_POS_LLR, in_llr))
def sum_product_update(cnode_idx, cnode_adj_list, cnode_deg_list, cnode_msgs,
                       vnode_msgs, cnode_vnode_map, max_cnode_deg, max_vnode_deg):
    """
    Sum-product (SPA) check-node update for one check node.

    Gathers the incoming variable->check messages for check node
    ``cnode_idx`` and writes the outgoing check->variable messages in
    place into ``cnode_msgs`` using the tanh product rule.
    """
    base = cnode_idx * max_cnode_deg
    degree = cnode_deg_list[cnode_idx]
    neighbours = cnode_adj_list[base:base+degree]
    # Incoming message for each adjacent vnode, read from the slot that
    # vnode reserves for this check node.
    incoming_tanh = np.tanh(vnode_msgs[neighbours * max_vnode_deg +
                                       cnode_vnode_map[base:base+degree]] / 2.0)
    full_product = np.prod(incoming_tanh)
    # Leave-one-out product: divide the full product by each edge's own
    # term to exclude it from that edge's outgoing message.
    cnode_msgs[base:base+degree] = 2.0 * np.arctanh(full_product / incoming_tanh)
def min_sum_update(cnode_idx, cnode_adj_list, cnode_deg_list, cnode_msgs,
                   vnode_msgs, cnode_vnode_map, max_cnode_deg, max_vnode_deg):
    """
    Min-sum (MSA) check-node update for one check node.

    Each outgoing check->variable message is the product of the signs of
    the other incoming messages times the minimum of their magnitudes,
    written in place into ``cnode_msgs``.
    """
    base = cnode_idx * max_cnode_deg
    degree = cnode_deg_list[cnode_idx]
    neighbours = cnode_adj_list[base:base+degree]
    incoming = vnode_msgs[neighbours * max_vnode_deg +
                          cnode_vnode_map[base:base+degree]]
    # Leave-one-out without the masked-array machinery: drop the edge's
    # own message before taking sign-product and min-magnitude.
    for pos in range(degree):
        others = np.delete(incoming, pos)
        cnode_msgs[base + pos] = np.prod(np.sign(others)) * np.min(np.abs(others))
def ldpc_bp_decode(llr_vec, ldpc_code_params, decoder_algorithm, n_iters):
    """
    LDPC Decoder using Belief Propagation (BP).

    Parameters
    ----------
    llr_vec : 1D array of float
        Received codeword LLR values from the channel.

    ldpc_code_params : dictionary
        Parameters of the LDPC code (as built by get_ldpc_code_params).

    decoder_algorithm: string
        Specify the decoder algorithm type.
        SPA for Sum-Product Algorithm
        MSA for Min-Sum Algorithm

    n_iters : int
        Max. number of iterations of decoding to be done.

    Returns
    -------
    dec_word : 1D array of 0's and 1's
        The codeword after decoding.

    out_llrs : 1D array of float
        LLR values corresponding to the decoded output.

    Raises
    ------
    NameError
        If decoder_algorithm is neither 'SPA' nor 'MSA'.
    """
    n_cnodes = ldpc_code_params['n_cnodes']
    n_vnodes = ldpc_code_params['n_vnodes']
    max_cnode_deg = ldpc_code_params['max_cnode_deg']
    max_vnode_deg = ldpc_code_params['max_vnode_deg']
    cnode_adj_list = ldpc_code_params['cnode_adj_list']
    cnode_vnode_map = ldpc_code_params['cnode_vnode_map']
    vnode_adj_list = ldpc_code_params['vnode_adj_list']
    vnode_cnode_map = ldpc_code_params['vnode_cnode_map']
    cnode_deg_list = ldpc_code_params['cnode_deg_list']
    vnode_deg_list = ldpc_code_params['vnode_deg_list']

    dec_word = np.zeros(n_vnodes, int)
    # BUGFIX: out_llrs must be a float array. The original int dtype
    # truncated every accumulated LLR toward zero (e.g. 0.5 -> 0),
    # corrupting both the returned soft values and the hard decision
    # made from them below.
    out_llrs = np.zeros(n_vnodes)

    cnode_msgs = np.zeros(n_cnodes*max_cnode_deg)
    vnode_msgs = np.zeros(n_vnodes*max_vnode_deg)

    # Vectorized saturation of outgoing vnode messages.
    _limit_llr_v = np.vectorize(_limit_llr)

    if decoder_algorithm == 'SPA':
        check_node_update = sum_product_update
    elif decoder_algorithm == 'MSA':
        check_node_update = min_sum_update
    else:
        raise NameError('Please input a valid decoder_algorithm string.')

    # Initialize vnode messages with the LLR values received
    for vnode_idx in range(n_vnodes):
        start_idx = vnode_idx*max_vnode_deg
        offset = vnode_deg_list[vnode_idx]
        vnode_msgs[start_idx : start_idx+offset] = llr_vec[vnode_idx]

    # Main loop of Belief Propagation (BP) decoding iterations
    for iter_cnt in range(n_iters):

        continue_flag = 0

        # Check Node Update
        for cnode_idx in range(n_cnodes):
            check_node_update(cnode_idx, cnode_adj_list, cnode_deg_list, cnode_msgs,
                              vnode_msgs, cnode_vnode_map, max_cnode_deg, max_vnode_deg)

        # Variable Node Update
        for vnode_idx in range(n_vnodes):

            # Compute sum of all incoming messages at the variable node
            start_idx = vnode_idx*max_vnode_deg
            offset = vnode_deg_list[vnode_idx]
            cnode_list = vnode_adj_list[start_idx:start_idx+offset]
            cnode_list_msgs = cnode_msgs[cnode_list*max_cnode_deg +
                                         vnode_cnode_map[start_idx:start_idx+offset]]
            msg_sum = np.sum(cnode_list_msgs)

            # Compute messages on outgoing edges using the incoming message
            # sum (excluding each edge's own message), saturated to the
            # [MIN_NEG_LLR, MAX_POS_LLR] range by _limit_llr.
            vnode_msgs[start_idx:start_idx+offset] = _limit_llr_v(llr_vec[vnode_idx] + msg_sum -
                                                                  cnode_list_msgs)

            # Update output LLRs and decoded word (sign decision).
            out_llrs[vnode_idx] = llr_vec[vnode_idx] + msg_sum
            if out_llrs[vnode_idx] > 0:
                dec_word[vnode_idx] = 0
            else:
                dec_word[vnode_idx] = 1

        # Early termination: stop once the hard decision satisfies every
        # parity check (XOR of the bits in each check equals 0).
        for cnode_idx in range(n_cnodes):
            p_sum = 0
            for i in range(cnode_deg_list[cnode_idx]):
                p_sum ^= dec_word[cnode_adj_list[cnode_idx*max_cnode_deg + i]]

            if p_sum != 0:
                continue_flag = 1
                break

        # Stop iterations
        if continue_flag == 0:
            break

    return dec_word, out_llrs

View File

@ -0,0 +1,21 @@
# Authors: Veeresh Taranalli <veeresht@gmail.com>
# License: BSD 3-Clause
from numpy import array
from numpy.testing import assert_array_equal
from commpy.channelcoding.algcode import cyclic_code_genpoly
class TestAlgebraicCoding(object):
    """Regression test for cyclic code generator polynomial search."""

    def test_cyclic_code_gen_poly(self):
        # (n, k) pairs and, for each, the expected set of generator
        # polynomials in integer bit-vector form.
        code_lengths = array([15, 31])
        code_dims = array([4, 21])
        desired_genpolys = array([[2479, 3171, 3929],
                                  [1653, 1667, 1503, 1207, 1787, 1561, 1903,
                                   1219, 1137, 2013, 1453, 1897, 1975, 1395, 1547]])
        for idx, (n, k) in enumerate(zip(code_lengths, code_dims)):
            assert_array_equal(cyclic_code_genpoly(n, k), desired_genpolys[idx])

View File

@ -0,0 +1,87 @@
# Authors: Veeresh Taranalli <veeresht@gmail.com>
# License: BSD 3-Clause
from numpy import array
from numpy.random import randint
from numpy.testing import assert_array_equal
from commpy.channelcoding.convcode import Trellis, conv_encode, viterbi_decode
class TestConvCode(object):
    """Unit tests for Trellis construction and Viterbi decoding of two
    rate-1/2 convolutional codes (one feedforward, one recursive)."""

    @classmethod
    def setup_class(cls):
        # Convolutional Code 1 (feedforward): G(D) = [1+D^2, 1+D+D^2]
        memory = array([2])
        g_matrix = array([[0o5, 0o7]])
        cls.code_type_1 = 'default'
        cls.trellis_1 = Trellis(memory, g_matrix, 0, cls.code_type_1)
        cls.desired_next_state_table_1 = array([[0, 2],
                                                [0, 2],
                                                [1, 3],
                                                [1, 3]])
        cls.desired_output_table_1 = array([[0, 3],
                                            [3, 0],
                                            [1, 2],
                                            [2, 1]])

        # Convolutional Code 2 (recursive systematic), feedback 0o5.
        memory = array([2])
        g_matrix = array([[0o1, 0o7]])
        feedback = 0o5
        cls.code_type_2 = 'rsc'
        cls.trellis_2 = Trellis(memory, g_matrix, feedback, cls.code_type_2)
        cls.desired_next_state_table_2 = array([[0, 2],
                                                [2, 0],
                                                [1, 3],
                                                [3, 1]])
        cls.desired_output_table_2 = array([[0, 3],
                                            [0, 3],
                                            [1, 2],
                                            [1, 2]])

    @classmethod
    def teardown_class(cls):
        pass

    def test_next_state_table(self):
        for trellis, expected in ((self.trellis_1, self.desired_next_state_table_1),
                                  (self.trellis_2, self.desired_next_state_table_2)):
            assert_array_equal(trellis.next_state_table, expected)

    def test_output_table(self):
        for trellis, expected in ((self.trellis_1, self.desired_output_table_1),
                                  (self.trellis_2, self.desired_output_table_2)):
            assert_array_equal(trellis.output_table, expected)

    def test_conv_encode(self):
        pass

    def test_viterbi_decode(self):
        pass

    def test_conv_encode_viterbi_decode(self):
        niters = 10
        blocklength = 1000
        for _ in range(niters):
            msg = randint(0, 2, blocklength)
            for trellis in (self.trellis_1, self.trellis_2):
                # Hard-decision decoding of the raw coded bits.
                coded = conv_encode(msg, trellis)
                decoded = viterbi_decode(coded.astype(float), trellis, 15)
                assert_array_equal(decoded[:-2], msg)
                # Soft-decision decoding of the antipodal (+/-1) symbols.
                coded = conv_encode(msg, trellis)
                symbols = 2.0*coded - 1
                decoded = viterbi_decode(symbols, trellis, 15, 'unquantized')
                assert_array_equal(decoded[:-2], msg)

View File

@ -0,0 +1,68 @@
# Authors: Veeresh Taranalli <veeresht@gmail.com>
# License: BSD 3 clause
from numpy import array, ones_like, arange
from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_, assert_equal
from commpy.channelcoding.gfields import GF
class TestGaloisFields(object):
    """Unit tests for GF(2^m) arithmetic and related field computations."""

    def test_closure(self):
        # Sums and products of field elements must remain in the field.
        for m in arange(1, 9):
            field = GF(arange(2**m), m)
            for a in field.elements:
                for b in field.elements:
                    assert_((GF(array([a]), m) + GF(array([b]), m)).elements[0] in field.elements)
                    assert_((GF(array([a]), m) * GF(array([b]), m)).elements[0] in field.elements)

    def test_addition(self):
        m = 3
        lhs = GF(arange(2**m), m)
        rhs = GF(array([6, 4, 3, 1, 2, 0, 5, 7]), m)
        expected = GF(array([6, 5, 1, 2, 6, 5, 3, 0]), m)
        assert_array_equal((lhs + rhs).elements, expected.elements)

    def test_multiplication(self):
        m = 3
        lhs = GF(array([7, 6, 5, 4, 3, 2, 1, 0]), m)
        rhs = GF(array([6, 4, 3, 1, 2, 0, 5, 7]), m)
        expected = GF(array([4, 5, 4, 4, 6, 0, 5, 0]), m)
        assert_array_equal((lhs * rhs).elements, expected.elements)

    def test_tuple_form(self):
        m = 3
        field = GF(arange(0, 2**m-1), m)
        converted = field.power_to_tuple()
        expected = GF(array([1, 2, 4, 3, 6, 7, 5]), m)
        assert_array_equal(converted.elements, expected.elements)

    def test_power_form(self):
        # (m, elements in tuple form, expected power exponents)
        cases = [
            (3, arange(1, 2**3), array([0, 1, 3, 2, 6, 4, 5])),
            (4, arange(1, 2**4), array([0, 1, 4, 2, 8, 5, 10, 3, 14, 9, 7, 6, 13, 11, 12])),
        ]
        for m, elements, expected in cases:
            converted = GF(elements, m).tuple_to_power()
            assert_array_equal(converted.elements, GF(expected, m).elements)

    def test_order(self):
        m = 4
        field = GF(arange(1, 2**m), m)
        expected = array([1, 15, 15, 15, 15, 3, 3, 5, 15, 5, 15, 5, 15, 15, 5])
        assert_array_equal(field.order(), expected)

    def test_minpols(self):
        # (m, elements, expected minimal polynomials)
        cases = [
            (4, arange(2**4),
             array([2, 3, 19, 19, 19, 19, 7, 7, 31, 25, 31, 25, 31, 25, 25, 31])),
            (6, array([2, 8, 32, 6, 24, 35, 10, 40, 59, 41, 14, 37]),
             array([67, 87, 103, 73, 13, 109, 91, 117, 7, 115, 11, 97])),
        ]
        for m, elements, expected in cases:
            assert_array_equal(GF(elements, m).minpolys(), expected)

View File

@ -0,0 +1,62 @@
# Authors: Veeresh Taranalli <veeresht@gmail.com>
# License: BSD 3-Clause
from numpy import array, sqrt, zeros
from numpy.random import randn
from numpy.testing import assert_allclose
from commpy.channelcoding.ldpc import get_ldpc_code_params, ldpc_bp_decode
from commpy.utilities import hamming_dist
import os
from nose.plugins.attrib import attr
@attr('slow')
class TestLDPCCode(object):
    """Monte-Carlo frame-error-rate regression test for SPA belief
    propagation decoding of a Gallager (96, 48) LDPC code."""

    @classmethod
    def setup_class(cls):
        design_dir = os.path.dirname(__file__)
        design_path = os.path.join(design_dir, '../designs/ldpc/gallager/96.33.964.txt')
        cls.ldpc_code_params = get_ldpc_code_params(design_path)

    @classmethod
    def teardown_class(cls):
        pass

    def test_ldpc_bp_decode(self):
        N = 96
        k = 48
        rate = 0.5
        Es = 1.0
        snr_list = array([2.0, 2.5])
        niters = 10000000
        # The all-zero codeword is a valid codeword of every linear code.
        tx_codeword = zeros(N, int)
        ldpcbp_iters = 100
        # Reference FERs: 200 frame errors over 1000 / 2000 frames.
        fer_array_ref = array([200.0/1000, 200.0/2000])
        fer_array_test = zeros(len(snr_list))

        for idx, ebno in enumerate(snr_list):
            noise_std = 1/sqrt((10**(ebno/10.0))*rate*2/Es)
            fer_cnt_bp = 0
            for iter_cnt in range(niters):
                # BPSK modulate (all-zero -> all +1) and add AWGN.
                rx_word = 1 - 2*tx_codeword + noise_std * randn(N)
                rx_llrs = 2.0*rx_word/(noise_std**2)
                [dec_word, out_llrs] = ldpc_bp_decode(rx_llrs, self.ldpc_code_params,
                                                      'SPA', ldpcbp_iters)
                if hamming_dist(tx_codeword, dec_word) > 0:
                    fer_cnt_bp += 1
                # Stop this SNR point once 200 frame errors are collected.
                if fer_cnt_bp >= 200:
                    fer_array_test[idx] = float(fer_cnt_bp)/(iter_cnt+1)
                    break

        assert_allclose(fer_array_test, fer_array_ref, rtol=2e-1, atol=0)

View File

@ -0,0 +1,332 @@
# Authors: Veeresh Taranalli <veeresht@gmail.com>
# License: BSD 3-Clause
""" Turbo Codes """
from numpy import array, append, zeros, exp, pi, log, empty
from commpy.channelcoding import Trellis, conv_encode
from commpy.utilities import dec2bitarray, bitarray2dec
#from commpy.channelcoding.map_c import backward_recursion, forward_recursion_decoding
def turbo_encode(msg_bits, trellis1, trellis2, interleaver):
    """ Turbo Encoder.

    Encode bits using a parallel concatenated rate-1/3 turbo code
    consisting of two rate-1/2 systematic convolutional component codes.

    Parameters
    ----------
    msg_bits : 1D ndarray containing {0, 1}
        Stream of bits to be turbo encoded.

    trellis1 : Trellis object
        Trellis representation of the first code in the parallel
        concatenation.

    trellis2 : Trellis object
        Trellis representation of the second code in the parallel
        concatenation.

    interleaver : Interleaver object
        Interleaver used in the turbo code.

    Returns
    -------
    [sys_stream, non_sys_stream1, non_sys_stream2] : list of 1D ndarrays
        Encoded bit streams corresponding to the systematic output and
        the two non-systematic outputs from the two component codes.
    """
    # First RSC encoder: its output multiplexes systematic and parity bits.
    encoded = conv_encode(msg_bits, trellis1, 'rsc')
    systematic = encoded[::2]
    parity_1 = encoded[1::2]

    # Second RSC encoder runs on the interleaved systematic stream; the
    # puncture matrix [[0, 1]] retains only one of its two output streams.
    interleaved = interleaver.interlv(systematic)
    puncture_matrix = array([[0, 1]])
    parity_2 = conv_encode(interleaved, trellis2, 'rsc', puncture_matrix)

    # Drop the trellis-termination bits appended by each component encoder.
    sys_stream = systematic[0:-trellis1.total_memory]
    non_sys_stream_1 = parity_1[0:-trellis1.total_memory]
    non_sys_stream_2 = parity_2[0:-trellis2.total_memory]

    return [sys_stream, non_sys_stream_1, non_sys_stream_2]
def _compute_branch_prob(code_bit_0, code_bit_1, rx_symbol_0, rx_symbol_1,
noise_variance):
#cdef np.float64_t code_symbol_0, code_symbol_1, branch_prob, x, y
code_symbol_0 = 2*code_bit_0 - 1
code_symbol_1 = 2*code_bit_1 - 1
x = rx_symbol_0 - code_symbol_0
y = rx_symbol_1 - code_symbol_1
# Normalized branch transition probability
branch_prob = exp(-(x*x + y*y)/(2*noise_variance))
return branch_prob
def _backward_recursion(trellis, msg_length, noise_variance,
                        sys_symbols, non_sys_symbols, branch_probs,
                        priors, b_state_metrics):
    """
    BCJR backward pass: fill the backward state metrics (beta) and cache
    the branch transition probabilities (gamma) for reuse by the forward
    pass.

    Parameters
    ----------
    trellis : Trellis object for the rate-1/2 component code.
    msg_length : int, number of received symbol pairs.
    noise_variance : float, AWGN channel noise variance.
    sys_symbols, non_sys_symbols : 1D arrays of received symbols.
    branch_probs : 3D array [input, state, time]; written in place.
    priors : 2D array [input, time] of prior bit probabilities.
    b_state_metrics : 2D array [state, time]; written in place. The final
        column (index msg_length) is assumed initialized by the caller.
    """
    n = trellis.n
    number_states = trellis.number_states
    number_inputs = trellis.number_inputs
    codeword_array = empty(n, 'int')
    next_state_table = trellis.next_state_table
    output_table = trellis.output_table

    # Backward recursion: walk time indices from the end toward the start,
    # accumulating beta over every (state, input) branch.
    for reverse_time_index in reversed(xrange(1, msg_length+1)):
        for current_state in xrange(number_states):
            for current_input in xrange(number_inputs):
                next_state = next_state_table[current_state, current_input]
                code_symbol = output_table[current_state, current_input]
                # Bit 0 of the branch codeword is the systematic (message)
                # bit, bit 1 the parity bit.
                codeword_array = dec2bitarray(code_symbol, n)
                parity_bit = codeword_array[1]
                msg_bit = codeword_array[0]
                rx_symbol_0 = sys_symbols[reverse_time_index-1]
                rx_symbol_1 = non_sys_symbols[reverse_time_index-1]
                branch_prob = _compute_branch_prob(msg_bit, parity_bit,
                                                   rx_symbol_0, rx_symbol_1,
                                                   noise_variance)
                # Cache gamma for the forward pass.
                branch_probs[current_input, current_state, reverse_time_index-1] = branch_prob
                b_state_metrics[current_state, reverse_time_index-1] += \
                    (b_state_metrics[next_state, reverse_time_index] * branch_prob *
                     priors[current_input, reverse_time_index-1])

        # Normalize each time column to sum to 1 to prevent underflow of
        # the probability-domain metrics.
        b_state_metrics[:,reverse_time_index-1] /= \
            b_state_metrics[:,reverse_time_index-1].sum()
def _forward_recursion_decoding(trellis, mode, msg_length, noise_variance,
                                sys_symbols, non_sys_symbols, b_state_metrics,
                                f_state_metrics, branch_probs, app, L_int,
                                priors, L_ext, decoded_bits):
    """
    BCJR forward pass: propagate the forward state metrics (alpha), form
    the a-posteriori probabilities, and write the output LLRs (and, in
    'decode' mode, the hard bit decisions) in place.

    Parameters
    ----------
    trellis : Trellis object for the rate-1/2 component code.
    mode : 'decode' writes hard decisions into decoded_bits; any other
        value computes LLRs only.
    msg_length : int, number of received symbol pairs.
    noise_variance : float (unused here; gammas are precomputed).
    sys_symbols, non_sys_symbols : 1D arrays of received symbols.
    b_state_metrics : 2D array [state, time] from _backward_recursion.
    f_state_metrics : 2D array [state, 2]; column 0 holds alpha at the
        current time, column 1 accumulates the next time step.
    branch_probs : 3D array [input, state, time] of cached gammas.
    app : scratch array of length number_inputs for the per-bit APPs.
    L_int : 1D array of intrinsic LLRs.
    priors : 2D array [input, time] of prior bit probabilities.
    L_ext : 1D output array; written in place. NOTE(review): this stores
        the full a-posteriori LLR (intrinsic included); the caller
        (turbo_decode) subtracts L_int to obtain the extrinsic part.
    decoded_bits : 1D output array; written in place in 'decode' mode.
    """
    n = trellis.n
    number_states = trellis.number_states
    number_inputs = trellis.number_inputs
    # NOTE(review): codeword_array appears unused in this function (it is
    # only needed by the backward pass).
    codeword_array = empty(n, 'int')
    next_state_table = trellis.next_state_table
    output_table = trellis.output_table

    # Forward Recursion
    for time_index in xrange(1, msg_length+1):
        app[:] = 0
        for current_state in xrange(number_states):
            for current_input in xrange(number_inputs):
                next_state = next_state_table[current_state, current_input]
                branch_prob = branch_probs[current_input, current_state, time_index-1]

                # Compute the forward state metrics
                f_state_metrics[next_state, 1] += (f_state_metrics[current_state, 0] *
                                                   branch_prob *
                                                   priors[current_input, time_index-1])

                # Compute APP: alpha * gamma * beta accumulated per input bit.
                app[current_input] += (f_state_metrics[current_state, 0] *
                                       branch_prob *
                                       b_state_metrics[next_state, time_index])

        lappr = L_int[time_index-1] + log(app[1]/app[0])
        L_ext[time_index-1] = lappr

        if mode == 'decode':
            # Hard decision from the sign of the a-posteriori LLR.
            if lappr > 0:
                decoded_bits[time_index-1] = 1
            else:
                decoded_bits[time_index-1] = 0

        # Normalization of the forward state metrics, then shift the
        # accumulated column into the "current" column for the next step.
        f_state_metrics[:,1] = f_state_metrics[:,1]/f_state_metrics[:,1].sum()
        f_state_metrics[:,0] = f_state_metrics[:,1]
        f_state_metrics[:,1] = 0.0
def map_decode(sys_symbols, non_sys_symbols, trellis, noise_variance, L_int, mode='decode'):
    """ Maximum a-posteriori probability (MAP) decoder.

    Decodes a stream of convolutionally encoded (rate 1/2) bits using the
    MAP (BCJR) algorithm.

    Parameters
    ----------
    sys_symbols : 1D ndarray
        Received symbols corresponding to the systematic (first output)
        bits in the codeword.

    non_sys_symbols : 1D ndarray
        Received symbols corresponding to the non-systematic (second
        output) bits in the codeword.

    trellis : Trellis object
        Trellis representation of the convolutional code.

    noise_variance : float
        Variance (power) of the AWGN channel.

    L_int : 1D ndarray
        Array representing the initial intrinsic information for all
        received symbols. Typically all zeros, corresponding to equal
        prior probabilities of bits 0 and 1.

    mode : str{'decode', 'compute'}, optional
        The mode in which the MAP decoder is used. 'decode' mode returns
        the decoded bits along with the extrinsic information. 'compute'
        mode returns only the extrinsic information.

    Returns
    -------
    [L_ext, decoded_bits] : list of two 1D ndarrays
        The first element of the list is the extrinsic information.
        The second element of the list is the decoded bits.
    """
    number_states = trellis.number_states
    number_inputs = trellis.number_inputs
    msg_length = len(sys_symbols)

    # Initialize forward state metrics (alpha): decoding starts in state 0.
    f_state_metrics = zeros([number_states, 2])
    f_state_metrics[0][0] = 1

    # Initialize backward state metrics (beta): uniform terminal metric.
    b_state_metrics = zeros([number_states, msg_length+1])
    b_state_metrics[:,msg_length] = 1

    # Initialize branch transition probabilities (gamma)
    branch_probs = zeros([number_inputs, number_states, msg_length+1])

    # Scratch buffer for the per-bit a-posteriori probabilities.
    app = zeros(number_inputs)

    decoded_bits = zeros(msg_length, 'int')
    L_ext = zeros(msg_length)

    # Convert intrinsic LLRs to per-bit prior probabilities:
    # P(bit=0) = 1/(1+exp(L)), P(bit=1) = 1 - P(bit=0).
    priors = empty([2, msg_length])
    priors[0,:] = 1/(1 + exp(L_int))
    priors[1,:] = 1 - priors[0,:]

    # Backward recursion (also caches the branch probabilities).
    _backward_recursion(trellis, msg_length, noise_variance, sys_symbols,
                        non_sys_symbols, branch_probs, priors, b_state_metrics)

    # Forward recursion and LLR/decision computation.
    _forward_recursion_decoding(trellis, mode, msg_length, noise_variance, sys_symbols,
                                non_sys_symbols, b_state_metrics, f_state_metrics,
                                branch_probs, app, L_int, priors, L_ext, decoded_bits)

    return [L_ext, decoded_bits]
def turbo_decode(sys_symbols, non_sys_symbols_1, non_sys_symbols_2, trellis,
                 noise_variance, number_iterations, interleaver, L_int = None):
    """ Turbo Decoder.

    Decodes a stream of convolutionally encoded (rate 1/3) bits using an
    iterative exchange of extrinsic information between two BCJR (MAP)
    component decoders.

    Parameters
    ----------
    sys_symbols : 1D ndarray
        Received symbols corresponding to the systematic (first output)
        bits in the codeword.

    non_sys_symbols_1 : 1D ndarray
        Received symbols corresponding to the first parity bits in the
        codeword.

    non_sys_symbols_2 : 1D ndarray
        Received symbols corresponding to the second parity bits in the
        codeword.

    trellis : Trellis object
        Trellis representation of the convolutional codes used in the
        Turbo code.

    noise_variance : float
        Variance (power) of the AWGN channel.

    number_iterations : int
        Number of the iterations of the BCJR algorithm used in turbo
        decoding.

    interleaver : Interleaver object.
        Interleaver used in the turbo code.

    L_int : 1D ndarray
        Array representing the initial intrinsic information for all
        received symbols. Typically all zeros, corresponding to equal
        prior probabilities of bits 0 and 1.

    Returns
    -------
    decoded_bits : 1D ndarray of ints containing {0, 1}
        Decoded bit stream.
    """
    if L_int is None:
        L_int = zeros(len(sys_symbols))

    L_int_1 = L_int

    # The second decoder works on the interleaved systematic symbols.
    sys_symbols_i = interleaver.interlv(sys_symbols)

    last_iteration = number_iterations - 1
    for iteration_count in xrange(number_iterations):

        # MAP Decoder - 1: produce extrinsic information for decoder 2
        # (a-posteriori output minus the intrinsic input), interleaved.
        [L_ext_1, decoded_bits] = map_decode(sys_symbols, non_sys_symbols_1,
                                             trellis, noise_variance, L_int_1, 'compute')
        L_int_2 = interleaver.interlv(L_ext_1 - L_int_1)

        # Only the final pass needs hard decisions.
        mode = 'decode' if iteration_count == last_iteration else 'compute'

        # MAP Decoder - 2: its extrinsic output, de-interleaved, becomes
        # the intrinsic input of decoder 1 on the next iteration.
        [L_2, decoded_bits] = map_decode(sys_symbols_i, non_sys_symbols_2,
                                         trellis, noise_variance, L_int_2, mode)
        L_int_1 = interleaver.deinterlv(L_2 - L_int_2)

    # Decoder 2 worked in the interleaved domain; undo the permutation.
    return interleaver.deinterlv(decoded_bits)

175
scripts/commpy/channels.py Normal file
View File

@ -0,0 +1,175 @@
# Authors: Veeresh Taranalli <veeresht@gmail.com>
# License: BSD 3-Clause
"""
============================================
Channel Models (:mod:`commpy.channels`)
============================================
.. autosummary::
:toctree: generated/
bec -- Binary Erasure Channel.
bsc -- Binary Symmetric Channel.
awgn -- Additive White Gaussian Noise Channel.
"""
from numpy import complex, sum, pi, arange, array, size, shape, real, sqrt
from numpy import matrix, sqrt, sum, zeros, concatenate, sinc
from numpy.random import randn, seed, random
#from scipy.special import gamma, jn
#from scipy.signal import hamming, convolve, resample
#from scipy.fftpack import ifft, fftshift, fftfreq
#from scipy.interpolate import interp1d
__all__=['bec', 'bsc', 'awgn']
def bec(input_bits, p_e):
    """
    Binary Erasure Channel.

    Each input bit is independently erased (replaced by -1) with
    probability ``p_e``; the input array itself is not modified.

    Parameters
    ----------
    input_bits : 1D ndarray containing {0, 1}
        Input array of bits to the channel.

    p_e : float in [0, 1]
        Erasure probability of the channel.

    Returns
    -------
    output_bits : 1D ndarray containing {0, 1, -1}
        Output bits from the channel, with erasures marked as -1.
    """
    erased = random(len(input_bits)) <= p_e
    output_bits = input_bits.copy()
    output_bits[erased] = -1
    return output_bits
def bsc(input_bits, p_t):
    """
    Binary Symmetric Channel.

    Each input bit is independently flipped with probability ``p_t``;
    the input array itself is not modified.

    Parameters
    ----------
    input_bits : 1D ndarray containing {0, 1}
        Input array of bits to the channel.

    p_t : float in [0, 1]
        Transition/Error probability of the channel.

    Returns
    -------
    output_bits : 1D ndarray containing {0, 1}
        Output bits from the channel.
    """
    output_bits = input_bits.copy()
    flips = random(len(output_bits)) <= p_t
    # XOR with 1 flips the selected bits in place on the copy.
    output_bits[flips] ^= 1
    return output_bits
def awgn(input_signal, snr_dB, rate=1.0):
    """
    Additive White Gaussian Noise (AWGN) Channel.

    Parameters
    ----------
    input_signal : 1D ndarray of floats (or complex)
        Input signal to the channel.

    snr_dB : float
        Output SNR required in dB.

    rate : float
        Rate of the FEC code used, if any; otherwise 1.

    Returns
    -------
    output_signal : 1D ndarray of floats (or complex)
        Output signal from the channel with the specified SNR.
    """
    # Average energy per sample measured from the signal itself.
    avg_energy = sum(abs(input_signal) * abs(input_signal)) / len(input_signal)
    snr_linear = 10 ** (snr_dB / 10.0)
    noise_variance = avg_energy / (2 * rate * snr_linear)

    if input_signal.dtype == complex:
        # Split the noise power equally between the I and Q components.
        noise = sqrt(noise_variance) * (randn(len(input_signal)) +
                                        randn(len(input_signal)) * 1j)
    else:
        noise = sqrt(2 * noise_variance) * randn(len(input_signal))

    return input_signal + noise
# =============================================================================
# Incomplete code to implement fading channels
# =============================================================================
#def doppler_jakes(max_doppler, filter_length):
# fs = 32.0*max_doppler
# ts = 1/fs
# m = arange(0, filter_length/2)
# Generate the Jakes Doppler Spectrum impulse response h[m]
# h_jakes_left = (gamma(3.0/4) *
# pow((max_doppler/(pi*abs((m-(filter_length/2))*ts))), 0.25) *
# jn(0.25, 2*pi*max_doppler*abs((m-(filter_length/2))*ts)))
# h_jakes_center = array([(gamma(3.0/4)/gamma(5.0/4)) * pow(max_doppler, 0.5)])
# h_jakes = concatenate((h_jakes_left[0:filter_length/2-1],
# h_jakes_center, h_jakes_left[::-1]))
# h_jakes = h_jakes*hamming(filter_length)
# h_jakes = h_jakes/(sum(h_jakes**2)**0.5)
# -----------------------------------------------------------------------------
# jakes_psd_right = (1/(pi*max_doppler*(1-(freqs/max_doppler)**2)**0.5))**0.5
# zero_pad = zeros([(fft_size-filter_length)/2, ])
# jakes_psd = concatenate((zero_pad, jakes_psd_right[::-1],
# jakes_psd_right, zero_pad))
#print size(jakes_psd)
# jakes_impulse = real(fftshift(ifft(jakes_psd, fft_size)))
# h_jakes = jakes_impulse[(fft_size-filter_length)/2 + 1 : (fft_size-filter_length)/2 + filter_length + 1]
# h_jakes = h_jakes*hamming(filter_length)
# h_jakes = h_jakes/(sum(h_jakes**2)**0.5)
# -----------------------------------------------------------------------------
# return h_jakes
#def rayleigh_channel(ts_input, max_doppler, block_length, path_gains,
# path_delays):
# fs_input = 1.0/ts_input
# fs_channel = 32.0*max_doppler
# ts_channel = 1.0/fs_channel
# interp_factor = fs_input/fs_channel
# channel_length = block_length/interp_factor
# n1 = -10
# n2 = 10
# filter_length = 1024
# Generate the Jakes Doppler Spectrum impulse response h[m]
# h_jakes = doppler_jakes(max_doppler, filter_length)
# Generate the complex Gaussian Random Process
# g_var = 0.5
# gain_process = zeros([len(path_gains), block_length], dtype=complex)
# delay_process = zeros([n2+1-n1, len(path_delays)])
# for k in xrange(len(path_gains)):
# g = (g_var**0.5) * (randn(channel_length) + 1j*randn(channel_length))
# g_filt = convolve(g, h_jakes, mode='same')
# g_filt_interp = resample(g_filt, block_length)
# gain_process[k,:] = pow(10, (path_gains[k]/10.0)) * g_filt_interp
# delay_process[:,k] = sinc((path_delays[k]/ts_input) - arange(n1, n2+1))
#channel_matrix = 0
# channel_matrix = matrix(delay_process)*matrix(gain_process)
# return channel_matrix, gain_process, h_jakes

View File

@ -0,0 +1,57 @@
# Authors: Veeresh Taranalli <veeresht@gmail.com>
# License: BSD 3-Clause
import numpy as np
import commpy.channelcoding.convcode as cc
from commpy.utilities import *
# =============================================================================
# Example showing the encoding and decoding of convolutional codes
# =============================================================================
# G(D) corresponding to the convolutional encoder
# Octal generator polynomials (Python 2 literals): 05 = 1+D^2, 07 = 1+D+D^2.
generator_matrix = np.array([[05, 07]])
#generator_matrix = np.array([[03, 00, 02], [07, 04, 06]])

# Number of delay elements in the convolutional encoder
M = np.array([2])

# Create trellis data structure
trellis = cc.Trellis(M, generator_matrix)

# Traceback depth of the decoder
tb_depth = 5*(M.sum() + 1)

for i in range(10):
    # Generate random message bits to be encoded
    message_bits = np.random.randint(0, 2, 1000)

    # Encode message bits
    coded_bits = cc.conv_encode(message_bits, trellis)

    # Introduce bit errors (channel)
    #coded_bits[4] = 0
    #coded_bits[7] = 0

    # Decode the received bits
    decoded_bits = cc.viterbi_decode(coded_bits.astype(float), trellis, tb_depth)

    # NOTE(review): M is a length-1 ndarray used as a negative slice bound;
    # this relies on implicit conversion of a size-1 array to an index.
    num_bit_errors = hamming_dist(message_bits, decoded_bits[:-M])
    #num_bit_errors = 1

    # Stop at the first failing trial; otherwise report success per trial.
    if num_bit_errors !=0:
        #print num_bit_errors, "Bit Errors found!"
        #print message_bits
        #print decoded_bits[tb_depth+3:]
        #print decoded_bits
        break
    else:
        print "No Bit Errors :)"

#print "==== Message Bits ==="
#print message_bits

#print "==== Coded Bits ====="
#print coded_bits

#print "==== Decoded Bits ==="
#print decoded_bits[tb_depth:]

187
scripts/commpy/filters.py Normal file
View File

@ -0,0 +1,187 @@
# Authors: Veeresh Taranalli <veeresht@gmail.com>
# License: BSD 3-Clause
"""
============================================
Pulse Shaping Filters (:mod:`commpy.filters`)
============================================
.. autosummary::
:toctree: generated/
rcosfilter -- Raised Cosine (RC) Filter.
rrcosfilter -- Root Raised Cosine (RRC) Filter.
gaussianfilter -- Gaussian Filter.
rectfilter -- Rectangular Filter.
"""
import numpy as np
__all__=['rcosfilter', 'rrcosfilter', 'gaussianfilter', 'rectfilter']
def rcosfilter(N, alpha, Ts, Fs):
    """
    Generates a raised cosine (RC) filter (FIR) impulse response.

    Parameters
    ----------
    N : int
        Length of the filter in samples.

    alpha : float
        Roll off factor (valid values are [0, 1]).

    Ts : float
        Symbol period in seconds.

    Fs : float
        Sampling rate in Hz.

    Returns
    -------
    time_idx : 1-D ndarray (float)
        Time indices, in seconds, for the impulse response.

    h_rc : 1-D ndarray (float)
        Impulse response of the raised cosine filter.
    """
    T_delta = 1 / float(Fs)
    time_idx = (np.arange(N) - N / 2) * T_delta
    h_rc = np.zeros(N, dtype=float)

    for k in range(N):
        t = (k - N / 2) * T_delta
        if t == 0.0:
            # Limit of the RC formula as t -> 0.
            h_rc[k] = 1.0
        elif alpha != 0 and (t == Ts / (2 * alpha) or t == -Ts / (2 * alpha)):
            # Limit at the singular points t = +/- Ts/(2*alpha); the sinc
            # factor is even in t, so one expression covers both signs.
            h_rc[k] = (np.pi / 4) * (np.sin(np.pi * t / Ts) / (np.pi * t / Ts))
        else:
            h_rc[k] = (np.sin(np.pi * t / Ts) / (np.pi * t / Ts)) * \
                (np.cos(np.pi * alpha * t / Ts) /
                 (1 - (((2 * alpha * t) / Ts) * ((2 * alpha * t) / Ts))))

    return time_idx, h_rc
def rrcosfilter(N, alpha, Ts, Fs):
    """
    Generate a root raised cosine (RRC) FIR filter impulse response.

    Parameters
    ----------
    N : int
        Length of the filter in samples.
    alpha : float
        Roll off factor (valid values are [0, 1]).
    Ts : float
        Symbol period in seconds.
    Fs : float
        Sampling rate in Hz.

    Returns
    -------
    time_idx : 1-D ndarray of floats
        Time indices, in seconds, for the impulse response.
    h_rrc : 1-D ndarray of floats
        Impulse response of the root raised cosine filter.
    """
    T_delta = 1 / float(Fs)
    time_idx = ((np.arange(N) - N / 2)) * T_delta
    h_rrc = np.zeros(N, dtype=float)
    for k, t in enumerate(time_idx):
        if t == 0.0:
            # value at the pulse center
            h_rrc[k] = 1.0 - alpha + (4 * alpha / np.pi)
        elif alpha != 0 and abs(t) == Ts / (4 * alpha):
            # removable singularity of the closed-form expression
            h_rrc[k] = (alpha / np.sqrt(2)) * (((1 + 2 / np.pi) *
                (np.sin(np.pi / (4 * alpha)))) + ((1 - 2 / np.pi) *
                (np.cos(np.pi / (4 * alpha)))))
        else:
            h_rrc[k] = (np.sin(np.pi * t * (1 - alpha) / Ts) +
                        4 * alpha * (t / Ts) * np.cos(np.pi * t * (1 + alpha) / Ts)) / \
                       (np.pi * t * (1 - (4 * alpha * t / Ts) * (4 * alpha * t / Ts)) / Ts)
    return time_idx, h_rrc
def gaussianfilter(N, alpha, Ts, Fs):
    """
    Generate a Gaussian FIR filter impulse response.

    Parameters
    ----------
    N : int
        Length of the filter in samples.
    alpha : float
        Pulse-width parameter of the Gaussian.
    Ts : float
        Symbol period in seconds.
        NOTE(review): accepted for API symmetry with the other filters but
        not used by the formula below -- confirm intended.
    Fs : float
        Sampling rate in Hz.

    Returns
    -------
    time_idx : 1-D ndarray of floats
        Time indices for the impulse response.
    h_gaussian : 1-D ndarray of floats
        Impulse response of the Gaussian filter.
    """
    T_delta = 1 / float(Fs)
    time_idx = ((np.arange(N) - N / 2)) * T_delta
    scaled = np.pi * time_idx / alpha
    h_gaussian = (np.sqrt(np.pi) / alpha) * np.exp(-(scaled * scaled))
    return time_idx, h_gaussian
def rectfilter(N, Ts, Fs):
    """
    Generate a rectangular FIR filter impulse response (all-ones taps).

    Parameters
    ----------
    N : int
        Length of the filter in samples.
    Ts : float
        Symbol period in seconds (unused by the tap values themselves).
    Fs : float
        Sampling rate in Hz.

    Returns
    -------
    time_idx : 1-D ndarray of floats
        Time indices for the impulse response.
    h_rect : 1-D ndarray of floats
        Impulse response of the rectangular filter.
    """
    T_delta = 1 / float(Fs)
    time_idx = (np.arange(N) - N / 2) * T_delta
    h_rect = np.ones(N)
    return time_idx, h_rect

View File

@ -0,0 +1,43 @@
# Authors: Veeresh Taranalli <veeresht@gmail.com>
# License: BSD 3-Clause
"""
============================================
Impairments (:mod:`commpy.impairments`)
============================================
.. autosummary::
:toctree: generated/
add_frequency_offset -- Add frequency offset impairment.
"""
from numpy import exp, pi, arange
__all__ = ['add_frequency_offset']
def add_frequency_offset(waveform, Fs, delta_f):
    """
    Add a carrier frequency offset impairment to the input signal.

    Each sample n is rotated by exp(j*2*pi*(delta_f/Fs)*n).

    Parameters
    ----------
    waveform : 1D ndarray
        Input signal.
    Fs : float
        Sampling frequency (in Hz).
    delta_f : float
        Frequency offset (in Hz).

    Returns
    -------
    1D ndarray of complex floats
        Output signal with the frequency offset applied.
    """
    sample_idx = arange(len(waveform))
    rotator = exp(1j * 2 * pi * (delta_f / Fs) * sample_idx)
    return waveform * rotator

View File

@ -0,0 +1,194 @@
# Authors: Veeresh Taranalli <veeresht@gmail.com>
# License: BSD 3-Clause
"""
==================================================
Modulation Demodulation (:mod:`commpy.modulation`)
==================================================
.. autosummary::
:toctree: generated/
PSKModem -- Phase Shift Keying (PSK) Modem.
QAMModem -- Quadrature Amplitude Modulation (QAM) Modem.
mimo_ml -- MIMO Maximum Likelihood (ML) Detection.
"""
from numpy import arange, array, zeros, pi, cos, sin, sqrt, log2, argmin, \
hstack, repeat, tile, dot, sum, shape, concatenate, exp, log
from itertools import product
from commpy.utilities import bitarray2dec, dec2bitarray
from numpy.fft import fft, ifft
__all__ = ['PSKModem', 'QAMModem', 'mimo_ml']
class Modem:
    """Base class for linear memoryless modems (PSK/QAM).

    Subclasses are expected to provide:
      constellation   -- ndarray of complex constellation points
      num_bits_symbol -- number of bits carried per symbol
      symbol_mapping  -- iterable of constellation indices
    """

    def modulate(self, input_bits):
        """Modulate (map) an array of bits to constellation symbols.

        Parameters
        ----------
        input_bits : 1D ndarray of ints
            Input bits to be modulated (mapped).

        Returns
        -------
        baseband_symbols : 1D ndarray of complex floats
            Modulated complex symbols.
        """
        step = self.num_bits_symbol
        indices = [bitarray2dec(input_bits[start:start + step])
                   for start in range(0, len(input_bits), step)]
        return self.constellation[indices]

    def demodulate(self, input_symbols, demod_type, noise_var=0):
        """Demodulate (map) constellation symbols back to bits.

        Parameters
        ----------
        input_symbols : 1D ndarray of complex floats
            Input symbols to be demodulated.
        demod_type : string
            'hard' for hard decision output (bits),
            'soft' for soft decision output (LLRs).
        noise_var : float
            AWGN variance; only meaningful when demod_type is 'soft'.

        Returns
        -------
        demod_bits : 1D ndarray
            Demodulated bits ('hard') or per-bit LLRs ('soft').
        """
        if demod_type == 'hard':
            # nearest-neighbour decision per symbol
            nearest = [argmin(abs(sym - self.constellation))
                       for sym in input_symbols]
            demod_bits = hstack([dec2bitarray(idx, self.num_bits_symbol)
                                 for idx in nearest])
        elif demod_type == 'soft':
            k = self.num_bits_symbol
            demod_bits = zeros(len(input_symbols) * k)
            for i, current_symbol in enumerate(input_symbols):
                for bit_index in range(k):
                    llr_num = 0
                    llr_den = 0
                    for const_index in self.symbol_mapping:
                        # Gaussian likelihood of this constellation point
                        metric = exp((-abs(current_symbol -
                                           self.constellation[const_index]) ** 2) / noise_var)
                        if (const_index >> bit_index) & 1:
                            llr_num = llr_num + metric
                        else:
                            llr_den = llr_den + metric
                    demod_bits[i * k + k - 1 - bit_index] = log(llr_num / llr_den)
        else:
            # unsupported demod_type: falls through to the return below with
            # demod_bits unbound (original behavior preserved)
            pass
        return demod_bits
class PSKModem(Modem):
    """Phase Shift Keying (PSK) modem."""

    def _constellation_symbol(self, i):
        # unit-circle point for symbol index i (1-based angle convention kept)
        angle = 2 * pi * (i - 1) / self.m
        return cos(angle) + sin(angle) * (0 + 1j)

    def __init__(self, m):
        """Create a PSK modem.

        Parameters
        ----------
        m : int
            Size of the PSK constellation.
        """
        self.m = m
        self.num_bits_symbol = int(log2(self.m))
        self.symbol_mapping = arange(self.m)
        self.constellation = array([self._constellation_symbol(i)
                                    for i in self.symbol_mapping])
class QAMModem(Modem):
    """Square Quadrature Amplitude Modulation (QAM) modem."""

    def _constellation_symbol(self, i):
        # map an (I-level, Q-level) pair onto the complex plane
        return (2 * i[0] - 1) + (2 * i[1] - 1) * (1j)

    def __init__(self, m):
        """Create a QAM modem.

        Parameters
        ----------
        m : int
            Size of the QAM constellation (must be a perfect square).
        """
        self.m = m
        self.num_bits_symbol = int(log2(self.m))
        self.symbol_mapping = arange(self.m)
        # axis levels centered around zero, e.g. [-1, 0, 1, 2] - 0.5 shifts
        levels = arange(1, sqrt(self.m) + 1) - (sqrt(self.m) / 2)
        self.constellation = array([self._constellation_symbol(pair)
                                    for pair in product(levels, repeat=2)])
def ofdm_tx(x, nfft, nsc, cp_length):
    """OFDM transmit signal generation.

    Parameters
    ----------
    x : 2D ndarray of complex (nsc x num_symbols)
        Data-subcarrier symbols, one column per OFDM symbol.
    nfft : int
        FFT size.
    nsc : int
        Number of occupied (data) subcarriers; DC bin is left empty.
    cp_length : int
        Cyclic-prefix length in samples.

    Returns
    -------
    1D ndarray of complex
        Time-domain signal: [CP | symbol] for every OFDM symbol.
    """
    # BUG FIX: the original cast nfft/nsc/cp_length to float and used them as
    # slice bounds, which fails on Python 3 / modern numpy. Integer arithmetic
    # produces the same values.
    nfft = int(nfft)
    nsc = int(nsc)
    cp_length = int(cp_length)
    half = nsc // 2
    ofdm_tx_signal = array([])
    for i in range(shape(x)[1]):
        symbols = x[:, i]
        ofdm_sym_freq = zeros(nfft, dtype=complex)
        # positive-frequency bins get the upper half of the symbols,
        # negative-frequency (top) bins get the lower half
        ofdm_sym_freq[1:half + 1] = symbols[half:]
        ofdm_sym_freq[-half:] = symbols[:half]
        ofdm_sym_time = ifft(ofdm_sym_freq)
        cp = ofdm_sym_time[-cp_length:]
        ofdm_tx_signal = concatenate((ofdm_tx_signal, cp, ofdm_sym_time))
    return ofdm_tx_signal
def ofdm_rx(y, nfft, nsc, cp_length):
    """OFDM receive signal processing (CP removal + FFT + demapping).

    Parameters
    ----------
    y : 1D ndarray of complex
        Received time-domain signal.
    nfft : int
        FFT size.
    nsc : int
        Number of occupied (data) subcarriers.
    cp_length : int
        Cyclic-prefix length in samples.

    Returns
    -------
    x_hat : 2D ndarray of complex (nsc x num_symbols)
        Recovered subcarrier symbols, one column per OFDM symbol.
    """
    # BUG FIX: `nsc/2` was used directly as a slice bound; on Python 3 that is
    # a float and modern numpy rejects it. Floor division keeps the original
    # (Python 2) semantics.
    nfft = int(nfft)
    nsc = int(nsc)
    cp_length = int(cp_length)
    half = nsc // 2
    num_ofdm_symbols = len(y) // (nfft + cp_length)
    x_hat = zeros([nsc, num_ofdm_symbols], dtype=complex)
    for i in range(num_ofdm_symbols):
        # skip the cyclic prefix of each symbol
        ofdm_symbol = y[i * nfft + (i + 1) * cp_length:(i + 1) * (nfft + cp_length)]
        symbols_freq = fft(ofdm_symbol)
        x_hat[:, i] = concatenate((symbols_freq[-half:], symbols_freq[1:half + 1]))
    return x_hat
def mimo_ml(y, h, constellation):
    """MIMO maximum-likelihood detection over a 2-antenna system.

    Parameters
    ----------
    y : 2D ndarray of complex floats
        Received symbols (shape: num_receive_antennas x 1).
    h : 2D ndarray of complex floats
        Channel matrix (num_receive_antennas x num_transmit_antennas).
    constellation : 1D ndarray of complex floats
        Constellation used to modulate the symbols.

    Returns
    -------
    1D ndarray
        The candidate transmit vector minimizing the detection metric.
    """
    m = len(constellation)
    # enumerate all m^2 candidate transmit vectors as columns
    candidates = array([tile(constellation, m), repeat(constellation, m)])
    rx = tile(y, m * m)
    # sum of per-antenna absolute errors for every hypothesis
    metrics = sum(abs(rx - dot(h, candidates)), axis=0)
    best = argmin(metrics)
    return candidates[:, best]

View File

@ -0,0 +1,84 @@
# Authors: Veeresh Taranalli <veeresht@gmail.com>
# License: BSD 3-Clause
"""
==================================================
Sequences (:mod:`commpy.sequences`)
==================================================
.. autosummary::
:toctree: generated/
pnsequence -- PN Sequence Generator.
zcsequence -- Zadoff-Chu (ZC) Sequence Generator.
"""
__all__ = ['pnsequence', 'zcsequence']
from numpy import array, empty, zeros, roll, exp, pi, arange
def pnsequence(pn_order, pn_seed, pn_mask, seq_length):
    """
    Generate a PN (Pseudo-Noise) sequence with an LFSR.

    Parameters
    ----------
    pn_order : int
        Number of delay elements used in the LFSR.
    pn_seed : string containing 0's and 1's
        Seed for the initialization of the LFSR delay elements.
        Must be exactly 'pn_order' characters long.
    pn_mask : string containing 0's and 1's
        Mask selecting which delay elements feed back into the LFSR.
        Must be exactly 'pn_order' characters long.
    seq_length : int
        Length of the PN sequence to be generated. Usually (2^pn_order - 1).

    Returns
    -------
    pnseq : 1D ndarray of ints
        Generated PN sequence.

    Raises
    ------
    ValueError
        If pn_seed or pn_mask is not exactly pn_order characters long.
    """
    # Validation that the original left as a TODO comment: a wrong-length
    # seed or mask silently produced garbage before.
    if len(pn_seed) != pn_order:
        raise ValueError('pn_seed must have length pn_order (%d), got %d' %
                         (pn_order, len(pn_seed)))
    if len(pn_mask) != pn_order:
        raise ValueError('pn_mask must have length pn_order (%d), got %d' %
                         (pn_order, len(pn_mask)))
    pnseq = zeros(seq_length)
    # initialize the shift register with the seed bits
    sr = array([int(b) for b in pn_seed])
    for i in range(seq_length):
        # feedback bit: XOR of all taps selected by the mask
        new_bit = 0
        for j in range(pn_order):
            if int(pn_mask[j]) == 1:
                new_bit = new_bit ^ sr[j]
        # output the last register stage, then shift and inject the feedback
        pnseq[i] = sr[pn_order - 1]
        sr = roll(sr, 1)
        sr[0] = new_bit
    return pnseq.astype(int)
def zcsequence(u, seq_length):
    """
    Generate a Zadoff-Chu (ZC) sequence.

    Parameters
    ----------
    u : int
        Root index of the ZC sequence.
    seq_length : int
        Length of the sequence to be generated. Usually a prime number.

    Returns
    -------
    zcseq : 1D ndarray of complex floats
        Generated ZC sequence (constant-amplitude samples).
    """
    n = arange(seq_length)
    return exp((-1j * pi * u * n * (n + 1)) / seq_length)

144
scripts/commpy/utilities.py Normal file
View File

@ -0,0 +1,144 @@
# Authors: Veeresh Taranalli <veeresht@gmail.com>
# License: BSD 3-Clause
"""
============================================
Utilities (:mod:`commpy.utilities`)
============================================
.. autosummary::
:toctree: generated/
dec2bitarray -- Integer to binary (bit array).
bitarray2dec -- Binary (bit array) to integer.
hamming_dist -- Hamming distance.
euclid_dist -- Squared Euclidean distance.
upsample -- Upsample by an integral factor (zero insertion).
"""
import numpy as np
__all__ = ['dec2bitarray', 'bitarray2dec', 'hamming_dist', 'euclid_dist', 'upsample']
def dec2bitarray(in_number, bit_width):
    """
    Convert a positive integer to a NumPy bit array of the given width.

    Parameters
    ----------
    in_number : int
        Positive integer to be converted to a bit array.
    bit_width : int
        Size of the output bit array.

    Returns
    -------
    bitarray : 1D ndarray of ints
        Binary representation of the input, MSB first.
    """
    bits = np.zeros(bit_width, 'int')
    # Walk the binary digits from least to most significant and place them
    # from the end of the array backwards.
    for offset, digit in enumerate(reversed(bin(in_number)[2:])):
        bits[bit_width - 1 - offset] = int(digit)
    return bits
def bitarray2dec(in_bitarray):
    """
    Convert a NumPy array of bits (0 and 1) to a decimal integer.

    Parameters
    ----------
    in_bitarray : 1D ndarray of ints
        Input array of bits, MSB first.

    Returns
    -------
    number : int
        Integer value of the input bit array.
    """
    # Horner's scheme: arithmetically identical to summing bit * 2^position.
    number = 0
    for bit in in_bitarray:
        number = 2 * number + bit
    return number
def hamming_dist(in_bitarray_1, in_bitarray_2):
    """
    Compute the Hamming distance between two arrays of bits (0 and 1).

    Positions where either array holds something other than 0 or 1 are
    treated as "don't care" and never counted (Jinghao, 2016-10-19).

    Parameters
    ----------
    in_bitarray_1 : 1D ndarray of ints
        Array of bits.
    in_bitarray_2 : 1D ndarray of ints
        Array of bits.

    Returns
    -------
    distance : int
        Hamming distance between the input bit arrays.
    """
    valid = (0, 1)
    return sum(1 for a, b in zip(in_bitarray_1, in_bitarray_2)
               if a in valid and b in valid and a != b)
def euclid_dist(in_array1, in_array2):
    """
    Compute the squared Euclidean distance between two NumPy arrays.

    Parameters
    ----------
    in_array1 : 1D ndarray of floats
        Array of real values.
    in_array2 : 1D ndarray of floats
        Array of real values.

    Returns
    -------
    distance : float
        Squared Euclidean distance between the two inputs.
    """
    diff = in_array1 - in_array2
    return (diff * diff).sum()
def upsample(x, n):
    """
    Upsample the input array by a factor of n.

    Inserts n-1 zeros between consecutive samples of x.

    Parameters
    ----------
    x : 1D ndarray
        Input array.
    n : int
        Upsampling factor.

    Returns
    -------
    y : 1D ndarray of complex
        Upsampled array of length len(x) * n.
    """
    y = np.zeros(len(x) * n, dtype=complex)
    # every n-th slot carries a sample; the rest stay zero
    y[::n] = x
    return y

69
scripts/condense.py Executable file
View File

@ -0,0 +1,69 @@
#!/usr/bin/env python
"""
Condense a raw sample file generated by USPR, removing idle periods.
"""
import os
import argparse
import scipy
import itertools
import struct
WINDOW = 80
def main():
    """Condense a raw USRP I/Q capture, keeping only bursts above a power threshold.

    State machine: 'idle' until a window of samples crosses the threshold,
    then 'trigger' while the burst lasts (plus a fixed ~800-sample hold-off
    so short fades inside a packet are not cut out).

    Fixes over the original: itertools.izip/xrange/print-statements replaced
    with forms that behave identically on Python 2 but also run on Python 3,
    and the output file handle is closed via a with-block even on error.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('file', help="Raw sample file")
    parser.add_argument('--thres', type=int, default=1000, help="Power threshold")
    parser.add_argument('--out', help="Output file")
    parser.add_argument('--skip', type=int, default=int(0.1*20e6),
                        help="Inital samples to skip (USRP stabilization period)")
    args = parser.parse_args()

    # interleaved little-endian int16 I/Q -> complex samples
    # NOTE(review): scipy.fromfile is removed in modern SciPy; numpy.fromfile
    # is the drop-in replacement if this needs to run on a current stack.
    wave = scipy.fromfile(args.file, dtype=scipy.int16)
    raw_samples = [complex(i, q) for i, q in zip(wave[::2], wave[1::2])]
    print("%d raw samples" % (len(raw_samples)))

    if args.out is None:
        args.out = '_condensed'.join(os.path.splitext(args.file))

    state = 'idle'
    count = 0
    countdown = 0
    with open(args.out, 'wb') as fh:
        for i in range(0, len(raw_samples), WINDOW):
            if i < args.skip:
                continue
            window = raw_samples[i:i + WINDOW]
            # only the real (I) component is checked, as in the original
            active = any(abs(c.real) > args.thres for c in window)
            if state == 'idle' and active:
                state = 'trigger'
                countdown = 80 * 10  # hold-off: keep ~800 samples after trigger
                count += 1
            if state == 'trigger':
                if countdown > 0 or active:
                    do_write = True
                else:
                    do_write = False
                    state = 'idle'
                if do_write:
                    for c in window:
                        fh.write(struct.pack('<hh', int(c.real), int(c.imag)))
                countdown -= WINDOW
    print("%d packets" % (count))
    print("Output written to %s" % (args.out))


if __name__ == '__main__':
    main()

726
scripts/decode.py Normal file
View File

@ -0,0 +1,726 @@
#!/usr/bin/env python
import os
import numpy as np
import cmath
import collections
import itertools
import array
import math
from cStringIO import StringIO
import commpy.channelcoding.convcode as cc
from wltrace import dot11
LONG_PREAMBLE_TXT =\
"""
0 -0.078 0.000 40 0.098 -0.026 80 0.062 0.062 120 -0.035 -0.151
1 0.012 -0.098 41 0.053 0.004 81 0.119 0.004 121 -0.122 -0.017
2 0.092 -0.106 42 0.001 -0.115 82 -0.022 -0.161 122 -0.127 -0.021
3 -0.092 -0.115 43 -0.137 -0.047 83 0.059 0.015 123 0.075 -0.074
4 -0.003 -0.054 44 0.024 -0.059 84 0.024 0.059 124 -0.003 0.054
5 0.075 0.074 45 0.059 -0.015 85 -0.137 0.047 125 -0.092 0.115
6 -0.127 0.021 46 -0.022 0.161 86 0.001 0.115 126 0.092 0.106
7 -0.122 0.017 47 0.119 -0.004 87 0.053 -0.004 127 0.012 0.098
8 -0.035 0.151 48 0.062 -0.062 88 0.098 0.026 128 -0.156 0.000
9 -0.056 0.022 49 0.037 0.098 89 -0.038 0.106 129 0.012 -0.098
10 -0.060 -0.081 50 -0.057 0.039 90 -0.115 0.055 130 0.092 -0.106
11 0.070 -0.014 51 -0.131 0.065 91 0.060 0.088 131 -0.092 -0.115
12 0.082 -0.092 52 0.082 0.092 92 0.021 -0.028 132 -0.003 -0.054
13 -0.131 -0.065 53 0.070 0.014 93 0.097 -0.083 133 0.075 0.074
14 -0.057 -0.039 54 -0.060 0.081 94 0.040 0.111 134 -0.127 0.021
15 0.037 -0.098 55 -0.056 -0.022 95 -0.005 0.120 135 -0.122 0.017
16 0.062 0.062 56 -0.035 -0.151 96 0.156 0.000 136 -0.035 0.151
17 0.119 0.004 57 -0.122 -0.017 97 -0.005 -0.120 137 -0.056 0.022
18 -0.022 -0.161 58 -0.127 -0.021 98 0.040 -0.111 138 -0.060 -0.081
19 0.059 0.015 59 0.075 -0.074 99 0.097 0.083 139 0.070 -0.014
20 0.024 0.059 60 -0.003 0.054 100 0.021 0.028 140 0.082 -0.092
21 -0.137 0.047 61 -0.092 0.115 101 0.060 -0.088 141 -0.131 -0.065
22 0.001 0.115 62 0.092 0.106 102 -0.115 -0.055 142 -0.057 -0.039
23 0.053 -0.004 63 0.012 0.098 103 -0.038 -0.106 143 0.037 -0.098
24 0.098 0.026 64 -0.156 0.000 104 0.098 -0.026 144 0.062 0.062
25 -0.038 0.106 65 0.012 -0.098 105 0.053 0.004 145 0.119 0.004
26 -0.115 0.055 66 0.092 -0.106 106 0.001 -0.115 146 -0.022 -0.161
27 0.060 0.088 67 -0.092 -0.115 107 -0.137 -0.047 147 0.059 0.015
28 0.021 -0.028 68 -0.003 -0.054 108 0.024 -0.059 148 0.024 0.059
29 0.097 -0.083 69 0.075 0.074 109 0.059 -0.015 149 -0.137 0.047
30 0.040 0.111 70 -0.127 0.021 110 -0.022 0.161 150 0.001 0.115
31 -0.005 0.120 71 -0.122 0.017 111 0.119 -0.004 151 0.053 -0.004
32 0.156 0.000 72 -0.035 0.151 112 0.062 -0.062 152 0.098 0.026
33 -0.005 -0.120 73 -0.056 0.022 113 0.037 0.098 153 -0.038 0.106
34 0.040 -0.111 74 -0.060 -0.081 114 -0.057 0.039 154 -0.115 0.055
35 0.097 0.083 75 0.070 -0.014 115 -0.131 0.065 155 0.060 0.088
36 0.021 0.028 76 0.082 -0.092 116 0.082 0.092 156 0.021 -0.028
37 0.060 -0.088 77 -0.131 -0.065 117 0.070 0.014 157 0.097 -0.083
38 -0.115 -0.055 78 -0.057 -0.039 118 -0.060 0.081 158 0.040 0.111
39 -0.038 -0.106 79 0.037 -0.098 119 -0.056 -0.022 159 -0.005 0.120
"""
SHORT_PREAMBLE_TXT =\
"""
16 0.046 0.046 56 0.046 0.046 96 0.046 0.046 136 0.046 0.046
17 -0.132 0.002 57 0.002 -0.132 97 -0.132 0.002 137 0.002 -0.132
18 -0.013 -0.079 58 -0.079 -0.013 98 -0.013 -0.079 138 -0.079 -0.013
19 0.143 -0.013 59 -0.013 0.143 99 0.143 -0.013 139 -0.013 0.143
20 0.092 0.000 60 0.000 0.092 100 0.092 0.000 140 0.000 0.092
21 0.143 -0.013 61 -0.013 0.143 101 0.143 -0.013 141 -0.013 0.143
22 -0.013 -0.079 62 -0.079 -0.013 102 -0.013 -0.079 142 -0.079 -0.013
23 -0.132 0.002 63 0.002 -0.132 103 -0.132 0.002 143 0.002 -0.132
24 0.046 0.046 64 0.046 0.046 104 0.046 0.046 144 0.046 0.046
25 0.002 -0.132 65 -0.132 0.002 105 0.002 -0.132 145 -0.132 0.002
26 -0.079 -0.013 66 -0.013 -0.079 106 -0.079 -0.013 146 -0.013 -0.079
27 -0.013 0.143 67 0.143 -0.013 107 -0.013 0.143 147 0.143 -0.013
28 0.000 0.092 68 0.092 0.000 108 0.000 0.092 148 0.092 0.000
29 -0.013 0.143 69 0.143 -0.013 109 -0.013 0.143 149 0.143 -0.013
30 -0.079 -0.013 70 -0.013 -0.079 110 -0.079 -0.013 150 -0.013 -0.079
31 0.002 -0.132
"""
def _parse_preamble_table(txt):
    """Parse whitespace-separated 'index I Q' triples into {index: I + jQ}."""
    tokens = txt.split()
    return {int(tokens[k]): complex(float(tokens[k + 1]), float(tokens[k + 2]))
            for k in range(0, len(tokens), 3)}


# 160-sample long training sequence, ordered by sample index
_long_table = _parse_preamble_table(LONG_PREAMBLE_TXT)
LONG_PREAMBLE = [_long_table[k] for k in sorted(_long_table)]

# one 16-sample period of the short training sequence (indices 16..31)
_short_table = {k - 16: v
                for k, v in _parse_preamble_table(SHORT_PREAMBLE_TXT).items()
                if 16 <= k <= 31}
SHORT_PREAMBLE = [_short_table[k] for k in sorted(_short_table)]
# Table 78 - Rate-dependent parameters
# 802.11a/g rate (Mb/s) -> (coded bits per subcarrier, coded bits per OFDM
# symbol, data bits per OFDM symbol)
RATE_PARAMETERS = {
    # rate : (n_bpsc, n_cbps, n_dbps),
    6: (1, 48, 24),
    9: (1, 48, 36),
    12: (2, 96, 48),
    18: (2, 96, 72),
    24: (4, 192, 96),
    36: (4, 192, 144),
    48: (6, 288, 192),
    54: (6, 288, 216),
}
# 802.11n MCS index -> (n_bpsc, n_cbps, n_dbps), single spatial stream
HT_MCS_PARAMETERS = {
    0: (1, 52, 26),
    1: (2, 104, 52),
    2: (2, 104, 78),
    3: (4, 208, 104),
    4: (4, 208, 156),
    5: (6, 312, 208),
    6: (6, 312, 234),
    7: (6, 312, 260),
}
# subcarrier indices that carry pilot tones
PILOT_SUBCARRIES = [-21, -7, 7, 21]
# fixed-point scale used when phases are printed/compared as integers
PHASE_SCALE = 256
# 802.11a/g
SUBCARRIERS = range(-26, 0) + range(1, 27)
# logical subcarrier index -> FFT bin (negative indices wrap to the top bins)
FFT_MAPPING = dict((c, c if c > 0 else 64 + c) for c in SUBCARRIERS)
# known BPSK training symbols of the long training sequence, per subcarrier
LTS_REF = dict(
    zip(SUBCARRIERS,
        [1, 1, -1, -1, 1, 1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, 1,
         -1, 1, -1, 1, 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, -1,
         -1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, 1, 1]))
# 127-element pilot polarity sequence, cycled across OFDM symbols
polarity = [1, 1, 1, 1, -1, -1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, -1, 1,
            1, -1, 1, 1, -1, 1, 1, 1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1,
            1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, 1, 1, 1, 1,
            -1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1,
            -1, -1, 1, -1, -1, 1, -1, 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, -1, -1,
            -1, -1, -1, 1, -1, 1, 1, -1, 1, -1, 1, 1, 1, -1, -1, 1, -1, -1, -1,
            1, 1, 1, -1, -1, -1, -1, -1, -1, -1]
# SIGNAL-field RATE bits (as received, LSB first) -> rate in Mb/s
RATE_BITS = {
    '1101': 6,
    '1111': 9,
    '0101': 12,
    '0111': 18,
    '1001': 24,
    '1011': 36,
    '0001': 48,
    '0011': 54,
}
# 802.11n
HT_SUBCARRIERS = range(-28, 0) + range(1, 29)
HT_FFT_MAPPING = dict((c, c if c > 0 else 64 + c) for c in HT_SUBCARRIERS)
# append 1 for negative sub carriers, -1 for positive sub carriers
HT_LTS_REF = dict(
    zip(HT_SUBCARRIERS,
        [1, 1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1, -1,
         1, 1, -1, 1, -1, 1, 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1,
         -1, -1, -1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, 1, 1, -1,
         -1]))
def to_hex(n, n_bits):
    """Return the two's-complement hex string of n for an n_bits-wide word."""
    modulus = 1 << n_bits
    return hex((n + modulus) % modulus)
class ChannelEstimator(object):
    """
    Frequency offset correction and channel equalization.

    Consumes the raw sample stream of one packet (starting at the STS) and
    exposes equalized data subcarriers one OFDM symbol at a time via
    next_symbol(). Written for Python 2 (print statements, iterator .next()).
    """

    def __init__(self, samples):
        # samples[0] is the first sample of STS/L-STS
        self.samples = samples
        self.ht = False
        self.subcarriers = SUBCARRIERS
        self.fft_mapping = FFT_MAPPING
        self.short_gi = False
        # estimate and remove the carrier frequency offset; this also sets
        # self.lts_samples and self.data_samples
        self.fix_freq_offset()
        assert len(self.lts_samples) == 128
        # per-subcarrier channel gain: average of the two LTS halves times
        # the known +/-1 reference symbols
        lts1 = self.do_fft(self.lts_samples[:64])
        lts2 = self.do_fft(self.lts_samples[64:128])
        # print '[RAW LTS] (%s)' % (self.samples[160+32:160+32+4])
        # debug dump, disabled (range(0) never iterates)
        for i in range(0):
            print '[%d] (%d, %d) -> (%d, %d) phase: %f (diff: %d)' %\
                (i,
                 int(self.samples[192+i].real),
                 int(self.samples[192+i].imag),
                 int(self.lts_samples[i].real),
                 int(self.lts_samples[i].imag),
                 cmath.phase(self.lts_samples[i]),
                 int((cmath.phase(self.lts_samples[i])-cmath.phase(samples[192+i]))*PHASE_SCALE),
                 )
        self.gain = dict((c, (lts1[c]+lts2[c])/2*LTS_REF[c])
                         for c in self.subcarriers)
        # read position into self.data_samples
        self.idx = 0
        self.polarity = itertools.cycle(polarity)

    def next_symbol(self):
        """Return the next OFDM symbol's equalized data subcarriers.

        Skips the guard interval (8 samples for short GI, 16 otherwise),
        removes the pilot polarity randomization, corrects the residual
        carrier frequency offset estimated from the pilots, and divides by
        the per-subcarrier channel gain. Pilot subcarriers are excluded
        from the returned list.
        """
        if self.short_gi:
            symbol = self.do_fft(self.data_samples[self.idx+8:self.idx+72])
            self.idx += 72
        else:
            symbol = self.do_fft(self.data_samples[self.idx+16:self.idx+80])
            self.idx += 80
        # remove randomness of pilot carriers
        p = self.polarity.next()
        if not self.ht:
            polarity = [p, p, p, -p]
        else:
            polarity = [p*p1 for p1 in self.ht_polarity]
            self.ht_polarity.rotate(-1)
        # print '[POLARITY] %d: %s' % (p, polarity)
        for c, p in zip(PILOT_SUBCARRIES, polarity):
            symbol[c] *= p
        # Correct Residual Carrier Frequency Offset
        # beta is the common phase error estimated from all four pilots
        prod_sum = 0
        for c in PILOT_SUBCARRIES:
            prod = symbol[c].conjugate()*self.gain[c]
            prod_sum += prod
        beta = cmath.phase(prod_sum)
        print "[PILOT OFFSET] %f (%d)" % (beta, int(beta*PHASE_SCALE))
        carriers = []
        for c in self.subcarriers:
            if c in PILOT_SUBCARRIES:
                continue
            symbol[c] *= cmath.exp(complex(0, beta))
            carriers.append(symbol[c] / self.gain[c])
        return carriers

    def switch_ht(self):
        """
        Switch to HT channel estimator.

        Consumes the HT-STS and HT-LTS that follow the HT-SIG and replaces
        the subcarrier map and channel gains with their 802.11n variants.
        """
        self.ht = True
        self.subcarriers = HT_SUBCARRIERS
        self.fft_mapping = HT_FFT_MAPPING
        ht_sts = self.data_samples[self.idx:self.idx+80]
        self.idx += 80
        ht_offset = cmath.phase(sum([ht_sts[i].conjugate()*ht_sts[i+16]
                                     for i in range(len(ht_sts)-16)]))/16
        print '[HT OFFSET] %f (%d)' % (ht_offset, int(ht_offset*PHASE_SCALE))
        # NOTE(review): the estimated HT offset is printed but then forced to
        # zero, so no extra correction is applied -- presumably the earlier
        # coarse/fine correction is considered sufficient; confirm.
        ht_offset = 0
        self.data_samples = [c*cmath.exp(complex(0, -n*ht_offset))
                             for n, c in enumerate(self.data_samples[self.idx:])]
        self.idx = 0
        ht_lts = self.do_fft(self.data_samples[self.idx+16:self.idx+80])
        self.idx += 80
        self.gain = dict((c, ht_lts[c]*HT_LTS_REF[c]) for c in self.subcarriers)
        self.ht_polarity = collections.deque([1, 1, 1, -1])

    def do_fft(self, samples):
        """64-point FFT; returns {subcarrier index: complex bin value}."""
        assert len(samples) == 64, len(samples)
        freq = np.fft.fft(samples)
        return dict((c, freq[self.fft_mapping[c]]) for c in self.subcarriers)

    def fix_freq_offset(self):
        """Estimate and remove the carrier frequency offset.

        Coarse estimate from the 16-sample periodicity of the STS, fine
        estimate from the 64-sample periodicity of the LTS. Sets
        self.lts_samples (coarse-corrected LTS), self.freq_offset, and
        self.data_samples (corrected samples after the preamble).
        """
        sts = self.samples[80:160]
        lts = self.samples[160+32:160+160]
        coarse_offset = cmath.phase(sum([sts[i]*sts[i+16].conjugate()
                                         for i in range(len(sts)-16)]))/16
        # quantize to 1/256 radian steps (matches the fixed-point hardware)
        coarse_offset = int(coarse_offset*256)/256.0
        print '[COARSE OFFSET] %f (%d)' % (coarse_offset, int(coarse_offset*PHASE_SCALE))
        # coarse correction
        lts = [c*cmath.exp(complex(0, n*coarse_offset))
               for n, c in enumerate(lts)]
        fine_offset = cmath.phase(sum([lts[i]*lts[i+64].conjugate()
                                       for i in range(len(lts)-64)]))/64
        print '[FINE OFFSET] %f (%d)' % (fine_offset, int(fine_offset*PHASE_SCALE))
        # NOTE(review): the fine estimate is printed but then zeroed, so only
        # the coarse correction is actually applied -- confirm intended.
        fine_offset = 0
        self.lts_samples = [c*cmath.exp(complex(0, n*fine_offset))
                            for n, c in enumerate(lts)]
        self.freq_offset = coarse_offset + fine_offset
        print '[FREQ OFFSET] %f (%d)' % (self.freq_offset, int(self.freq_offset*PHASE_SCALE))
        # correct everything after the preamble; start=128 keeps the sample
        # counter continuous with the LTS correction above
        self.data_samples = [c*cmath.exp(complex(0, n*self.freq_offset))
                             for n, c in enumerate(self.samples[320:], start=128)]
class Demodulator(object):
    """Nearest-point hard demapper for the 802.11a/g/n constellations.

    The constellation is selected from (rate, mcs, ht) at construction time.
    An unsupported combination leaves the instance unconfigured (no else
    branch), matching the original behavior.
    """

    # Gray-coded 2-bit group -> 16-QAM axis level
    QAM16_MAPPING = {
        0b00: -3,
        0b01: -1,
        0b11: 1,
        0b10: 3,
    }

    # Gray-coded 3-bit group -> 64-QAM axis level
    QAM64_MAPPING = {
        0b000: -7,
        0b001: -5,
        0b011: -3,
        0b010: -1,
        0b110: 1,
        0b111: 3,
        0b101: 5,
        0b100: 7,
    }

    def __init__(self, rate=6, mcs=0, ht=False):
        is_bpsk = (mcs == 0) if ht else (rate in [6, 9])
        is_qpsk = (mcs in [1, 2]) if ht else (rate in [12, 18])
        is_qam16 = (mcs in [3, 4]) if ht else (rate in [24, 36])
        is_qam64 = (mcs in [5, 6, 7]) if ht else (rate in [48, 54])
        if is_bpsk:
            self.scale = 1
            self.bits_per_sym = 1
            self.cons_points = np.array([complex(-1, 0), complex(1, 0)])
        elif is_qpsk:
            self.scale = 1
            self.bits_per_sym = 2
            self.cons_points = np.array(
                [complex(-1, -1), complex(-1, 1),
                 complex(1, -1), complex(1, 1)]
            )
        elif is_qam16:
            self.scale = 3
            self.bits_per_sym = 4
            self.cons_points = np.array(
                [complex(self.QAM16_MAPPING[i >> 2],
                         self.QAM16_MAPPING[i & 0b11]) for i in range(16)])
        elif is_qam64:
            self.scale = 7
            self.bits_per_sym = 6
            self.cons_points = np.array(
                [complex(self.QAM64_MAPPING[i >> 3],
                         self.QAM64_MAPPING[i & 0b111]) for i in range(64)])

    def demodulate(self, carriers):
        """Hard-demap each carrier to the bits of its nearest constellation point."""
        fmt = '{0:0%db}' % (self.bits_per_sym)
        bits = []
        for point in carriers:
            nearest = np.argmin(abs(point * self.scale - self.cons_points))
            bits.extend(int(ch) for ch in fmt.format(nearest))
        return bits
class Signal(object):
    """Decoded 802.11a/g SIGNAL (L-SIG) field.

    Multi-bit fields are transmitted LSB first, hence the [::-1] reversal
    before int(..., 2).
    """

    def __init__(self, bits):
        assert len(bits) == 24
        text = ''.join(str(b) for b in bits)
        # field layout: RATE(4) | reserved(1) | LENGTH(12) | parity(1) | tail(6)
        self.rate_bits = text[:4]
        self.rsvd = text[4]
        self.len_bits = text[5:17]
        self.parity_bits = text[17]
        self.tail_bits = text[18:]
        self.rate = RATE_BITS.get(self.rate_bits, 0)
        self.length = int(self.len_bits[::-1], 2)
        # even parity over the first 18 bits
        self.parity_ok = sum(bits[:18]) % 2 == 0
        self.ht = False
        self.mcs = 0
class HTSignal(object):
    """Decoded 802.11n HT-SIG field.

    Multi-bit fields are transmitted LSB first, hence the [::-1] reversal
    before int(..., 2).
    """

    def __init__(self, bits):
        assert len(bits) == 48
        text = ''.join(str(b) for b in bits)
        self.mcs_bits = text[:6]
        # NOTE(review): bit 6 is skipped here (MCS field is nominally 7 bits,
        # only 6 are kept) -- confirm intended for the supported MCS 0-7 range.
        self.cbw = text[7]
        self.len_bits = text[8:24]
        self.smoothing = text[24]
        self.not_sounding = text[25]
        self.rsvd = text[26]
        self.aggregation = text[27]
        self.stbc = text[28:30]
        self.fec = text[30]
        self.short_gi = text[31]
        self.num_ext_stream = text[32:34]
        self.crc = text[34:42]
        self.tail_bits = text[42:48]
        self.mcs = int(self.mcs_bits[::-1], 2)
        try:
            self.rate = dot11.mcs_to_rate(self.mcs)
        except:
            # dot11 unavailable or unknown MCS: fall back to 0
            self.rate = 0
        self.length = int(self.len_bits[::-1], 2)
        self.expected_crc = ''.join('%d' % c for c in self.calc_crc(bits[:34]))
        self.crc_ok = self.expected_crc == self.crc

        self.ht = True

    def calc_crc(self, bits):
        """CRC-8 over the first 34 HT-SIG bits; returns the inverted,
        bit-reversed register as a list of ints."""
        state = [1] * 8
        for b in bits:
            feedback = b ^ state[7]
            state = [feedback,
                     feedback ^ state[0],
                     feedback ^ state[1],
                     state[2],
                     state[3],
                     state[4],
                     state[5],
                     state[6]]
        return [1 - v for v in state[::-1]]
class Decoder(object):
def __init__(self, path, power_thres=200, skip=1e6, window=160):
if path is not None:
self.fh = open(path, 'rb')
size = os.path.getsize(path)
if skip*4 < size:
self.fh.seek(skip*4)
else:
print "[WARN] try to seek beyond end of file %d/%d" % (skip*4, size)
self.power_thres = 200
self.window = window
self.count = skip
def decode_next(self, *args, **kwargs):
    """Read the capture until the next packet and decode it.

    Power-trigger state machine: accumulate samples once any sample in a
    chunk exceeds self.power_thres; when a whole chunk falls back below the
    threshold, try to locate a packet preamble in the accumulated burst and
    hand it to self.decode(). Extra args are forwarded to decode().
    Blocks (loops) until a packet is found; raises at EOF when the read
    returns too few bytes for the array constructor.
    """
    trigger = False
    samples = []
    while True:
        # NOTE(review): self.window is used as a byte count here; each
        # complex sample is 4 bytes (two little-endian int16) -- confirm.
        chunk = array.array('h', self.fh.read(self.window))
        chunk = [complex(i, q) for i, q in zip(chunk[::2], chunk[1::2])]
        if not trigger and any([abs(c) > self.power_thres for c in chunk]):
            trigger = True
            samples = []
            print "Power trigger at %d" % (self.count)
        self.count += self.window
        if trigger:
            samples.extend(chunk)
        if trigger and all([abs(c) < self.power_thres for c in chunk]):
            # burst ended: look for the preamble inside it
            start = self.find_pkt(samples)
            if start is None:
                # false trigger; go back to waiting
                trigger = False
            else:
                return self.decode(samples[start:], *args, **kwargs)
def find_pkt(self, samples):
    """Locate the packet start inside a triggered burst.

    Cross-correlates against one LTS period; two correlation peaks exactly
    64 samples apart mark the two LTS halves. Returns the index of the
    first STS sample, or None if no packet was detected.
    """
    corr = np.convolve(samples, LONG_PREAMBLE[32:96], mode='same')
    top_two = np.array([abs(v) for v in corr]).argsort()[-2:]
    lo, hi = min(top_two), max(top_two)
    # back up over the first LTS half (64) and the STS + GI (160)
    if hi - lo == 64 and (lo - 64 - 160) > 0:
        return lo - 64 - 160
    return None
def demodulate(self, carriers, signal=None):
    """Demodulate equalized carriers.

    Before the SIGNAL field is decoded (signal is None) the header is
    always BPSK at 6 Mb/s; afterwards the modulation follows the decoded
    rate/MCS.
    """
    if signal is None:
        return Demodulator(rate=6).demodulate(carriers)
    return Demodulator(signal.rate, signal.mcs, signal.ht).demodulate(carriers)
def deinterleave(self, in_bits, rate=6, mcs=0, ht=False, verbose=False):
    """Undo the 802.11 per-symbol block interleaver.

    Parameters
    ----------
    in_bits : list of ints or None
        Interleaved coded bits (a whole number of OFDM symbols). If None,
        returns the sorted (interleaved_index, original_index) map instead.
    rate / mcs / ht : select the 802.11a/g rate table or the 802.11n MCS
        table, which fix n_cbps and the interleaver geometry.

    Fix: all divisions in the permutation formulas are now explicit floor
    divisions (//). The original relied on Python 2 integer `/`, which
    silently produces floats (and wrong indices) on Python 3.
    """
    if not ht:
        n_bpsc, n_cbps, n_dbps = RATE_PARAMETERS[rate]
        n_col = 16
        n_row = 3 * n_bpsc
    else:
        n_bpsc, n_cbps, n_dbps = HT_MCS_PARAMETERS[mcs]
        n_col = 13
        n_row = 4 * n_bpsc
    s = max(n_bpsc // 2, 1)
    # first permutation: undo the column rotation within groups of s bits
    first_perm = dict()
    for j in range(0, n_cbps):
        first_perm[j] = (s * (j // s)) + ((j + n_col * j // n_cbps) % s)
    # second permutation: undo the row/column transposition
    second_perm = dict()
    for i in range(0, n_cbps):
        second_perm[i] = n_col * i - (n_cbps - 1) * (i // n_row)
    if verbose:
        print("Bit rate: %f Mb/s" % (rate))
        print("Bits per sub carrier: %d" % (n_bpsc))
        print("Coded bits per symbol: %d" % (n_cbps))
        print("Data bits per symbol %d" % (n_dbps))
        print("S = %d" % (s))
        print("====== Overall permutation =========")
        for j in range(0, n_cbps):
            print('%d -> %d -> %d' % (j, first_perm[j], second_perm[first_perm[j]]))
    if in_bits is None:
        idx_map = [(second_perm[first_perm[k]], k) for k in range(n_cbps)]
        return sorted(idx_map)
    if verbose:
        print('%d bits, %f samples' % (len(in_bits), float(len(in_bits)) / n_cbps))
    out_bits = [0] * len(in_bits)
    for n in range(len(in_bits) // n_cbps):
        base = n * n_cbps
        for j in range(n_cbps):
            out_bits[base + second_perm[first_perm[j]]] = in_bits[base + j]
    return out_bits
def viterbi_decode(self, bits, signal=None):
    """Depuncture (if needed) and Viterbi-decode one coded bit stream.

    The value 2 is inserted below as the erasure ("don't care") marker the
    commpy depuncturing decoder expects. Returns the decoded bits as a list
    with the last 7 (trellis flush) bits removed. Python 2 only: uses the
    0133/0171 octal literal syntax.
    """
    if signal is None:
        ht = False
        rate = 6
    else:
        ht, rate, mcs = signal.ht, signal.rate, signal.mcs
    if (not ht and rate in [9, 18, 36, 54]) or (ht and mcs in [2, 4, 6]):
        print '[PUNCTURE] 3/4'
        # 3/4: every 12 surviving bits expand to 18 with 3 erasure pairs
        new_bits = []
        for i in range(0, len(bits), 12):
            new_bits.extend(bits[i:i+3])
            new_bits.extend([2, 2])
            new_bits.extend(bits[i+3:i+7])
            new_bits.extend([2, 2])
            new_bits.extend(bits[i+7:i+11])
            new_bits.extend([2, 2])
            new_bits.extend(bits[i+11:i+12])
        bits = new_bits
    elif (not ht and rate in [48]) or (ht and mcs in [5]):
        print '[PUNCTURE] 2/3'
        # 2/3: one erasure after every 3 surviving bits
        new_bits = []
        for i in range(0, len(bits), 9):
            new_bits.extend(bits[i:i+3])
            new_bits.append(2)
            new_bits.extend(bits[i+3:i+6])
            new_bits.append(2)
            new_bits.extend(bits[i+6:i+9])
            new_bits.append(2)
        bits = new_bits
    elif ht and mcs in [7]:
        print '[PUNCTURE] 5/6'
        # 5/6: two erasure pairs per 6 surviving bits
        new_bits = []
        for i in range(0, len(bits), 6):
            new_bits.extend(bits[i:i+3])
            new_bits.extend([2, 2])
            new_bits.extend(bits[i+3:i+5])
            new_bits.extend([2, 2])
            new_bits.append(bits[i+5])
        bits = new_bits
    else:
        print '[NO PUNCTURE]'
    # NOTE(review): 2 leading / 12 trailing zeros pad the trellis --
    # presumably encoder flush alignment; confirm against the encoder.
    extended_bits = np.array([0]*2 + bits + [0]*12)
    # K=7 convolutional code with generators 0133/0171 (octal), the
    # standard 802.11 rate-1/2 mother code
    trellis = cc.Trellis(np.array([7]), np.array([[0133, 0171]]))
    return list(cc.viterbi_decode(extended_bits, trellis, tb_depth=35))[:-7]
def descramble(self, bits):
    """Self-synchronizing 802.11 descrambler (polynomial x^7 + x^4 + 1).

    The scrambler's initial state is recovered from the first seven
    received bits, then every bit is XORed with the regenerated
    scrambling sequence. Returns the descrambled bits as a list.
    """
    # reconstruct the 7-bit LFSR state from the first seven inputs
    state = [0] * 7
    state[0] = bits[2] ^ bits[6]
    state[1] = bits[1] ^ bits[5]
    state[2] = bits[0] ^ bits[4]
    state[3] = state[0] ^ bits[3]
    state[4] = state[1] ^ bits[2]
    state[5] = state[2] ^ bits[1]
    state[6] = state[3] ^ bits[0]
    out_bits = []
    for b in bits:
        feedback = state[6] ^ state[3]
        out_bits.append(feedback ^ b)
        # shift the register, feeding the new bit in at position 0
        state = [feedback] + state[:-1]
    return out_bits
def check_signal(self, signal):
    """Sanity-check a decoded legacy SIGNAL field.

    Returns True when the rate bits, reserved bit, parity and tail bits
    all look valid; otherwise prints the failure reason and returns False.
    """
    # RATE_BITS is the module-level table of valid 4-bit rate encodings.
    if signal.rate_bits not in RATE_BITS:
        print '[SIGNAL] invalid rate: %s' % (signal.rate_bits)
        return False
    # Reserved bit must be 0.
    if signal.rsvd != '0':
        print '[SIGNAL] wrong rsvd'
        return False
    if not signal.parity_ok:
        print '[SIGNAL] wrong parity'
        return False
    # All tail bits must be zero.
    if not all([b == '0' for b in signal.tail_bits]):
        print '[SIGNAL] wrong tail: %s' % (signal.tail_bits)
        return False
    return True
def check_ht_signal(self, signal):
    """Sanity-check a decoded HT-SIG field against this decoder's limits.

    Only single-stream, 20 MHz, BCC-coded frames (MCS 0-7) are supported.
    Returns True when the HT-SIG is acceptable; otherwise prints the
    reason and returns False.
    """
    # Only single spatial stream MCS values are handled.
    if signal.mcs > 7:
        print '[HT SIGNAL] mcs not supported: %d' % (signal.mcs)
        return False
    # CBW bit 0 == 20 MHz; 40 MHz frames are rejected.
    if signal.cbw != '0':
        print '[HT SIGNAL] CBW not supported'
        return False
    if signal.rsvd != '1':
        print '[HT SIGNAL] wrong rsvd'
        return False
    if signal.stbc != '00':
        print '[HT SIGNAL] stbc not supported: %s' % (signal.stbc)
        return False
    # FEC bit 0 == BCC; LDPC is not implemented.
    if signal.fec != '0':
        print '[HT SIGNAL] FEC not supported'
        return False
    if signal.num_ext_stream != '00':
        print '[HT SIGNAL] EXT spatial stream not supported: %s' % (signal.num_ext_stream)
        return False
    if not all([b == '0' for b in signal.tail_bits]):
        print '[HT SIGNAL] wrong tail: %s' % (signal.tail_bits)
        return False
    return True
def decode(self, samples, *args, **kwargs):
    """Decode one 802.11a/g/n frame from complex baseband samples.

    Pipeline: channel estimate/equalize -> SIGNAL (plus a tentative
    HT-SIG for 6 Mbps frames) -> demodulate -> deinterleave -> Viterbi
    decode -> descramble -> bytes -> parsed dot11 packet.

    Returns (signal, constellation_points, demod_out, deinter_out,
    conv_out, descramble_out, data_bytes, pkt), or None when the
    SIGNAL / HT-SIG validity checks fail.
    """
    eq = ChannelEstimator(samples)
    # First OFDM symbol is the legacy SIGNAL field (48 data carriers).
    carriers = eq.next_symbol()
    assert len(carriers) == 48
    carrier_bits = self.demodulate(carriers)
    raw_bits = self.deinterleave(carrier_bits)
    bits = self.viterbi_decode(raw_bits)
    signal = Signal(bits)
    print "[SIGNAL] %s" % (signal.__dict__)
    if not self.check_signal(signal):
        return
    cons = []  # accumulated data-symbol constellation points
    if signal.rate == 6:
        # decode next two OFDM SYMs to detect potential 802.11n packets
        demod_out = []
        for _ in range(2):
            carriers = eq.next_symbol()
            assert len(carriers) == 48
            cons.extend(carriers)
            # HT-SIG was rotated by 90 degrees, rotate it back
            carriers = [c*complex(0, -1) for c in carriers]
            carrier_bits = self.demodulate(carriers)
            demod_out.extend(carrier_bits)
        raw_bits = self.deinterleave(demod_out)
        signal_bits = self.viterbi_decode(raw_bits)
        ht_signal = HTSignal(signal_bits)
        if ht_signal.crc_ok:
            # Valid CRC => this really is an HT frame; restart symbol
            # collection in HT mode.
            print "[HT SIGNAL] %s" % (ht_signal.__dict__)
            if not self.check_ht_signal(ht_signal):
                return
            signal = ht_signal
            eq.switch_ht()
            cons = []
            eq.short_gi = ht_signal.short_gi == '1'
    # Number of DATA symbols: 16 SERVICE bits + payload + 6 tail bits,
    # divided by data bits per symbol (index 2 of the parameter tuples).
    if signal.ht:
        num_symbol = int(math.ceil(float((16+signal.length*8+6))/
                                   HT_MCS_PARAMETERS[signal.mcs][2]))
    else:
        num_symbol = int(math.ceil(float((16+signal.length*8.0+6))/
                                   RATE_PARAMETERS[signal.rate][2]))
    print "%d DATA OFDM symbols to decode" % (num_symbol)
    if signal.rate == 6 and not signal.ht:
        # The two symbols consumed by the HT-SIG probe above were really
        # legacy DATA symbols and are already in `cons`.
        num_symbol -= 2
    for _ in range(num_symbol):
        carriers = eq.next_symbol()
        # HT symbols carry 52 data carriers, legacy symbols 48.
        if signal.ht:
            assert len(carriers) == 52, len(carriers)
        else:
            assert len(carriers) == 48, len(carriers)
        cons.extend(carriers)
    demod_out = self.demodulate(cons, signal)
    deinter_out = self.deinterleave(demod_out, signal.rate, signal.mcs, signal.ht)
    conv_out = self.viterbi_decode(deinter_out, signal)
    descramble_out = self.descramble(conv_out)
    # The 16 SERVICE bits descramble to all zeros on a clean decode.
    if not all([b == 0 for b in descramble_out[:16]]):
        print '[SERVICE] not all bits are 0'
    # skip the SERVICE bits
    data_bits = descramble_out[16:]
    num_bytes = min(len(data_bits)/8, signal.length)
    data_bytes = [self.array_to_int(data_bits[i*8:(i+1)*8])
                  for i in range(num_bytes)]
    assert len(data_bytes) == num_bytes
    # Hex dump of the payload, 16 bytes per row.
    for i in range(0, num_bytes, 16):
        print '[%3d] %s' %\
            (i, ' '.join([format(data_bytes[j], '02x')
                          for j in range(i, min(i+16, num_bytes))]))
    fh = StringIO(''.join([chr(b) for b in data_bytes]))
    pkt = dot11.Dot11Packet(fh)
    return signal, cons, demod_out, deinter_out, conv_out, descramble_out, data_bytes, pkt
def array_to_int(self, arr):
    """Pack exactly eight LSB-first bits into one integer byte value."""
    assert len(arr) == 8
    # arr[0] is the least significant bit, so reverse before parsing.
    bit_string = ''.join(str(b) for b in reversed(arr))
    return int(bit_string, 2)

50
scripts/gen_atan_lut.py Executable file
View File

@ -0,0 +1,50 @@
#!/usr/bin/env python
"""
Generate atan Look Up Table (LUT).

Key   = math.tan(phase)*SIZE -- phase \in [0, math.pi/4)
Value = int(round(math.atan(key)*SCALE))

SIZE is the LUT size. The value is scaled up by SCALE (= SIZE*2) so that
adjacent LUT entries stay distinguishable after rounding to integers.
"""
SIZE = 2**8    # number of LUT entries; table index is tan(phase)*SIZE
SCALE = 512    # fixed-point scale applied to the stored phase (SIZE*2)
import argparse
import math
import os
def main():
    """Write the atan LUT as a .mif file (one binary value per line) and
    a matching Xilinx .coe initialization file."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--out')
    args = parser.parse_args()
    if args.out is None:
        args.out = os.path.join(os.getcwd(), 'atan_lut.mif')
    coe_out = '%s.coe' % (os.path.splitext(args.out)[0])
    data = []
    with open(args.out, 'w') as f:
        for i in range(SIZE):
            # key is tan(phase) in [0, 1); the stored value is the phase
            # scaled by SCALE and rounded.
            key = float(i)/SIZE
            val = int(round(math.atan(key)*SCALE))
            data.append(val)
            print '%f -> %d' % (key, val)
            # 9 bits suffice: the maximum value is atan(1)*512 ~= 402.
            f.write('{0:09b}\n'.format(val))
    print "LUT SIZE %d, SCALE %d" % (SIZE, SCALE)
    print "MIL file saved as %s" % (args.out)
    with open(coe_out, 'w') as f:
        f.write('memory_initialization_radix=2;\n')
        f.write('memory_initialization_vector=\n')
        f.write(',\n'.join(['{0:09b}'.format(l) for l in data]))
        f.write(';')
    print "COE file saved as %s" % (coe_out)
if __name__ == '__main__':
    main()

225
scripts/gen_deinter_lut.py Executable file
View File

@ -0,0 +1,225 @@
#!/usr/bin/env python
"""
Generate 802.11a/g/n Deinterleave LUT.
Output compensates for puncturing.
"""
import argparse
import math
import decode
import os
"""
LUT ENTRY FORMAT
1 bit -- null_a
1 bit -- null_b
6 bit -- addra
6 bit -- addrb
3 bit -- bita
3 bit -- bitb
1 bit -- out_stb
1 bit -- done
-------------------
22 bits total
LUT FORMAT
+----------------+
| BASE ADDR |
| 32 ENTRY |
+----------------+
| 6 MBPS |
+----------------+
| 9 MBPS |
+----------------+
....
+----------------+
| MCS 0 |
+----------------+
...
+----------------+
| MCS 7 |
+----------------+
| PADDING |
+----------------+
"""
# 4-bit RATE field encodings of the legacy SIGNAL field, keyed by Mbps;
# used as indices into the 32-entry LUT header.
RATE_BITS = {
    6: '1011',
    9: '1111',
    12: '1010',
    18: '1110',
    24: '1001',
    36: '1101',
    48: '1000',
    54: '1100',
}
# Every rate/MCS combination a LUT section is generated for.
RATES = [
    # (rate, mcs, ht)
    (6, 0, False),
    (9, 0, False),
    (12, 0, False),
    (18, 0, False),
    (24, 0, False),
    (36, 0, False),
    (48, 0, False),
    (54, 0, False),
    (0, 0, True),
    (0, 1, True),
    (0, 2, True),
    (0, 3, True),
    (0, 4, True),
    (0, 5, True),
    (0, 6, True),
    (0, 7, True),
]
def do_rate(rate=6, mcs=0, ht=False):
    """Build the deinterleave-LUT section for one rate/MCS combination.

    Walks the deinterleave permutation from the reference decoder and
    emits one 22-bit hardware word per output cycle (two bits per word,
    ports a and b; see the module docstring for the bit layout), inserting
    null/erasure words to undo the transmitter's puncturing.

    rate -- legacy rate in Mbps (ignored when ht is True)
    mcs  -- HT MCS index 0-7 (used when ht is True)
    ht   -- True to generate the 802.11n (52-carrier) section
    Returns the list of LUT words, terminated by an address-reset word
    and four zero sentinel rows.
    """
    idx_map = decode.Decoder(None).deinterleave(None, rate=rate, mcs=mcs, ht=ht)
    seq = [t[1] for t in idx_map]
    # Select the puncture (erasure) pattern for this rate/MCS.
    erase = '1/2'
    if ht:
        n_bpsc = decode.HT_MCS_PARAMETERS[mcs][0]
        if mcs in [2, 4, 6]:
            erase = '3/4'
        elif mcs == 5:
            erase = '2/3'
        elif mcs == 7:
            erase = '5/6'
    else:
        n_bpsc = decode.RATE_PARAMETERS[rate][0]
        # BUG FIX: 54 Mbps (64-QAM, rate 3/4) was missing here and fell
        # through to '1/2'; it must be depunctured like 9/18/36 Mbps,
        # matching viterbi_decode() in decode.py.
        if rate in [9, 18, 36, 54]:
            erase = '3/4'
        elif rate == 48:
            erase = '2/3'
    data = []
    i = 0
    puncture = 0  # position inside the puncturing period
    while i < len(seq):
        # Port a carries bit i, port b carries bit i+1 of the permuted
        # sequence; addr = subcarrier word, bit = position within it.
        addra = seq[i] // n_bpsc
        bita = seq[i] % n_bpsc
        if i+1 < len(seq):
            addrb = seq[i+1] // n_bpsc
            bitb = seq[i+1] % n_bpsc
        else:
            addrb = 0
            bitb = 0
        # (1<<1) asserts out_stb; bit 20 nulls port b, bit 21 nulls port a.
        base = (addra<<14) + (addrb<<8) + (bita<<5) + (bitb<<2) + (1<<1)
        if erase == '1/2':
            # Rate 1/2: no puncturing, every word is emitted as-is.
            data.append(base)
        elif erase == '3/4':
            # 3/4: after each real word, insert two dummy (nulled) bits.
            if puncture == 0:
                data.append(base)
                puncture = 1
            else:
                data.append((1<<20) + base)
                data.append((1<<21) + base)
                puncture = 0
        elif erase == '2/3':
            # 2/3: null port b, then re-emit that bit on the next word.
            if puncture == 0:
                data.append(base)
                puncture = 1
            else:
                data.append((1<<20) + base)
                i -= 1
                puncture = 0
        elif erase == '5/6':
            # 5/6: one clean word followed by two double-nulled words.
            if puncture == 0:
                data.append(base)
                puncture = 1
            elif puncture == 1:
                data.append((1<<20) + base)
                data.append((1<<21) + base)
                puncture = 2
            else:
                data.append((1<<20) + base)
                data.append((1<<21) + base)
                puncture = 0
        i += 2
    # reset addra to NUM_SUBCARRIER/2 (52/2 for HT, 48/2 for legacy) and
    # assert done (bit 0)
    if ht:
        data.append((26<<14) + 1)
    else:
        data.append((24<<14) + 1)
    # sentinel rows
    data.extend([0]*4)
    return data
def main():
    """Assemble the full deinterleave LUT (32-entry header of section
    offsets followed by one section per rate/MCS) and write it as .mif
    and .coe files."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--out')
    args = parser.parse_args()
    if args.out is None:
        args.out = os.path.join(os.getcwd(), 'deinter_lut.mif')
    coe_out = '%s.coe' % (os.path.splitext(args.out)[0])
    header = [0]*32
    lut = []
    offset = 32  # sections start right after the 32-entry header
    for rate, mcs, ht in RATES:
        # Header index: bit 4 set + MCS for HT, else the 4-bit RATE code.
        if ht:
            idx = (1<<4) + mcs
        else:
            idx = int(RATE_BITS[rate], 2)
        header[idx] = offset
        print '[rate=%d, mcs=%d] -> %d' % (rate, mcs, offset)
        data = do_rate(rate=rate, mcs=mcs, ht=ht)
        offset += len(data)
        lut.extend(data)
    # Pad the LUT up to the next power of two rows.
    total = int(2**math.ceil(math.log(offset, 2)))
    print 'Total row: %d (round up to %d)' % (offset, total)
    lut.extend([0]*(total-offset))
    with open(args.out, 'w') as f:
        for l in header + lut:
            f.write('{0:022b}\n'.format(l))
    print "MIL file saved as %s" % (args.out)
    with open(coe_out, 'w') as f:
        f.write('memory_initialization_radix=2;\n')
        f.write('memory_initialization_vector=\n')
        f.write(',\n'.join(['{0:022b}'.format(l) for l in header+lut]))
        f.write(';')
    print "COE file saved as %s" % (coe_out)
if __name__ == '__main__':
    main()

52
scripts/gen_rot_lut.py Executable file
View File

@ -0,0 +1,52 @@
#!/usr/bin/env python
"""
Generate the rotation vector LUT.
Key = phase -- phase \in [0, math.pi/4)
Value = SCALE*(math.cos(phase), math.sin(phase))
"""
import argparse
import math
import os
ATAN_LUT_SCALE = 512   # must match SCALE in gen_atan_lut.py: LUT keys are phases scaled by it
SCALE = 2048           # fixed-point scale of the stored cos/sin components
def main():
    """Write the rotation-vector LUT as .mif and .coe files; each entry
    packs scaled cos in the high 16 bits and scaled sin in the low 16."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--out')
    args = parser.parse_args()
    if args.out is None:
        args.out = os.path.join(os.getcwd(), 'rot_lut.mif')
    coe_out = '%s.coe' % (os.path.splitext(args.out)[0])
    # Largest scaled phase key (pi/4 * ATAN_LUT_SCALE), rounded up to a
    # power-of-two table size.
    MAX = int(round(math.pi/4*ATAN_LUT_SCALE))
    SIZE = int(2**math.ceil(math.log(MAX, 2)))
    data = []
    with open(args.out, 'w') as f:
        for i in range(SIZE):
            key = float(i)/MAX*math.pi/4
            I = int(round(math.cos(key)*SCALE))
            Q = int(round(math.sin(key)*SCALE))
            print '%f -> %d -> (%d, %d)' % (key, i, I, Q)
            # Packing is safe: phase in [0, pi/4) keeps I and Q >= 0.
            val = (I<<16) + Q
            data.append(val)
            f.write('{0:032b}\n'.format(val))
    print "SIZE = %d, scale = %d" % (SIZE, SCALE)
    print "MIL file saved as %s" % (args.out)
    with open(coe_out, 'w') as f:
        f.write('memory_initialization_radix=2;\n')
        f.write('memory_initialization_vector=\n')
        f.write(',\n'.join(['{0:032b}'.format(l) for l in data]))
        f.write(';')
    print "COE file saved as %s" % (coe_out)
if __name__ == '__main__':
    main()

267
scripts/policy.py Executable file
View File

@ -0,0 +1,267 @@
#!/usr/bin/env python
import os
import argparse
import jsonschema
import json
import itertools
MCS_MAX = 7
LENGTH_MAX = 4095
MAX_ACTION_LEN = 256
HEADER_LEN = 16
SR_RATE_FILTER = 7
SR_LEN_FILTER = 8
SR_HEADER_FILTER = 9
SR_HEADER_LEN =10
SR_JAM_POLICY = 11
POLICY_JSON_SCHEMA = """
{
"title": "Jamming Policy Format Specification",
"type": "array",
"items": {
"$ref": "#/definitions/policy"
},
"minItems": 1,
"additionalProperties": false,
"definitions": {
"policy": {
"type": "object",
"properties": {
"length": {
"type": "integer",
"minimum": 0,
"maximum": 4095
},
"length_min": {
"type": "integer",
"minimum": 0,
"maximum": 4095
},
"length_max": {
"type": "integer",
"minimum": 0,
"maximum": 4095
},
"mcs": {
"type": "integer",
"minimum": 0,
"maximum": 7
},
"mcs_min": {
"type": "integer",
"minimum": 0,
"maximum": 7
},
"mcs_max": {
"type": "integer",
"minimum": 0,
"maximum": 7
},
"header": {
"type": "string",
"maxLength": 47
},
"actions": {
"type": "array",
"minItems": 1,
"maxItems": 256,
"items": {
"type": "string",
"enum": ["Jam", "Pass", "JamAck"]
}
}
},
"additionalProperties": false
}
},
"$schema": "http://json-schema.org/draft-04/schema#"
}
"""
def arg_parser():
    """Build the command-line parser for the policy translator."""
    p = argparse.ArgumentParser()
    for flag, help_text in (
            ('file', "JSON file that contains the policy"),
            ('--out', "Output file")):
        p.add_argument(flag, help=help_text)
    return p
def check_policy(args):
    """Reject policies that mix exact and range quantifiers.

    A policy may specify either 'length' or 'length_min'/'length_max'
    (and likewise for 'mcs'), never both. Prints the offending case and
    returns False; returns True when every policy is consistent.
    """
    for policy in args.policies:
        if 'length' in policy and ('length_min' in policy or
                                   'length_max' in policy):
            print "[ERROR] Policy can not contain both exact and range "\
                "quantifiers for length"
            return False
        if 'mcs' in policy and ('mcs_min' in policy or 'mcs_max' in policy):
            print "[ERROR] Policy can not contain both exact and range "\
                "quantifiers for mcs"
            return False
    return True
def make_canonical(args):
    """Normalize every policy in-place to the canonical form consumed by
    translate().

    - exact 'mcs'/'length' values become min == max ranges; absent
      criteria get never-matching ranges (min > max) and weight 0
    - weights rank the present criteria (3 minus the other criteria)
    - the 'header' string is parsed into int bytes (0x1ff = wildcard 'xx')
      and right-padded to the longest header across all policies
    - 'actions' is cycled out to MAX_ACTION_LEN entries
    The canonical result is also written next to the input file as
    <name>_canonical<ext>.
    """
    for policy in args.policies:
        has_mcs = any([k.startswith('mcs') for k in policy])
        has_length = any([k.startswith('length') for k in policy])
        has_header = 'header' in policy
        if has_mcs:
            # Collapse an exact 'mcs' into a [min, max] range.
            if 'mcs' in policy:
                policy['mcs_min'] = policy['mcs']
                policy['mcs_max'] = policy['mcs']
                del policy['mcs']
            if 'mcs_min' not in policy:
                policy['mcs_min'] = 0
            if 'mcs_max' not in policy:
                policy['mcs_max'] = MCS_MAX
            policy['mcs_weight'] = 3
            if has_length:
                policy['mcs_weight'] -= 1
            if has_header:
                policy['mcs_weight'] -= 1
        else:
            # min > max: a range no frame can match.
            policy['mcs_min'] = MCS_MAX
            policy['mcs_max'] = 0
            policy['mcs_weight'] = 0
        if has_length:
            if 'length' in policy:
                policy['length_min'] = policy['length']
                policy['length_max'] = policy['length']
                del policy['length']
            if 'length_min' not in policy:
                policy['length_min'] = 0
            if 'length_max' not in policy:
                policy['length_max'] = LENGTH_MAX
            policy['length_weight'] = 3 - policy['mcs_weight']
            if has_header:
                # NOTE(review): this decrements mcs_weight a second time
                # (already decremented for has_header in the mcs branch,
                # and it can go to -1 when has_mcs is False); it looks
                # like 'length_weight' was intended here -- verify.
                policy['mcs_weight'] -= 1
        else:
            policy['length_min'] = LENGTH_MAX
            policy['length_max'] = 0
            policy['length_weight'] = 0
        if has_header:
            # Parse "aa bb xx ..." into ints; 0x1ff marks a wildcard
            # byte. (NB: 'bytes' shadows the builtin.)
            bytes = []
            for s in policy['header'].split():
                if s == 'xx':
                    bytes.append(0x1ff)
                else:
                    bytes.append(int(s, 16))
            policy['header'] = bytes
        else:
            policy['header'] = []
        # Repeat the action list cyclically until it fills the table.
        # (NB: 'iter' shadows the builtin; .next() is Python-2-only.)
        iter = itertools.cycle(policy['actions'])
        for _ in range(MAX_ACTION_LEN-len(policy['actions'])):
            policy['actions'].append(iter.next())
    # Pad every header to the longest one with wildcard bytes.
    args.max_header_len = max([len(p['header']) for p in args.policies])
    for policy in args.policies:
        for _ in range(args.max_header_len - len(policy['header'])):
            policy['header'].append(0x1ff)
    cano_file = '_canonical'.join(os.path.splitext(args.file))
    with open(cano_file, 'w') as f:
        f.write(json.dumps(args.policies, indent=2))
def translate(args):
    """Serialize the canonical policies into the .jam register write-list.

    The output is a text file of hex words: for each hardware table
    (rate filter, length filter, header filter, header length, action
    table) a register address, a word count, then the words themselves.
    """
    rate_data = []
    len_data = []
    header_data = []
    action_data = []
    for id, policy in enumerate(args.policies):
        # Rate filter word: policy id | mcs_min | mcs_max | 2-bit weight.
        val = (id<<28) + (policy['mcs_min']<<6) + (policy['mcs_max']<<2) +\
            (policy['mcs_weight']&0x3)
        rate_data.append(val)
        # Length filter word: policy id | length_min | length_max | weight.
        val = (id<<28) + (policy['length_min']<<14) +\
            (policy['length_max']<<2) + (policy['length_weight']&0x3)
        len_data.append(val)
        # One word per header byte position; 0x1ff is the wildcard value.
        for addr, b in enumerate(policy['header']):
            val = (id<<28) + (addr<<23) + (b&0x1ff)
            header_data.append(val)
        # One word per action slot: Pass=0, Jam=1, JamAck=2.
        for addr, a in enumerate(policy['actions']):
            val = (id<<28) + (addr<<20)
            if a == 'Jam':
                val += 1
            elif a == 'Pass':
                val += 0
            elif a == 'JamAck':
                val += 2
            action_data.append(val)
    with open(args.out, 'w') as f:
        f.write('0x%x\n' % (SR_RATE_FILTER))
        f.write('0x%x\n' % (len(rate_data)))
        for d in rate_data:
            f.write('0x%08x\n' % (d))
        f.write('0x%x\n' % (SR_LEN_FILTER))
        f.write('0x%x\n' % (len(len_data)))
        for d in len_data:
            f.write('0x%08x\n' % (d))
        f.write('0x%x\n' % (SR_HEADER_FILTER))
        f.write('0x%x\n' % (len(header_data)))
        for d in header_data:
            f.write('0x%08x\n' % (d))
        f.write('0x%x\n' % (SR_HEADER_LEN))
        f.write('0x1\n')
        f.write('0x%x\n' % (args.max_header_len))
        f.write('0x%x\n' % (SR_JAM_POLICY))
        f.write('0x%x\n' % (len(action_data)))
        for d in action_data:
            f.write('0x%08x\n' % (d))
    print "Jam file written to %s" % (args.out)
def main():
    """Validate, canonicalize and translate a JSON jamming-policy file."""
    args = arg_parser().parse_args()
    if args.out is None:
        args.out = "%s.jam" % (os.path.splitext(args.file)[0])
        print "No output file specified, using %s" % (args.out)
    schema = json.loads(POLICY_JSON_SCHEMA)
    with open(args.file, 'r') as f:
        args.policies = json.loads(f.read())
    print "Validating policy ..."
    # Raises jsonschema.ValidationError on schema violations.
    jsonschema.validate(args.policies, schema)
    if not check_policy(args):
        return
    print "Making canonical policy..."
    make_canonical(args)
    translate(args)
if __name__ == '__main__':
    main()

View File

@ -0,0 +1,63 @@
{
"title": "Jamming Policy Format Specification",
"type": "array",
"items": {
"$ref": "#/definitions/policy"
},
"minItems": 1,
"additionalProperties": false,
"definitions": {
"policy": {
"type": "object",
"properties": {
"length": {
"type": "integer",
"minimum": 0,
"maximum": 4095
},
"length_min": {
"type": "integer",
"minimum": 0,
"maximum": 4095
},
"length_max": {
"type": "integer",
"minimum": 0,
"maximum": 4095
},
"mcs": {
"type": "integer",
"minimum": 0,
"maximum": 7
},
"mcs_min": {
"type": "integer",
"minimum": 0,
"maximum": 7
},
"mcs_max": {
"type": "integer",
"minimum": 0,
"maximum": 7
},
"header": {
"type": "string",
"maxLength": 47
},
"actions": {
"type": "array",
"minItems": 1,
"maxItems": 256,
"items": {
"type": "string",
"enum": ["Jam", "Pass", "JamAck"]
}
}
},
"additionalProperties": false
}
},
"$schema": "http://json-schema.org/draft-04/schema#"
}

195
scripts/test.py Executable file
View File

@ -0,0 +1,195 @@
#!/usr/bin/env python
import os
import argparse
import scipy
import subprocess
import decode
# Directory containing this script, and the Verilog simulator's output
# directory ('sim_out', a sibling of the scripts directory).
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
SIM_OUT_DIR = os.path.join(os.path.dirname(SCRIPT_DIR), 'sim_out')
def test():
    """Run the iverilog testbench on a captured sample file and compare
    every pipeline stage (SIGNAL, demod, deinterleave, Viterbi,
    descramble, output bytes) against the pure-Python reference decoder
    in decode.py."""
    parser = argparse.ArgumentParser()
    parser.add_argument('file', help="Sample file to test.")
    parser.add_argument('--no_sim', action='store_true', default=False)
    parser.add_argument('--stop', type=int)
    args = parser.parse_args()
    # Interleaved int16 I/Q samples -> list of complex numbers.
    with open(args.file, 'rb') as f:
        samples = scipy.fromfile(f, dtype=scipy.int16)
        samples = [complex(i, q) for i, q in zip(samples[::2], samples[1::2])]
    print "Using file %s (%d samples)" % (args.file, len(samples))
    # Regenerate the $readmemh memory file if it is missing or stale.
    memory_file = '%s.txt' % (os.path.splitext(args.file)[0])
    if not os.path.isfile(memory_file) or\
            os.path.getmtime(memory_file) < os.path.getmtime(args.file):
        subprocess.check_call('python %s/bin_to_mem.py %s --out %s' %\
            (SCRIPT_DIR, args.file, memory_file), shell=True)
    if not args.no_sim:
        if args.stop is None:
            stop = len(samples)
        else:
            stop = min(args.stop, len(samples))
        print "Stop after %d samples" % (stop)
        # Clear previous simulation output (best effort).
        try:
            subprocess.check_call('rm -rfv %s/*' % (SIM_OUT_DIR), shell=True)
        except:
            pass
        # Compile and run the Verilog testbench; Ctrl-C kills the
        # simulator and aborts the test.
        try:
            subprocess.check_call(
                'iverilog -DDEBUG_PRINT -DSAMPLE_FILE="\\"%s\\"" -DNUM_SAMPLE=%d -c dot11_modules.list dot11_tb.v'
                % (memory_file, stop), shell=True)
            subprocess.check_call('vvp -n a.out', shell=True)
        except KeyboardInterrupt:
            try:
                subprocess.check_call('pgrep -f "vvp" | xargs kill -9', shell=True)
            except:
                pass
            return
    # Load the per-stage dumps written by the testbench. The hardware
    # emits bits LSB-first, hence the string reversals below.
    with open('%s/signal_out.txt' % (SIM_OUT_DIR), 'r') as f:
        signal_out = [c[::-1] for c in f.read().strip().split()]
    deinterleave_out = []
    with open('%s/deinterleave_out.txt' % (SIM_OUT_DIR), 'r') as f:
        deinterleave_out = f.read().replace('\n', '')
    with open('%s/descramble_out.txt' % (SIM_OUT_DIR), 'r') as f:
        descramble_out = f.read().replace('\n', '')
    print "Decoding..."
    # Reference decode of the same capture.
    expected_signal, cons, expected_demod_out, expected_deinterleave_out,\
        expected_conv_out, expected_descramble_out, expected_byte_out, pkt =\
        decode.Decoder(args.file, skip=0).decode_next()
    if not expected_signal.ht:
        # Compare each SIGNAL sub-field with the hardware's output.
        signal_error = False
        for idx, attr in enumerate(['rate_bits', 'rsvd', 'len_bits',
                                    'parity_bits', 'tail_bits']):
            if getattr(expected_signal, attr) == signal_out[idx]:
                print "Signal.%s works" % (attr)
            else:
                print "Wrong Signal.%s: expect %s, got %s" %\
                    (attr, getattr(expected_signal, attr), signal_out[idx])
                signal_error = True
        if signal_error:
            return
        else:
            print "== DECODE SIGNAL WORKS =="
    if expected_signal.ht:
        n_bpsc, n_cbps, n_dbps = decode.HT_MCS_PARAMETERS[expected_signal.mcs]
    else:
        n_bpsc, n_cbps, n_dbps = decode.RATE_PARAMETERS[expected_signal.rate]
    # swap pos/neg sub carriers (the hardware orders them differently)
    expected_demod_out = ''.join([str(b) for b in expected_demod_out])
    temp = []
    for i in range(0, len(expected_demod_out), n_cbps):
        temp.extend(expected_demod_out[i+n_cbps/2:i+n_cbps])
        temp.extend(expected_demod_out[i:i+n_cbps/2])
    expected_demod_out = ''.join(temp)
    expected_deinterleave_out = ''.join([str(b) for b in expected_deinterleave_out])
    expected_conv_out = ''.join([str(b) for b in expected_conv_out])
    expected_descramble_out = ''.join([str(b) for b in expected_descramble_out])
    with open('%s/conv_out.txt' % (SIM_OUT_DIR), 'r') as f:
        conv_out = f.read().replace('\n', '')
    demod_out = []
    with open('%s/demod_out.txt' % (SIM_OUT_DIR), 'r') as f:
        for line in f.readlines():
            # Keep the n_bpsc payload bits of each line, LSB-first.
            demod_out.append(line.strip()[-n_bpsc:][::-1])
    demod_out = ''.join(demod_out)
    num_symbol = min(len(demod_out)/n_cbps, len(expected_demod_out)/n_cbps)
    print "%d OFDM symbols" % (num_symbol)
    # Stage-by-stage, symbol-by-symbol comparison follows.
    print "Checking DEMOD.."
    error = False
    for idx in range(num_symbol):
        expected = expected_demod_out[idx*n_cbps:(idx+1)*n_cbps]
        got = demod_out[idx*n_cbps:(idx+1)*n_cbps]
        print "%10s: %s" % ("Expected", expected)
        print "%10s: %s" % ("Got", got)
        if expected != got:
            print "Demod error at SYM %d, diff: %d" %\
                (idx, len([i for i in range(len(got)) if expected[i] != got[i]]))
            error = True
    if not error:
        print "DEMOD works!"
    print "Checking DEINTER..."
    error = False
    for idx in range(num_symbol):
        expected = expected_deinterleave_out[idx*n_cbps:(idx+1)*n_cbps]
        got = deinterleave_out[idx*n_cbps:(idx+1)*n_cbps]
        print "%10s: %s" % ("Expected", expected)
        print "%10s: %s" % ("Got", got)
        if expected != got:
            print "Deinter error at SYM %d, diff: %d" %\
                (idx, len([i for i in range(len(got)) if expected[i] != got[i]]))
            error = True
    if not error:
        print "DEINTER works!"
    print "Checking CONV..."
    error = False
    for idx in range(num_symbol):
        expected = expected_conv_out[idx*n_dbps:(idx+1)*n_dbps]
        got = conv_out[idx*n_dbps:(idx+1)*n_dbps]
        print "%10s: %s" % ("Expected", expected)
        print "%10s: %s" % ("Got", got)
        if expected != got:
            print "Convolutional decoder error at symbol %d (diff %d)" %\
                (idx, len([i for i in range(len(got)) if expected[i] != got[i]]))
            error = True
    if not error:
        print "CONV works!"
    descramble_out = '0'*7 + descramble_out  # compensate for the first 7 bits
    print "Checking DESCRAMBLE..."
    error = False
    for idx in range(num_symbol):
        expected = expected_descramble_out[idx*n_dbps:(idx+1)*n_dbps]
        got = descramble_out[idx*n_dbps:(idx+1)*n_dbps]
        print "%10s: %s" % ("Expected", expected)
        print "%10s: %s" % ("Got", got)
        if expected != got:
            print "Descramble error at SYM %d, diff: %d" %\
                (idx, len([i for i in range(len(got)) if expected[i] != got[i]]))
            error = True
    if not error:
        print "DESCRAMBLE works!"
    with open('%s/byte_out.txt' % (SIM_OUT_DIR), 'r') as f:
        byte_out = [int(b, 16) for b in f.read().strip().split('\n')]
    print "Checking BYTE..."
    error = False
    for idx in range(min(len(byte_out), len(expected_byte_out))):
        expected = expected_byte_out[idx]
        got = byte_out[idx]
        print "%10s: %02x" % ("Expected", expected)
        print "%10s: %02x" % ("Got", got)
        if expected != got:
            print "BYTE error"
            error = True
    if not error:
        print "BYTE works!"
if __name__ == '__main__':
    test()