mirror of
https://github.com/saitohirga/WSJT-X.git
synced 2024-11-03 16:01:18 -05:00
b22f1c6d2e
git-svn-id: svn+ssh://svn.code.sf.net/p/wsjt/wsjt/branches/wsjtx@6353 ab8295b8-cf94-4d9e-aec4-7959e3be5d79
2227 lines
52 KiB
Plaintext
2227 lines
52 KiB
Plaintext
#LyX 2.1 created this file. For more info see http://www.lyx.org/
|
|
\lyxformat 474
|
|
\begin_document
|
|
\begin_header
|
|
\textclass paper
|
|
\use_default_options true
|
|
\maintain_unincluded_children false
|
|
\language english
|
|
\language_package default
|
|
\inputencoding auto
|
|
\fontencoding global
|
|
\font_roman default
|
|
\font_sans default
|
|
\font_typewriter default
|
|
\font_math auto
|
|
\font_default_family default
|
|
\use_non_tex_fonts false
|
|
\font_sc false
|
|
\font_osf false
|
|
\font_sf_scale 100
|
|
\font_tt_scale 100
|
|
\graphics default
|
|
\default_output_format default
|
|
\output_sync 0
|
|
\bibtex_command default
|
|
\index_command default
|
|
\float_placement H
|
|
\paperfontsize 12
|
|
\spacing onehalf
|
|
\use_hyperref false
|
|
\papersize default
|
|
\use_geometry true
|
|
\use_package amsmath 1
|
|
\use_package amssymb 1
|
|
\use_package cancel 1
|
|
\use_package esint 1
|
|
\use_package mathdots 1
|
|
\use_package mathtools 1
|
|
\use_package mhchem 1
|
|
\use_package stackrel 1
|
|
\use_package stmaryrd 1
|
|
\use_package undertilde 1
|
|
\cite_engine basic
|
|
\cite_engine_type default
|
|
\biblio_style plain
|
|
\use_bibtopic false
|
|
\use_indices false
|
|
\paperorientation portrait
|
|
\suppress_date false
|
|
\justification true
|
|
\use_refstyle 1
|
|
\index Index
|
|
\shortcut idx
|
|
\color #008000
|
|
\end_index
|
|
\leftmargin 1in
|
|
\topmargin 1in
|
|
\rightmargin 1in
|
|
\bottommargin 1in
|
|
\secnumdepth 3
|
|
\tocdepth 3
|
|
\paragraph_separation indent
|
|
\paragraph_indentation default
|
|
\quotes_language english
|
|
\papercolumns 1
|
|
\papersides 1
|
|
\paperpagestyle default
|
|
\tracking_changes false
|
|
\output_changes false
|
|
\html_math_output 0
|
|
\html_css_as_file 0
|
|
\html_be_strict false
|
|
\end_header
|
|
|
|
\begin_body
|
|
|
|
\begin_layout Title
|
|
Open Source Soft-Decision Decoder for the JT65 (63,12) Reed-Solomon code
|
|
\end_layout
|
|
|
|
\begin_layout Author
|
|
Steven J.
|
|
Franke, K9AN and Joseph H.
|
|
Taylor, K1JT
|
|
\end_layout
|
|
|
|
\begin_layout Section
|
|
\begin_inset CommandInset label
|
|
LatexCommand label
|
|
name "sec:Introduction-and-Motivation"
|
|
|
|
\end_inset
|
|
|
|
Introduction and Motivation
|
|
\end_layout
|
|
|
|
\begin_layout Standard
|
|
The JT65 protocol has revolutionized amateur-radio weak-signal communication
|
|
by enabling operators with small or compromise antennas and relatively
|
|
low-power transmitters to communicate over propagation paths not usable
|
|
with traditional technologies.
|
|
The protocol was developed in 2003 for Earth-Moon-Earth (EME, or
|
|
\begin_inset Quotes eld
|
|
\end_inset
|
|
|
|
moonbounce
|
|
\begin_inset Quotes erd
|
|
\end_inset
|
|
|
|
) communication, where the scattered return signals are always weak.
|
|
It was soon found that JT65 also enables worldwide communication on the
|
|
HF bands with low power, modest antennas, and efficient spectral usage.
|
|
\end_layout
|
|
|
|
\begin_layout Standard
|
|
A major reason for the success and popularity of JT65 is its use of a strong
|
|
error-correction code: a short block-length, low-rate Reed-Solomon code
|
|
based on a 64-symbol alphabet.
|
|
Until now, nearly all programs implementing JT65 have used the patented
|
|
Koetter-Vardy (KV) algebraic soft-decision decoder
|
|
\begin_inset CommandInset citation
|
|
LatexCommand cite
|
|
key "kv2001"
|
|
|
|
\end_inset
|
|
|
|
, licensed to and implemented by K1JT as a closed-source executable for
|
|
use only in amateur radio applications.
|
|
Since 2001 the KV decoder has been considered the best available soft-decision
|
|
decoder for Reed-Solomon codes.
|
|
We describe here a new open-source alternative called the Franke-Taylor
|
|
(FT, or K9AN-K1JT) algorithm.
|
|
It is conceptually simple, built around the well-known Berlekamp-Massey
|
|
errors-and-erasures algorithm, and in this application it performs even
|
|
better than the KV decoder.
|
|
The FT algorithm is implemented in the popular program
|
|
\emph on
|
|
WSJT-X
|
|
\emph default
|
|
, widely used for amateur weak-signal communication with JT65 and other
|
|
specialized digital modes.
|
|
The program is freely available and licensed under the GNU General Public
|
|
License.
|
|
\end_layout
|
|
|
|
\begin_layout Standard
|
|
The JT65 protocol specifies transmissions that normally start one second
|
|
into a UTC minute and last for 46.8 seconds.
|
|
Receiving software therefore has up to several seconds to decode a message,
|
|
before the operator sends a reply at the start of the next minute.
|
|
With today's personal computers, this relatively long time available for
|
|
decoding a short message encourages experimentation with decoders of high
|
|
computational complexity.
|
|
As a result, on a typical fading channel the FT algorithm can extend the
|
|
decoding threshold by many dB over the hard-decision Berlekamp-Massey decoder,
|
|
and by a meaningful amount over the KV decoder.
|
|
In addition to its excellent performance, the new algorithm has other desirable
|
|
properties, not least of which is its conceptual simplicity.
|
|
Decoding performance and complexity scale in a convenient way, providing
|
|
steadily increasing soft-decision decoding gain as a tunable computational
|
|
complexity parameter is increased over more than 5 orders of magnitude.
|
|
Appreciable gain is available from our decoder even on very simple (and
|
|
relatively slow) computers.
|
|
On the other hand, because the algorithm benefits from a large number of
|
|
independent decoding trials, further performance gains should be achievable
|
|
through parallelization on high-performance computers.
|
|
\end_layout
|
|
|
|
\begin_layout Section
|
|
\begin_inset CommandInset label
|
|
LatexCommand label
|
|
name "sec:JT65-messages-and"
|
|
|
|
\end_inset
|
|
|
|
JT65 messages and Reed Solomon Codes
|
|
\end_layout
|
|
|
|
\begin_layout Standard
|
|
JT65 message frames consist of a short compressed message encoded for
|
|
transmission with a Reed-Solomon code.
|
|
Reed-Solomon codes are block codes characterized by
|
|
\begin_inset Formula $n$
|
|
\end_inset
|
|
|
|
, the length of their codewords,
|
|
\begin_inset Formula $k$
|
|
\end_inset
|
|
|
|
, the number of message symbols conveyed by the codeword, and the number
|
|
of possible values for each symbol in the codewords.
|
|
The codeword length and the number of message symbols are specified with
|
|
the notation
|
|
\begin_inset Formula $(n,k)$
|
|
\end_inset
|
|
|
|
.
|
|
JT65 uses a (63,12) Reed-Solomon code with 64 possible values for each
|
|
symbol.
|
|
Each of the 12 message symbols represents
|
|
\begin_inset Formula $\log_{2}64=6$
|
|
\end_inset
|
|
|
|
message bits.
|
|
The source-encoded messages conveyed by a 63-symbol JT65 frame thus consist
|
|
of 72 information bits.
|
|
The JT65 code is systematic, which means that the 12 message symbols are
|
|
embedded in the codeword without modification and another 51 parity symbols
|
|
derived from the message symbols are added to form a codeword of 63 symbols.
|
|
|
|
\end_layout
|
|
|
|
\begin_layout Standard
|
|
In coding theory the concept of Hamming distance is used as a measure of
|
|
|
|
\begin_inset Quotes eld
|
|
\end_inset
|
|
|
|
distance
|
|
\begin_inset Quotes erd
|
|
\end_inset
|
|
|
|
between different codewords, or between a received word and a codeword.
|
|
Hamming distance is the number of code symbols that differ in two words
|
|
being compared.
|
|
Reed-Solomon codes have minimum Hamming distance
|
|
\begin_inset Formula $d$
|
|
\end_inset
|
|
|
|
, where
|
|
\begin_inset Formula
|
|
\begin{equation}
|
|
d=n-k+1.\label{eq:minimum_distance}
|
|
\end{equation}
|
|
|
|
\end_inset
|
|
|
|
The minimum Hamming distance of the JT65 code is
|
|
\begin_inset Formula $d=52$
|
|
\end_inset
|
|
|
|
, which means that any particular codeword differs from all other codewords
|
|
in at least 52 symbol positions.
|
|
|
|
\end_layout
|
|
|
|
\begin_layout Standard
|
|
Given a received word containing some incorrect symbols (errors), the received
|
|
word can be decoded into the correct codeword using a deterministic, algebraic
|
|
algorithm provided that no more than
|
|
\begin_inset Formula $t$
|
|
\end_inset
|
|
|
|
symbols were received incorrectly, where
|
|
\begin_inset Formula
|
|
\begin{equation}
|
|
t=\left\lfloor \frac{n-k}{2}\right\rfloor .\label{eq:t}
|
|
\end{equation}
|
|
|
|
\end_inset
|
|
|
|
For the JT65 code
|
|
\begin_inset Formula $t=25$
|
|
\end_inset
|
|
|
|
, so it is always possible to decode a received word having 25 or fewer
|
|
symbol errors.
|
|
Any one of several well-known algebraic algorithms, such as the widely
|
|
used Berlekamp-Massey (BM) algorithm, can carry out the decoding.
|
|
Two steps are necessarily involved in this process.
|
|
We must (1) determine which symbols were received incorrectly, and (2)
|
|
find the correct value of the incorrect symbols.
|
|
If we somehow know that certain symbols are incorrect, that information
|
|
can be used to reduce the work involved in step 1 and allow step 2 to correct
|
|
more than
|
|
\begin_inset Formula $t$
|
|
\end_inset
|
|
|
|
errors.
|
|
In the unlikely event that the location of every error is known and if
|
|
no correct symbols are accidentally labeled as errors, the BM algorithm
|
|
can correct up to
|
|
\begin_inset Formula $d-1=n-k$
|
|
\end_inset
|
|
|
|
errors.
|
|
|
|
\end_layout
|
|
|
|
\begin_layout Standard
|
|
The FT algorithm creates lists of symbols suspected of being incorrect and
|
|
sends them to the BM decoder.
|
|
Symbols flagged in this way are called
|
|
\begin_inset Quotes eld
|
|
\end_inset
|
|
|
|
erasures,
|
|
\begin_inset Quotes erd
|
|
\end_inset
|
|
|
|
while other incorrect symbols will be called
|
|
\begin_inset Quotes eld
|
|
\end_inset
|
|
|
|
errors.
|
|
\begin_inset Quotes erd
|
|
\end_inset
|
|
|
|
With perfect erasure information up to 51 incorrect symbols can be corrected
|
|
for the JT65 code.
|
|
Imperfect erasure information means that some erased symbols may be correct,
|
|
and some unerased symbols may be in error.
|
|
If
|
|
\begin_inset Formula $s$
|
|
\end_inset
|
|
|
|
symbols are erased and the remaining
|
|
\begin_inset Formula $n-s$
|
|
\end_inset
|
|
|
|
symbols contain
|
|
\begin_inset Formula $e$
|
|
\end_inset
|
|
|
|
errors, the BM algorithm can find the correct codeword as long as
|
|
\begin_inset Formula
|
|
\begin{equation}
|
|
s+2e\le d-1.\label{eq:erasures_and_errors}
|
|
\end{equation}
|
|
|
|
\end_inset
|
|
|
|
If
|
|
\begin_inset Formula $s=0$
|
|
\end_inset
|
|
|
|
, the decoder is said to be an
|
|
\begin_inset Quotes eld
|
|
\end_inset
|
|
|
|
errors-only
|
|
\begin_inset Quotes erd
|
|
\end_inset
|
|
|
|
decoder.
|
|
If
|
|
\begin_inset Formula $0<s\le d-1$
|
|
\end_inset
|
|
|
|
, the decoder is called an
|
|
\begin_inset Quotes eld
|
|
\end_inset
|
|
|
|
errors-and-erasures
|
|
\begin_inset Quotes erd
|
|
\end_inset
|
|
|
|
decoder.
|
|
The possibility of doing errors-and-erasures decoding lies at the heart
|
|
of the FT algorithm.
|
|
On that foundation we have built a capability for using
|
|
\begin_inset Quotes eld
|
|
\end_inset
|
|
|
|
soft
|
|
\begin_inset Quotes erd
|
|
\end_inset
|
|
|
|
information on the reliability of individual symbols, thereby producing
|
|
a soft-decision decoder.
|
|
\end_layout
|
|
|
|
\begin_layout Section
|
|
\begin_inset CommandInset label
|
|
LatexCommand label
|
|
name "sec:Statistical Framework"
|
|
|
|
\end_inset
|
|
|
|
Statistical Framework
|
|
\end_layout
|
|
|
|
\begin_layout Standard
|
|
The FT algorithm uses the estimated quality of received symbols to generate
|
|
lists of symbols considered likely to be in error, thus enabling decoding
|
|
of received words with more than 25 errors using the errors-and-erasures
|
|
capability of the BM decoder.
|
|
Algorithms of this type are generally called
|
|
\begin_inset Quotes eld
|
|
\end_inset
|
|
|
|
reliability based
|
|
\begin_inset Quotes erd
|
|
\end_inset
|
|
|
|
or
|
|
\begin_inset Quotes eld
|
|
\end_inset
|
|
|
|
probabilistic
|
|
\begin_inset Quotes erd
|
|
\end_inset
|
|
|
|
decoding methods
|
|
\begin_inset CommandInset citation
|
|
LatexCommand cite
|
|
after "Chapter 10"
|
|
key "key-1"
|
|
|
|
\end_inset
|
|
|
|
.
|
|
Such algorithms involve some amount of educated guessing about which received
|
|
symbols are in error or, alternatively, about which received symbols are
|
|
correct.
|
|
The guesses are informed by
|
|
\begin_inset Quotes eld
|
|
\end_inset
|
|
|
|
soft-symbol
|
|
\begin_inset Quotes erd
|
|
\end_inset
|
|
|
|
quality metrics associated with the received symbols.
|
|
To illustrate why it is absolutely essential to use such soft-symbol
|
|
information in these algorithms it helps to consider what would happen
|
|
if we tried to use completely random guesses, ignoring any available
|
|
soft-symbol information.
|
|
\end_layout
|
|
|
|
\begin_layout Standard
|
|
As a specific example, consider a received JT65 word with 23 correct symbols
|
|
and 40 errors.
|
|
We do not know which symbols are in error.
|
|
Suppose that the decoder randomly selects
|
|
\begin_inset Formula $s=40$
|
|
\end_inset
|
|
|
|
symbols for erasure, leaving 23 unerased symbols.
|
|
According to Eq.
|
|
(
|
|
\begin_inset CommandInset ref
|
|
LatexCommand ref
|
|
reference "eq:erasures_and_errors"
|
|
|
|
\end_inset
|
|
|
|
), the BM decoder can successfully decode this word as long as
|
|
\begin_inset Formula $e$
|
|
\end_inset
|
|
|
|
, the number of errors present in the 23 unerased symbols, is 5 or less.
|
|
The number of errors captured in the set of 40 erased symbols must therefore
|
|
be at least 35.
|
|
|
|
\end_layout
|
|
|
|
\begin_layout Standard
|
|
The probability of selecting some particular number of incorrect symbols
|
|
in a randomly selected subset of received symbols is governed by the
|
|
hypergeometric probability distribution.
|
|
Let us define
|
|
\begin_inset Formula $N$
|
|
\end_inset
|
|
|
|
as the number of symbols from which erasures will be selected,
|
|
\begin_inset Formula $X$
|
|
\end_inset
|
|
|
|
as the number of incorrect symbols in the set of
|
|
\begin_inset Formula $N$
|
|
\end_inset
|
|
|
|
symbols, and
|
|
\begin_inset Formula $x$
|
|
\end_inset
|
|
|
|
as the number of errors in the symbols actually erased.
|
|
In an ensemble of many received words
|
|
\begin_inset Formula $X$
|
|
\end_inset
|
|
|
|
and
|
|
\begin_inset Formula $x$
|
|
\end_inset
|
|
|
|
will be random variables but for this example we will assume that
|
|
\begin_inset Formula $X$
|
|
\end_inset
|
|
|
|
is known and that only
|
|
\begin_inset Formula $x$
|
|
\end_inset
|
|
|
|
is random.
|
|
The conditional probability mass function for
|
|
\begin_inset Formula $x$
|
|
\end_inset
|
|
|
|
with stated values of
|
|
\begin_inset Formula $N$
|
|
\end_inset
|
|
|
|
,
|
|
\begin_inset Formula $X$
|
|
\end_inset
|
|
|
|
, and
|
|
\begin_inset Formula $s$
|
|
\end_inset
|
|
|
|
may be written as
|
|
\end_layout
|
|
|
|
\begin_layout Standard
|
|
\begin_inset Formula
|
|
\begin{equation}
|
|
P(x=\epsilon|N,X,s)=\frac{\binom{X}{\epsilon}\binom{N-X}{s-\epsilon}}{\binom{N}{s}}\label{eq:hypergeometric_pdf}
|
|
\end{equation}
|
|
|
|
\end_inset
|
|
|
|
where
|
|
\begin_inset Formula $\binom{n}{k}=\frac{n!}{k!(n-k)!}$
|
|
\end_inset
|
|
|
|
is the binomial coefficient.
|
|
The binomial coefficient can be calculated using the function
|
|
\family typewriter
|
|
nchoosek(n,k)
|
|
\family default
|
|
in the interpreted language GNU Octave, or with one of many free online
|
|
calculators.
|
|
The hypergeometric probability mass function defined in Eq.
|
|
(
|
|
\begin_inset CommandInset ref
|
|
LatexCommand ref
|
|
reference "eq:hypergeometric_pdf"
|
|
|
|
\end_inset
|
|
|
|
) is available in GNU Octave as function
|
|
\family typewriter
|
|
hygepdf
|
|
\family default
|
|
(x,N,X,s).
|
|
The cumulative probability that at least
|
|
\begin_inset Formula $\epsilon$
|
|
\end_inset
|
|
|
|
errors are captured in a subset of
|
|
\begin_inset Formula $s$
|
|
\end_inset
|
|
|
|
erased symbols selected from a group of
|
|
\begin_inset Formula $N$
|
|
\end_inset
|
|
|
|
symbols containing
|
|
\begin_inset Formula $X$
|
|
\end_inset
|
|
|
|
errors is
|
|
\begin_inset Formula
|
|
\begin{equation}
|
|
P(x\ge\epsilon|N,X,s)=\sum_{j=\epsilon}^{s}P(x=j|N,X,s).\label{eq:cumulative_prob}
|
|
\end{equation}
|
|
|
|
\end_inset
|
|
|
|
|
|
\end_layout
|
|
|
|
\begin_layout Paragraph
|
|
Example 1:
|
|
\end_layout
|
|
|
|
\begin_layout Standard
|
|
Suppose a received word contains
|
|
\begin_inset Formula $X=40$
|
|
\end_inset
|
|
|
|
incorrect symbols.
|
|
In an attempt to decode using an errors-and-erasures decoder,
|
|
\begin_inset Formula $s=40$
|
|
\end_inset
|
|
|
|
symbols are randomly selected for erasure from the full set of
|
|
\begin_inset Formula $N=n=63$
|
|
\end_inset
|
|
|
|
symbols.
|
|
The probability that
|
|
\begin_inset Formula $x=35$
|
|
\end_inset
|
|
|
|
of the erased symbols are actually incorrect is then
|
|
\begin_inset Formula
|
|
\[
|
|
P(x=35)=\frac{\binom{40}{35}\binom{63-40}{40-35}}{\binom{63}{40}}\simeq2.4\times10^{-7}.
|
|
\]
|
|
|
|
\end_inset
|
|
|
|
Similarly, the probability that
|
|
\begin_inset Formula $x=36$
|
|
\end_inset
|
|
|
|
of the erased symbols are incorrect is
|
|
\begin_inset Formula
|
|
\[
|
|
P(x=36)\simeq8.6\times10^{-9}.
|
|
\]
|
|
|
|
\end_inset
|
|
|
|
Since the probability of erasing 36 errors is so much smaller than that
|
|
for erasing 35 errors, we may safely conclude that the probability of randomly
|
|
choosing an erasure vector that can decode the received word is approximately
|
|
|
|
\begin_inset Formula $P(x=35)\simeq2.4\times10^{-7}$
|
|
\end_inset
|
|
|
|
.
|
|
The odds of producing a valid codeword on the first try are very poor,
|
|
about 1 in 4 million.
|
|
\end_layout
|
|
|
|
\begin_layout Paragraph
|
|
Example 2:
|
|
\end_layout
|
|
|
|
\begin_layout Standard
|
|
How might we best choose the number of symbols to erase, in order to maximize
|
|
the probability of successful decoding? By exhaustive search over all possible
|
|
values up to
|
|
\begin_inset Formula $s=51$
|
|
\end_inset
|
|
|
|
, it turns out that for
|
|
\begin_inset Formula $X=40$
|
|
\end_inset
|
|
|
|
the best strategy is to erase
|
|
\begin_inset Formula $s=45$
|
|
\end_inset
|
|
|
|
symbols.
|
|
Decoding will then be assured if the set of erased symbols contains at
|
|
least 37 errors.
|
|
With
|
|
\begin_inset Formula $N=63$
|
|
\end_inset
|
|
|
|
,
|
|
\begin_inset Formula $X=40$
|
|
\end_inset
|
|
|
|
, and
|
|
\begin_inset Formula $s=45$
|
|
\end_inset
|
|
|
|
, the probability of successful decode in a single try is
|
|
\begin_inset Formula
|
|
\[
|
|
P(x\ge37)\simeq1.9\times10^{-6}.
|
|
\]
|
|
|
|
\end_inset
|
|
|
|
This probability is about 8 times higher than the probability of success
|
|
when only 40 symbols were erased.
|
|
Nevertheless, the odds of successfully decoding on the first try are still
|
|
only about 1 in 500,000.
|
|
|
|
\end_layout
|
|
|
|
\begin_layout Paragraph
|
|
Example 3:
|
|
\end_layout
|
|
|
|
\begin_layout Standard
|
|
Examples 1 and 2 show that a random strategy for selecting symbols to erase
|
|
is unlikely to be successful unless we are prepared to wait a long time
|
|
for an answer.
|
|
So let's modify the strategy to tip the odds in our favor.
|
|
Let the received word contain
|
|
\begin_inset Formula $X=40$
|
|
\end_inset
|
|
|
|
incorrect symbols, as before, but suppose we know that 10 received symbols
|
|
are significantly more reliable than the other 53.
|
|
We might therefore protect the 10 most reliable symbols from erasure, selecting
|
|
erasures from the smaller set of
|
|
\begin_inset Formula $N=53$
|
|
\end_inset
|
|
|
|
less reliable symbols.
|
|
If
|
|
\begin_inset Formula $s=45$
|
|
\end_inset
|
|
|
|
symbols are chosen randomly for erasure in this way, it is still necessary
|
|
for the erased symbols to include at least 37 errors, as in Example 2.
|
|
However, the probabilities are now much more favorable: with
|
|
\begin_inset Formula $N=53$
|
|
\end_inset
|
|
|
|
,
|
|
\begin_inset Formula $X=40$
|
|
\end_inset
|
|
|
|
, and
|
|
\begin_inset Formula $s=45$
|
|
\end_inset
|
|
|
|
, Eq.
|
|
(
|
|
\begin_inset CommandInset ref
|
|
LatexCommand ref
|
|
reference "eq:hypergeometric_pdf"
|
|
|
|
\end_inset
|
|
|
|
) yields
|
|
\begin_inset Formula $P(x\ge37)=0.016$
|
|
\end_inset
|
|
|
|
.
|
|
Even better odds are obtained by choosing
|
|
\begin_inset Formula $s=47$
|
|
\end_inset
|
|
|
|
, which requires
|
|
\begin_inset Formula $x\ge38$
|
|
\end_inset
|
|
|
|
.
|
|
With
|
|
\begin_inset Formula $N=53$
|
|
\end_inset
|
|
|
|
,
|
|
\begin_inset Formula $X=40$
|
|
\end_inset
|
|
|
|
, and
|
|
\begin_inset Formula $s=47$
|
|
\end_inset
|
|
|
|
,
|
|
\begin_inset Formula $P(x\ge38)=0.027$
|
|
\end_inset
|
|
|
|
.
|
|
The odds for producing a codeword on the first try are now about 1 in 38.
|
|
A few hundred independently randomized tries would be enough to
|
|
all-but-guarantee production of a valid codeword by the BM decoder.
|
|
\end_layout
|
|
|
|
\begin_layout Section
|
|
\begin_inset CommandInset label
|
|
LatexCommand label
|
|
name "sec:The-decoding-algorithm"
|
|
|
|
\end_inset
|
|
|
|
The Franke-Taylor decoding algorithm
|
|
\end_layout
|
|
|
|
\begin_layout Standard
|
|
Example 3 shows how statistical information about symbol quality should
|
|
make it possible to decode received frames having a large number of errors.
|
|
In practice the number of errors in the received word is unknown, so we
|
|
use a stochastic algorithm to assign high erasure probability to low-quality
|
|
symbols and relatively low probability to high-quality symbols.
|
|
As illustrated by Example 3, a good choice of erasure probabilities can
|
|
increase by many orders of magnitude the chance of producing a codeword.
|
|
Note that at this stage we must treat any codeword obtained by
|
|
errors-and-erasures decoding as no more than a
|
|
\emph on
|
|
candidate
|
|
\emph default
|
|
.
|
|
Our next task is to find a metric that can reliably select one of many
|
|
proffered candidates as the codeword actually transmitted.
|
|
\end_layout
|
|
|
|
\begin_layout Standard
|
|
The FT algorithm uses quality indices made available by a noncoherent 64-FSK
|
|
demodulator.
|
|
The demodulator computes the power spectrum
|
|
\begin_inset Formula $S(i,j)$
|
|
\end_inset
|
|
|
|
for each signalling interval; for the JT65 protocol the frequency index
|
|
and symbol index have values
|
|
\begin_inset Formula $i=$
|
|
\end_inset
|
|
|
|
1 to 64 and
|
|
\begin_inset Formula $j=$
|
|
\end_inset
|
|
|
|
1 to 63.
|
|
The most likely value for symbol
|
|
\begin_inset Formula $j$
|
|
\end_inset
|
|
|
|
is taken as the frequency bin with largest signal-plus-noise power over
|
|
all values of
|
|
\begin_inset Formula $i$
|
|
\end_inset
|
|
|
|
.
|
|
The fractions of total power in the two bins containing the largest and
|
|
second-largest powers, denoted respectively by
|
|
\begin_inset Formula $p_{1}$
|
|
\end_inset
|
|
|
|
and
|
|
\begin_inset Formula $p_{2}$
|
|
\end_inset
|
|
|
|
, are computed and passed from demodulator to decoder as soft-symbol information.
|
|
The FT decoder derives two metrics from
|
|
\begin_inset Formula $p_{1}$
|
|
\end_inset
|
|
|
|
and
|
|
\begin_inset Formula $p_{2}$
|
|
\end_inset
|
|
|
|
, namely
|
|
\end_layout
|
|
|
|
\begin_layout Itemize
|
|
\begin_inset Formula $p_{1}$
|
|
\end_inset
|
|
|
|
-rank: the rank
|
|
\begin_inset Formula $\{1,2,\ldots,63\}$
|
|
\end_inset
|
|
|
|
of the symbol's fractional power
|
|
\begin_inset Formula $p_{1,\,j}$
|
|
\end_inset
|
|
|
|
in a sorted list of
|
|
\begin_inset Formula $p_{1}$
|
|
\end_inset
|
|
|
|
values.
|
|
High ranking symbols have larger signal-to-noise ratio than those with
|
|
lower rank.
|
|
\end_layout
|
|
|
|
\begin_layout Itemize
|
|
\begin_inset Formula $p_{2}/p_{1}$
|
|
\end_inset
|
|
|
|
: when
|
|
\begin_inset Formula $p_{2}/p_{1}$
|
|
\end_inset
|
|
|
|
is not small compared to 1, the most likely symbol value is only slightly
|
|
more reliable than the second most likely one.
|
|
\end_layout
|
|
|
|
\begin_layout Standard
|
|
We use an empirical table of symbol error probabilities derived from a large
|
|
dataset of received words that were successfully decoded.
|
|
The table provides an estimate of the
|
|
\emph on
|
|
a priori
|
|
\emph default
|
|
probability of symbol error based on the
|
|
\begin_inset Formula $p_{1}$
|
|
\end_inset
|
|
|
|
-rank and
|
|
\begin_inset Formula $p_{2}/p_{1}$
|
|
\end_inset
|
|
|
|
metrics.
|
|
These probabilities are close to 1 for low-quality symbols and close to
|
|
0 for high-quality symbols.
|
|
Recall from Examples 2 and 3 that candidate codewords are produced with
|
|
higher probability when
|
|
\begin_inset Formula $s>X$
|
|
\end_inset
|
|
|
|
.
|
|
Correspondingly, the FT algorithm works best when the probability of erasing
|
|
a symbol is somewhat larger than the probability that the symbol is incorrect.
|
|
For the JT65 code we found empirically that good decoding performance is
|
|
obtained when the symbol erasure probability is about 1.3 times the symbol
|
|
error probability.
|
|
\end_layout
|
|
|
|
\begin_layout Standard
|
|
The FT algorithm tries successively to decode the received word using
|
|
independent educated guesses to select symbols for erasure.
|
|
For each iteration a stochastic erasure vector is generated based on the
|
|
symbol erasure probabilities.
|
|
The erasure vector is sent to the BM decoder along with the full set of
|
|
63 hard-decision symbol values.
|
|
When the BM decoder finds a candidate codeword it is assigned a quality
|
|
metric
|
|
\begin_inset Formula $d_{s}$
|
|
\end_inset
|
|
|
|
, the soft distance between the received word and the codeword:
|
|
\begin_inset Formula
|
|
\begin{equation}
|
|
d_{s}=\sum_{j=1}^{n}\alpha_{j}\,(1+p_{1,\,j}).\label{eq:soft_distance}
|
|
\end{equation}
|
|
|
|
\end_inset
|
|
|
|
Here
|
|
\begin_inset Formula $\alpha_{j}=0$
|
|
\end_inset
|
|
|
|
if received symbol
|
|
\begin_inset Formula $j$
|
|
\end_inset
|
|
|
|
is the same as the corresponding symbol in the codeword,
|
|
\begin_inset Formula $\alpha_{j}=1$
|
|
\end_inset
|
|
|
|
if the received symbol and codeword symbol are different, and
|
|
\begin_inset Formula $p_{1,\,j}$
|
|
\end_inset
|
|
|
|
is the fractional power associated with received symbol
|
|
\begin_inset Formula $j$
|
|
\end_inset
|
|
|
|
.
|
|
Think of the soft distance as made up of two terms: the first is the Hamming
|
|
distance between the received word and the codeword, and the second ensures
|
|
that if two candidate codewords have the same Hamming distance from the
|
|
received word, a smaller soft distance will be assigned to the one where
|
|
differences occur in symbols of lower estimated reliability.
|
|
|
|
\end_layout
|
|
|
|
\begin_layout Standard
|
|
In practice we find that
|
|
\begin_inset Formula $d_{s}$
|
|
\end_inset
|
|
|
|
can reliably identify the correct codeword if the signal-to-noise ratio
|
|
for individual symbols is greater than about 4 in linear power units.
|
|
We also find that significantly weaker signals can be decoded by using
|
|
soft-symbol information beyond that contained in
|
|
\begin_inset Formula $p_{1}$
|
|
\end_inset
|
|
|
|
and
|
|
\begin_inset Formula $p_{2}$
|
|
\end_inset
|
|
|
|
.
|
|
To this end we define an additional metric
|
|
\begin_inset Formula $u$
|
|
\end_inset
|
|
|
|
, the average signal-plus-noise power in all symbols according to a candidate
|
|
codeword's symbol values:
|
|
\end_layout
|
|
|
|
\begin_layout Standard
|
|
\begin_inset Formula
|
|
\begin{equation}
|
|
u=\frac{1}{n}\sum_{j=1}^{n}S(c_{j},\,j).\label{eq:u-metric}
|
|
\end{equation}
|
|
|
|
\end_inset
|
|
|
|
Here the
|
|
\begin_inset Formula $c_{j}$
|
|
\end_inset
|
|
|
|
's are the symbol values for the candidate codeword being tested.
|
|
The correct JT65 codeword produces a value for
|
|
\begin_inset Formula $u$
|
|
\end_inset
|
|
|
|
equal to the average of
|
|
\begin_inset Formula $n=63$
|
|
\end_inset
|
|
|
|
bins containing both signal and noise power.
|
|
Incorrect codewords have at most
|
|
\begin_inset Formula $k-1=11$
|
|
\end_inset
|
|
|
|
such bins and at least
|
|
\begin_inset Formula $n-k+1=52$
|
|
\end_inset
|
|
|
|
bins containing noise only.
|
|
Thus, if the spectral array
|
|
\begin_inset Formula $S(i,\,j)$
|
|
\end_inset
|
|
|
|
has been normalized so that its median value (essentially the average noise
|
|
level) is unity,
|
|
\begin_inset Formula $u$
|
|
\end_inset
|
|
|
|
for the correct codeword has expectation value (average over many random
|
|
realizations)
|
|
\end_layout
|
|
|
|
\begin_layout Standard
|
|
\begin_inset Formula
|
|
\begin{equation}
|
|
\bar{u}_{1}=1+y,\label{eq:u1-exp}
|
|
\end{equation}
|
|
|
|
\end_inset
|
|
|
|
where
|
|
\begin_inset Formula $y$
|
|
\end_inset
|
|
|
|
is the signal-to-noise ratio in linear power units.
|
|
If we assume Gaussian statistics and a large number of trials, the standard
|
|
deviation of measured values of
|
|
\begin_inset Formula $u_{1}$
|
|
\end_inset
|
|
|
|
is
|
|
\end_layout
|
|
|
|
\begin_layout Standard
|
|
\begin_inset Formula
|
|
\begin{equation}
|
|
\sigma_{1}=\left(\frac{1+2y}{n}\right)^{1/2}.\label{eq:sigma1}
|
|
\end{equation}
|
|
|
|
\end_inset
|
|
|
|
In contrast, worst-case incorrect codewords will yield
|
|
\begin_inset Formula $u$
|
|
\end_inset
|
|
|
|
-metrics with expectation value and standard deviation given by
|
|
\end_layout
|
|
|
|
\begin_layout Standard
|
|
\begin_inset Formula
|
|
\begin{equation}
|
|
\bar{u}_{2}=1+\left(\frac{k-1}{n}\right)y,\label{eq:u2-exp}
|
|
\end{equation}
|
|
|
|
\end_inset
|
|
|
|
|
|
\end_layout
|
|
|
|
\begin_layout Standard
|
|
\begin_inset Formula
|
|
\begin{equation}
|
|
\sigma_{2}=\frac{1}{n}\left[n+2y(k-1)\right]^{1/2}.\label{eq:sigma2}
|
|
\end{equation}
|
|
|
|
\end_inset
|
|
|
|
|
|
\end_layout
|
|
|
|
\begin_layout Standard
|
|
If tests on a number of candidate codewords yield largest and
|
|
second-largest metrics
|
|
\begin_inset Formula $u_{1}$
|
|
\end_inset
|
|
|
|
and
|
|
\begin_inset Formula $u_{2},$
|
|
\end_inset
|
|
|
|
respectively, we expect the ratio
|
|
\begin_inset Formula $r=u_{2}/u_{1}$
|
|
\end_inset
|
|
|
|
to be significantly smaller in cases where the candidate associated with
|
|
|
|
\begin_inset Formula $u_{1}$
|
|
\end_inset
|
|
|
|
is in fact the correct codeword.
|
|
On the other hand, if none of the tested candidates is correct,
|
|
\begin_inset Formula $r$
|
|
\end_inset
|
|
|
|
will likely be close to 1.
|
|
We therefore apply a ratio threshold test, say
|
|
\begin_inset Formula $r<r_{1}$
|
|
\end_inset
|
|
|
|
, to identify codewords with high probability of being correct.
|
|
As described in Section
|
|
\begin_inset CommandInset ref
|
|
LatexCommand ref
|
|
reference "sec:Theory,-Simulation,-and"
|
|
|
|
\end_inset
|
|
|
|
, we use simulations to set an empirical acceptance threshold
|
|
\begin_inset Formula $r_{1}$
|
|
\end_inset
|
|
|
|
that maximizes the probability of correct decodes while ensuring a low
|
|
rate of false decodes.
|
|
\end_layout
|
|
|
|
\begin_layout Standard
|
|
Technically the FT algorithm is a list decoder.
|
|
Among the list of candidate codewords found by the stochastic search algorithm,
|
|
only the one with the largest
|
|
\begin_inset Formula $u$
|
|
\end_inset
|
|
|
|
is retained.
|
|
As with all such algorithms, a stopping criterion is necessary.
|
|
FT accepts a codeword unconditionally if the Hamming distance
|
|
\begin_inset Formula $X$
|
|
\end_inset
|
|
|
|
and soft distance
|
|
\begin_inset Formula $d_{s}$
|
|
\end_inset
|
|
|
|
are less than specified limits
|
|
\begin_inset Formula $X_{0}$
|
|
\end_inset
|
|
|
|
and
|
|
\begin_inset Formula $d_{0}$
|
|
\end_inset
|
|
|
|
.
|
|
Secondary acceptance criteria
|
|
\begin_inset Formula $d_{s}<d_{1}$
|
|
\end_inset
|
|
|
|
and
|
|
\begin_inset Formula $r<r_{1}$
|
|
\end_inset
|
|
|
|
are used to validate additional codewords that did not pass the first test.
|
|
A timeout is used to limit the algorithm's execution time if no acceptable
|
|
codeword is found in a reasonable number of trials,
|
|
\begin_inset Formula $T$
|
|
\end_inset
|
|
|
|
.
|
|
Today's personal computers are fast enough that
|
|
\begin_inset Formula $T$
|
|
\end_inset
|
|
|
|
can be set as large as
|
|
\begin_inset Formula $10^{5},$
|
|
\end_inset
|
|
|
|
or even higher.
|
|
Pseudo-code for the FT algorithm is presented in an accompanying text box.
|
|
\end_layout
|
|
|
|
\begin_layout Standard
|
|
\begin_inset Float algorithm
|
|
wide false
|
|
sideways false
|
|
status open
|
|
|
|
\begin_layout Plain Layout
|
|
\begin_inset Caption Standard
|
|
|
|
\begin_layout Plain Layout
|
|
Pseudo-code for the FT algorithm.
|
|
\end_layout
|
|
|
|
\end_inset
|
|
|
|
|
|
\end_layout
|
|
|
|
\begin_layout Enumerate
|
|
For each received symbol, define the erasure probability as 1.3 times the
|
|
|
|
\emph on
|
|
a priori
|
|
\emph default
|
|
symbol-error probability determined from soft-symbol information
|
|
\begin_inset Formula $\{p_{1}\textrm{-rank},\,p_{2}/p_{1}\}$
|
|
\end_inset
|
|
|
|
.
|
|
|
|
\end_layout
|
|
|
|
\begin_layout Enumerate
|
|
Make independent stochastic decisions about whether to erase each symbol
|
|
by using the symbol's erasure probability, allowing a maximum of 51 erasures.
|
|
\end_layout
|
|
|
|
\begin_layout Enumerate
|
|
Attempt errors-and-erasures decoding by using the BM algorithm and the set
|
|
of erasures determined in step 2.
|
|
If the BM decoder produces a candidate codeword, go to step 5.
|
|
\end_layout
|
|
|
|
\begin_layout Enumerate
|
|
If BM decoding was not successful, go to step 2.
|
|
\end_layout
|
|
|
|
\begin_layout Enumerate
|
|
Calculate the hard-decision Hamming distance
|
|
\begin_inset Formula $X$
|
|
\end_inset
|
|
|
|
between the candidate codeword and the received symbols, the corresponding
|
|
soft distance
|
|
\begin_inset Formula $d_{s}$
|
|
\end_inset
|
|
|
|
, and the quality metric
|
|
\begin_inset Formula $u$
|
|
\end_inset
|
|
|
|
.
|
|
If
|
|
\begin_inset Formula $u$
|
|
\end_inset
|
|
|
|
is the largest one encountered so far, preserve any previous value of
|
|
\begin_inset Formula $u_{1}$
|
|
\end_inset
|
|
|
|
as
|
|
\begin_inset Formula $u_{2}$
|
|
\end_inset
|
|
|
|
and then set
|
|
\begin_inset Formula $u_{1}=u$
|
|
\end_inset
|
|
|
|
.
|
|
\end_layout
|
|
|
|
\begin_layout Enumerate
|
|
If
|
|
\begin_inset Formula $X<X_{0}$
|
|
\end_inset
|
|
|
|
and
|
|
\begin_inset Formula $d_{s}<d_{0}$
|
|
\end_inset
|
|
|
|
, go to step 10.
|
|
\end_layout
|
|
|
|
\begin_layout Enumerate
|
|
If the number of trials is less than the timeout limit
|
|
\begin_inset Formula $T,$
|
|
\end_inset
|
|
|
|
go to 2.
|
|
|
|
|
|
|
|
|
|
\end_layout
|
|
|
|
\begin_layout Enumerate
|
|
If
|
|
\begin_inset Formula $d_{s}<d_{1}$
|
|
\end_inset
|
|
|
|
and
|
|
\begin_inset Formula $r<r_{1},$
|
|
\end_inset
|
|
|
|
go to step 10.
|
|
\end_layout
|
|
|
|
\begin_layout Enumerate
|
|
Otherwise, declare decoding failure and exit.
|
|
\end_layout
|
|
|
|
\begin_layout Enumerate
|
|
An acceptable codeword has been found.
|
|
Declare a successful decode and return this codeword.
|
|
\end_layout
|
|
|
|
\end_inset
|
|
|
|
|
|
\end_layout
|
|
|
|
\begin_layout Standard
|
|
Inspiration for the FT decoding algorithm came from a number of sources,
|
|
particularly references
|
|
\begin_inset CommandInset citation
|
|
LatexCommand cite
|
|
key "lhmg2010"
|
|
|
|
\end_inset
|
|
|
|
and
|
|
\begin_inset CommandInset citation
|
|
LatexCommand cite
|
|
key "lk2008"
|
|
|
|
\end_inset
|
|
|
|
and the textbook by Lin and Costello
|
|
\begin_inset CommandInset citation
|
|
LatexCommand cite
|
|
key "lc2004"
|
|
|
|
\end_inset
|
|
|
|
.
|
|
After developing this algorithm, we became aware that our approach is conceptua
|
|
lly similar to a
|
|
\begin_inset Quotes eld
|
|
\end_inset
|
|
|
|
stochastic erasure-only list decoding algorithm
|
|
\begin_inset Quotes erd
|
|
\end_inset
|
|
|
|
, described in reference
|
|
\begin_inset CommandInset citation
|
|
LatexCommand cite
|
|
key "ls2009"
|
|
|
|
\end_inset
|
|
|
|
.
|
|
The algorithm in
|
|
\begin_inset CommandInset citation
|
|
LatexCommand cite
|
|
key "ls2009"
|
|
|
|
\end_inset
|
|
|
|
is applied to higher-rate Reed-Solomon codes on a binary-input channel
|
|
with BPSK-modulated symbols.
|
|
Our 64-ary input channel with 64-FSK modulation required us to develop
|
|
unique methods for assigning erasure probabilities and for defining acceptance
|
|
criteria to select the best codeword from the list of candidates.
|
|
|
|
\end_layout
|
|
|
|
\begin_layout Section
|
|
\begin_inset CommandInset label
|
|
LatexCommand label
|
|
name "sec:Hinted-Decoding"
|
|
|
|
\end_inset
|
|
|
|
Hinted Decoding
|
|
\end_layout
|
|
|
|
\begin_layout Standard
|
|
The FT algorithm is completely general: with equal sensitivity it recovers
|
|
any one of the
|
|
\begin_inset Formula $2^{72}\approx4.7\times10^{21}$
|
|
\end_inset
|
|
|
|
different messages that can be transmitted with the JT65 protocol.
|
|
In some circumstances it's easy to imagine a
|
|
\emph on
|
|
much
|
|
\emph default
|
|
smaller list of messages (say, a few thousand messages or less) that we
|
|
can guess may be among the most likely ones to be received.
|
|
One such situation exists when making short ham-radio contacts that exchange
|
|
minimal information including callsigns, signal reports, perhaps Maidenhead
|
|
locators, and acknowledgments.
|
|
On the EME path or on a VHF or UHF band with limited geographical coverage,
|
|
the most likely received messages often originate from callsigns that have
|
|
been decoded before.
|
|
Saving a list of previously decoded callsigns and associated locators makes
|
|
it easy to generate lists of hypothetical messages and their corresponding
|
|
codewords at very little computational expense.
|
|
The resulting candidate codewords can be tested in the same way as those
|
|
generated by the probabilistic method described in Section
|
|
\begin_inset CommandInset ref
|
|
LatexCommand ref
|
|
reference "sec:The-decoding-algorithm"
|
|
|
|
\end_inset
|
|
|
|
.
|
|
We call this approach
|
|
\begin_inset Quotes eld
|
|
\end_inset
|
|
|
|
hinted decoding;
|
|
\begin_inset Quotes erd
|
|
\end_inset
|
|
|
|
it is sometimes referred to as the
|
|
\begin_inset Quotes eld
|
|
\end_inset
|
|
|
|
deep search
|
|
\begin_inset Quotes erd
|
|
\end_inset
|
|
|
|
algorithm.
|
|
In certain limited situations it can provide enhanced sensitivity for the
|
|
principal task of any decoder, namely to determine precisely what message
|
|
was sent.
|
|
\end_layout
|
|
|
|
\begin_layout Standard
|
|
For hinted decoding we again invoke a ratio threshold test, but in this
|
|
case we use it to answer a more limited question.
|
|
Over the full list of messages considered likely, we want to know whether
|
|
|
|
\begin_inset Formula $r=u_{2}/u_{1}$
|
|
\end_inset
|
|
|
|
, the ratio of second-largest to largest
|
|
\begin_inset Formula $u$
|
|
\end_inset
|
|
|
|
-metric, is small enough for us to be confident the codeword associated
|
|
with
|
|
\begin_inset Formula $u_{1}$
|
|
\end_inset
|
|
|
|
is the one that was transmitted.
|
|
Once again we will set an empirical limit, say
|
|
\begin_inset Formula $r_{2},$
|
|
\end_inset
|
|
|
|
that is small enough to establish adequate confidence, while still ensuring
|
|
that false decodes are rare.
|
|
Because tested candidate codewords are drawn from a list typically no longer
|
|
than a few thousand, rather than
|
|
\begin_inset Formula $2^{72},$
|
|
\end_inset
|
|
|
|
|
|
\begin_inset Formula $r_{2}$
|
|
\end_inset
|
|
|
|
can be a more relaxed limit than the
|
|
\begin_inset Formula $r_{1}$
|
|
\end_inset
|
|
|
|
used in the FT algorithm.
|
|
For the limited subset of messages that operator experience suggests to
|
|
be likely, hinted decodes can be obtained at lower signal levels than required
|
|
for those obtained from the full universe of
|
|
\begin_inset Formula $2^{72}$
|
|
\end_inset
|
|
|
|
possible messages.
|
|
\end_layout
|
|
|
|
\begin_layout Section
|
|
\begin_inset CommandInset label
|
|
LatexCommand label
|
|
name "sec:Theory,-Simulation,-and"
|
|
|
|
\end_inset
|
|
|
|
Decoder Performance Evaluation
|
|
\end_layout
|
|
|
|
\begin_layout Standard
|
|
Comparisons of decoding performance are usually presented in the professional
|
|
literature as plots of word error rate versus
|
|
\begin_inset Formula $E_{b}/N_{0}$
|
|
\end_inset
|
|
|
|
, the ratio of the energy collected per information bit to the one-sided
|
|
noise power spectral density.
|
|
For weak-signal amateur radio work, performance is more conveniently presented
|
|
as the probability of successfully decoding a received word plotted against
|
|
signal-to-noise ratio in a 2500 Hz reference bandwidth,
|
|
\begin_inset Formula $\mathrm{SNR}{}_{2500}$
|
|
\end_inset
|
|
|
|
.
|
|
The relationship between
|
|
\begin_inset Formula $E_{b}/N_{o}$
|
|
\end_inset
|
|
|
|
and
|
|
\begin_inset Formula $\mathrm{SNR}{}_{2500}$
|
|
\end_inset
|
|
|
|
is described in Appendix
|
|
\begin_inset CommandInset ref
|
|
LatexCommand ref
|
|
reference "sec:Appendix:SNR"
|
|
|
|
\end_inset
|
|
|
|
.
|
|
Examples of both presentations are included in the following discussion,
|
|
where we describe simulations carried out to compare performance of FT
|
|
with other algorithms, and with theoretical expectations.
|
|
We have also used simulations to establish suitable default values for
|
|
the acceptance parameters
|
|
\begin_inset Formula $X_{0},$
|
|
\end_inset
|
|
|
|
|
|
\begin_inset Formula $d_{0},$
|
|
\end_inset
|
|
|
|
|
|
\begin_inset Formula $d_{1},$
|
|
\end_inset
|
|
|
|
|
|
\begin_inset Formula $r_{1},$
|
|
\end_inset
|
|
|
|
and
|
|
\begin_inset Formula $r_{2}.$
|
|
\end_inset
|
|
|
|
|
|
\end_layout
|
|
|
|
\begin_layout Subsection
|
|
Simulated results on the AWGN channel
|
|
\end_layout
|
|
|
|
\begin_layout Standard
|
|
Results of simulations using the BM, FT, and KV decoding algorithms on the
|
|
JT65 code are presented in terms of word error rate versus
|
|
\begin_inset Formula $E_{b}/N_{o}$
|
|
\end_inset
|
|
|
|
in Figure
|
|
\begin_inset CommandInset ref
|
|
LatexCommand ref
|
|
reference "fig:bodide"
|
|
|
|
\end_inset
|
|
|
|
.
|
|
For these tests we generated at least 1000 signals at each signal-to-noise
|
|
ratio, assuming the additive white gaussian noise (AWGN) channel, and we
|
|
processed the data using each algorithm.
|
|
For word error rates less than 0.1 it was necessary to process 10,000 or
|
|
even 100,000 simulated signals in order to capture enough errors to make
|
|
the measurements statistically meaningful.
|
|
As a test of the fidelity of our numerical simulations, Figure
|
|
\begin_inset CommandInset ref
|
|
LatexCommand ref
|
|
reference "fig:bodide"
|
|
|
|
\end_inset
|
|
|
|
also shows results calculated from theory for comparison with the BM results.
|
|
The simulated BM results agree with theory to within about 0.1 dB.
|
|
This difference between simulated BM results and theory is caused by small
|
|
errors in the estimates of time- and frequency-offset of the received signal
|
|
in the simulated data.
|
|
Such
|
|
\begin_inset Quotes eld
|
|
\end_inset
|
|
|
|
sync losses
|
|
\begin_inset Quotes erd
|
|
\end_inset
|
|
|
|
are not accounted for in the idealized theoretical results.
|
|
|
|
\end_layout
|
|
|
|
\begin_layout Standard
|
|
As expected, the soft-decision algorithms, FT and KV, are about 2 dB better
|
|
than the hard-decision BM algorithm.
|
|
In addition, FT has a slight edge (about 0.2 dB) over KV.
|
|
On the other hand, the execution time for FT with timeout parameter
|
|
\begin_inset Formula $T=10^{5}$
|
|
\end_inset
|
|
|
|
is longer than the execution time for the KV algorithm.
|
|
Nevertheless, the execution time required for the FT algorithm with
|
|
\begin_inset Formula $T=10^{5}$
|
|
\end_inset
|
|
|
|
is small enough to be practical on most of today's home computers.
|
|
|
|
\end_layout
|
|
|
|
\begin_layout Standard
|
|
\begin_inset Float figure
|
|
wide false
|
|
sideways false
|
|
status open
|
|
|
|
\begin_layout Plain Layout
|
|
\align center
|
|
\begin_inset Graphics
|
|
filename fig_bodide.pdf
|
|
|
|
\end_inset
|
|
|
|
|
|
\begin_inset Caption Standard
|
|
|
|
\begin_layout Plain Layout
|
|
\begin_inset CommandInset label
|
|
LatexCommand label
|
|
name "fig:bodide"
|
|
|
|
\end_inset
|
|
|
|
Word error rates as a function of
|
|
\begin_inset Formula $E_{b}/N_{0},$
|
|
\end_inset
|
|
|
|
the signal-to-noise ratio per information bit.
|
|
Theory: theoretical prediction for the hard-decision BM decoder.
|
|
The remaining curves represent simulation results on an AWGN channel for
|
|
the BM, KV, and FT decoders.
|
|
The KV algorithm was executed with complexity coefficient
|
|
\begin_inset Formula $\lambda=15$
|
|
\end_inset
|
|
|
|
, the most aggressive setting historically used in the
|
|
\emph on
|
|
WSJT
|
|
\emph default
|
|
programs.
|
|
The FT algorithm was run with timeout setting
|
|
\begin_inset Formula $T=10^{5}.$
|
|
\end_inset
|
|
|
|
|
|
\end_layout
|
|
|
|
\end_inset
|
|
|
|
|
|
\end_layout
|
|
|
|
\end_inset
|
|
|
|
|
|
\end_layout
|
|
|
|
\begin_layout Standard
|
|
Because of the importance of error-free transmission in commercial applications,
|
|
plots like that in Figure
|
|
\begin_inset CommandInset ref
|
|
LatexCommand ref
|
|
reference "fig:bodide"
|
|
|
|
\end_inset
|
|
|
|
often extend downward to even smaller error rates, say
|
|
\begin_inset Formula $10^{-6}$
|
|
\end_inset
|
|
|
|
or less.
|
|
The circumstances for minimal amateur-radio QSOs are very different, however.
|
|
Error rates of order 0.1 or higher may be acceptable.
|
|
In this case the essential information is better presented in a plot showing
|
|
the percentage of transmissions copied correctly as a function of signal-to-noi
|
|
se ratio.
|
|
Figure
|
|
\begin_inset CommandInset ref
|
|
LatexCommand ref
|
|
reference "fig:WER2"
|
|
|
|
\end_inset
|
|
|
|
shows in this format the FT results for
|
|
\begin_inset Formula $T=10^{5}$
|
|
\end_inset
|
|
|
|
and the KV results from Figure
|
|
\begin_inset CommandInset ref
|
|
LatexCommand ref
|
|
reference "fig:bodide"
|
|
|
|
\end_inset
|
|
|
|
, along with additional FT results for
|
|
\begin_inset Formula $T=10^{4},\:10^{3},\:10^{2}$
|
|
\end_inset
|
|
|
|
and
|
|
\begin_inset Formula $10$
|
|
\end_inset
|
|
|
|
.
|
|
It is apparent that the FT decoder produces more decodes than KV when
|
|
\begin_inset Formula $T=10^{4}$
|
|
\end_inset
|
|
|
|
or larger.
|
|
It also provides a very significant gain over the hard-decision BM decoder
|
|
even when limited to 10 or fewer trials.
|
|
\end_layout
|
|
|
|
\begin_layout Standard
|
|
\begin_inset Float figure
|
|
wide false
|
|
sideways false
|
|
status open
|
|
|
|
\begin_layout Plain Layout
|
|
\align center
|
|
\begin_inset Graphics
|
|
filename fig_wer2.pdf
|
|
lyxscale 120
|
|
|
|
\end_inset
|
|
|
|
|
|
\end_layout
|
|
|
|
\begin_layout Plain Layout
|
|
\begin_inset Caption Standard
|
|
|
|
\begin_layout Plain Layout
|
|
\begin_inset CommandInset label
|
|
LatexCommand label
|
|
name "fig:WER2"
|
|
|
|
\end_inset
|
|
|
|
Percent of JT65 messages copied as a function of SNR in 2500 Hz bandwidth.
|
|
Solid lines with filled circles are results from the FT decoder; numbers
|
|
adjacent to the curves specify values of the timeout parameter
|
|
\begin_inset Formula $T.$
|
|
\end_inset
|
|
|
|
The dotted line with open squares is the KV decoder with complexity coefficient
|
|
|
|
\begin_inset Formula $\lambda=15$
|
|
\end_inset
|
|
|
|
.
|
|
Results from the BM algorithm are shown with a dashed line and crosses.
|
|
\end_layout
|
|
|
|
\end_inset
|
|
|
|
|
|
\end_layout
|
|
|
|
\end_inset
|
|
|
|
|
|
\end_layout
|
|
|
|
\begin_layout Standard
|
|
Timeout parameter
|
|
\begin_inset Formula $T$
|
|
\end_inset
|
|
|
|
is the maximum number of symbol-erasure trials allowed for a particular
|
|
attempt at decoding a received word.
|
|
Most successful decodes take only a small fraction of the maximum allowed
|
|
number of trials.
|
|
Figure
|
|
\begin_inset CommandInset ref
|
|
LatexCommand ref
|
|
reference "fig:N_vs_X"
|
|
|
|
\end_inset
|
|
|
|
shows the number of stochastic erasure trials required to find the correct
|
|
codeword vs.
|
|
the number of hard-decision errors in the received word, for a run with
|
|
1000 simulated transmissions at
|
|
\begin_inset Formula $\mathrm{SNR}=-24$
|
|
\end_inset
|
|
|
|
dB, just slightly above the decoding threshold.
|
|
The timeout parameter was
|
|
\begin_inset Formula $T=10^{5}$
|
|
\end_inset
|
|
|
|
for this run.
|
|
No points are shown for
|
|
\begin_inset Formula $X\le25$
|
|
\end_inset
|
|
|
|
because all such words are successfully decoded by the BM algorithm.
|
|
Figure
|
|
\begin_inset CommandInset ref
|
|
LatexCommand ref
|
|
reference "fig:N_vs_X"
|
|
|
|
\end_inset
|
|
|
|
shows that the FT algorithm decoded received words with as many as
|
|
\begin_inset Formula $X=43$
|
|
\end_inset
|
|
|
|
symbol errors.
|
|
The results also show that, on average, the number of trials increases
|
|
with the number of errors in the received word.
|
|
The variability of the decoding time also increases dramatically with the
|
|
number of errors in the received word.
|
|
These results provide insight into the mean and variance of the execution
|
|
time for the FT algorithm, since execution time is roughly proportional
|
|
to the number of required trials.
|
|
\end_layout
|
|
|
|
\begin_layout Standard
|
|
\begin_inset Float figure
|
|
wide false
|
|
sideways false
|
|
status open
|
|
|
|
\begin_layout Plain Layout
|
|
\align center
|
|
\begin_inset Graphics
|
|
filename fig_ntrials_vs_nhard.pdf
|
|
lyxscale 120
|
|
|
|
\end_inset
|
|
|
|
|
|
\end_layout
|
|
|
|
\begin_layout Plain Layout
|
|
\begin_inset Caption Standard
|
|
|
|
\begin_layout Plain Layout
|
|
\begin_inset CommandInset label
|
|
LatexCommand label
|
|
name "fig:N_vs_X"
|
|
|
|
\end_inset
|
|
|
|
Number of trials needed to decode a received word versus Hamming distance
|
|
|
|
\begin_inset Formula $X$
|
|
\end_inset
|
|
|
|
between the received word and the decoded codeword, for 1000 simulated
|
|
frames on an AWGN channel with no fading.
|
|
The SNR in 2500 Hz bandwidth is
|
|
\begin_inset Formula $-24$
|
|
\end_inset
|
|
|
|
dB, which corresponds to
|
|
\begin_inset Formula $E_{b}/N_{o}=5.1$
|
|
\end_inset
|
|
|
|
dB.
|
|
|
|
\end_layout
|
|
|
|
\end_inset
|
|
|
|
|
|
\end_layout
|
|
|
|
\end_inset
|
|
|
|
|
|
\end_layout
|
|
|
|
\begin_layout Subsection
|
|
Simulated results for Rayleigh fading and hinted decoding
|
|
\end_layout
|
|
|
|
\begin_layout Standard
|
|
Figure
|
|
\begin_inset CommandInset ref
|
|
LatexCommand ref
|
|
reference "fig:Psuccess"
|
|
|
|
\end_inset
|
|
|
|
presents the results of simulations for signal-to-noise ratios ranging
|
|
from
|
|
\begin_inset Formula $-18$
|
|
\end_inset
|
|
|
|
to
|
|
\begin_inset Formula $-30$
|
|
\end_inset
|
|
|
|
dB, again using 1000 simulated signals for each plotted point.
|
|
We include three curves for each decoding algorithm: one for the AWGN channel
|
|
and no fading, and two more for simulated Doppler spreads of 0.2 and 1.0
|
|
Hz.
|
|
These simulated Doppler spreads are comparable to those encountered on
|
|
HF ionospheric paths and also for EME at VHF and the lower UHF bands.
|
|
For reference, we note that the JT65 symbol rate is about 2.69 Hz.
|
|
|
|
\end_layout
|
|
|
|
\begin_layout Standard
|
|
(*** A little more description is needed here, along with new data for the
|
|
DS curves.***)
|
|
\end_layout
|
|
|
|
\begin_layout Standard
|
|
\begin_inset Float figure
|
|
wide false
|
|
sideways false
|
|
status open
|
|
|
|
\begin_layout Plain Layout
|
|
\align center
|
|
\begin_inset Graphics
|
|
filename fig_psuccess.pdf
|
|
lyxscale 90
|
|
|
|
\end_inset
|
|
|
|
|
|
\end_layout
|
|
|
|
\begin_layout Plain Layout
|
|
\begin_inset Caption Standard
|
|
|
|
\begin_layout Plain Layout
|
|
\begin_inset CommandInset label
|
|
LatexCommand label
|
|
name "fig:Psuccess"
|
|
|
|
\end_inset
|
|
|
|
Percentage of JT65 messages successfully decoded as a function of SNR in
|
|
2500 Hz bandwidth.
|
|
Results are shown for the hard-decision Berlekamp-Massey (BM) and soft-decision
|
|
Franke-Taylor (FT) decoding algorithms.
|
|
Curves labeled DS correspond to the hinted-decode (
|
|
\begin_inset Quotes eld
|
|
\end_inset
|
|
|
|
Deep Search
|
|
\begin_inset Quotes erd
|
|
\end_inset
|
|
|
|
) algorithm.
|
|
Numbers adjacent to the curves are the simulated Doppler spreads in Hz.
|
|
The curve labeled Sync illustrates the effect of proper time and frequency
|
|
synchronization in the decoder presently implemented in
|
|
\emph on
|
|
WSJT-X
|
|
\emph default
|
|
.
|
|
\end_layout
|
|
|
|
\end_inset
|
|
|
|
|
|
\end_layout
|
|
|
|
\end_inset
|
|
|
|
|
|
\end_layout
|
|
|
|
\begin_layout Section
|
|
Summary
|
|
\end_layout
|
|
|
|
\begin_layout Standard
|
|
...
|
|
Still to come ...
|
|
\end_layout
|
|
|
|
\begin_layout Bibliography
|
|
\begin_inset CommandInset bibitem
|
|
LatexCommand bibitem
|
|
label "1"
|
|
key "kv2001"
|
|
|
|
\end_inset
|
|
|
|
“Algebraic soft-decision decoding of Reed-Solomon codes,” R.
|
|
Koetter and A.
|
|
Vardy, IEEE Trans.
|
|
Inform.
|
|
Theory, Vol.
|
|
49, Nov.
|
|
2003.
|
|
\end_layout
|
|
|
|
\begin_layout Bibliography
|
|
\begin_inset CommandInset bibitem
|
|
LatexCommand bibitem
|
|
label "2"
|
|
key "lhmg2010"
|
|
|
|
\end_inset
|
|
|
|
"Stochastic Chase Decoding of Reed-Solomon Codes", Camille Leroux, Saied
|
|
Hemati, Shie Mannor, Warren J.
|
|
Gross, IEEE Communications Letters, Vol.
|
|
14, No.
|
|
9, September 2010.
|
|
\end_layout
|
|
|
|
\begin_layout Bibliography
|
|
\begin_inset CommandInset bibitem
|
|
LatexCommand bibitem
|
|
label "3"
|
|
key "lk2008"
|
|
|
|
\end_inset
|
|
|
|
"Soft-Decision Decoding of Reed-Solomon Codes Using Successive Error-and-Erasure
|
|
Decoding," Soo-Woong Lee and B.
|
|
V.
|
|
K.
|
|
Vijaya Kumar, IEEE
|
|
\begin_inset Quotes eld
|
|
\end_inset
|
|
|
|
GLOBECOM
|
|
\begin_inset Quotes erd
|
|
\end_inset
|
|
|
|
2008 proceedings.
|
|
\end_layout
|
|
|
|
\begin_layout Bibliography
|
|
\begin_inset CommandInset bibitem
|
|
LatexCommand bibitem
|
|
label "4"
|
|
key "lc2004"
|
|
|
|
\end_inset
|
|
|
|
Error Control Coding, 2nd edition, Shu Lin and Daniel J.
|
|
Costello, Pearson-Prentice Hall, 2004.
|
|
\end_layout
|
|
|
|
\begin_layout Bibliography
|
|
\begin_inset CommandInset bibitem
|
|
LatexCommand bibitem
|
|
label "5"
|
|
key "ls2009"
|
|
|
|
\end_inset
|
|
|
|
|
|
\begin_inset Quotes eld
|
|
\end_inset
|
|
|
|
Stochastic Erasure-Only List Decoding Algorithms for Reed-Solomon Codes,
|
|
\begin_inset Quotes erd
|
|
\end_inset
|
|
|
|
Chang-Ming Lee and Yu T.
|
|
Su, IEEE Signal Processing Letters, Vol.
|
|
16, No.
|
|
8, August 2009.
|
|
\end_layout
|
|
|
|
\begin_layout Bibliography
|
|
\begin_inset CommandInset bibitem
|
|
LatexCommand bibitem
|
|
label "6"
|
|
key "karn"
|
|
|
|
\end_inset
|
|
|
|
Berlekamp-Massey decoder written by Phil Karn, http://www.ka9q.net/code/fec/
|
|
\end_layout
|
|
|
|
\begin_layout Section
|
|
\start_of_appendix
|
|
\begin_inset CommandInset label
|
|
LatexCommand label
|
|
name "sec:Appendix:SNR"
|
|
|
|
\end_inset
|
|
|
|
Appendix: Signal to Noise Ratios
|
|
\end_layout
|
|
|
|
\begin_layout Standard
|
|
The signal to noise ratio in a bandwidth,
|
|
\begin_inset Formula $B$
|
|
\end_inset
|
|
|
|
, that is at least as large as the bandwidth occupied by the signal is:
|
|
\begin_inset Formula
|
|
\begin{equation}
|
|
\mathrm{SNR}_{B}=\frac{P_{s}}{N_{o}B}\label{eq:SNR}
|
|
\end{equation}
|
|
|
|
\end_inset
|
|
|
|
where
|
|
\begin_inset Formula $P_{s}$
|
|
\end_inset
|
|
|
|
is the signal power (W),
|
|
\begin_inset Formula $N_{o}$
|
|
\end_inset
|
|
|
|
is one-sided noise power spectral density (W/Hz), and
|
|
\begin_inset Formula $B$
|
|
\end_inset
|
|
|
|
is the bandwidth in Hz.
|
|
In amateur radio applications, digital modes are often compared based on
|
|
the SNR defined in a 2.5 kHz reference bandwidth,
|
|
\begin_inset Formula $\mathrm{SNR}_{2500}$
|
|
\end_inset
|
|
|
|
.
|
|
|
|
\end_layout
|
|
|
|
\begin_layout Standard
|
|
In the professional literature, decoder performance is characterized in
|
|
terms of
|
|
\begin_inset Formula $E_{b}/N_{o}$
|
|
\end_inset
|
|
|
|
, the ratio of the energy collected per information bit,
|
|
\begin_inset Formula $E_{b}$
|
|
\end_inset
|
|
|
|
, to the one-sided noise power spectral density,
|
|
\begin_inset Formula $N_{o}$
|
|
\end_inset
|
|
|
|
.
|
|
Denote the duration of a channel symbol by
|
|
\begin_inset Formula $\tau_{s}$
|
|
\end_inset
|
|
|
|
(for JT65,
|
|
\begin_inset Formula $\tau_{s}=0.3715\,\mathrm{s}$
|
|
\end_inset
|
|
|
|
).
|
|
Signal power is related to the energy per symbol by
|
|
\begin_inset Formula
|
|
\begin{equation}
|
|
P_{s}=E_{s}/\tau_{s}.\label{eq:signal_power}
|
|
\end{equation}
|
|
|
|
\end_inset
|
|
|
|
The total energy in a received JT65 message consisting of
|
|
\begin_inset Formula $n=63$
|
|
\end_inset
|
|
|
|
channel symbols is
|
|
\begin_inset Formula $63E_{s}$
|
|
\end_inset
|
|
|
|
.
|
|
The energy collected for each of the 72 bits of information conveyed by
|
|
the message is then
|
|
\begin_inset Formula
|
|
\begin{equation}
|
|
E_{b}=\frac{63E_{s}}{72}=0.875E_{s}.\label{eq:Eb_Es}
|
|
\end{equation}
|
|
|
|
\end_inset
|
|
|
|
Using equations (
|
|
\begin_inset CommandInset ref
|
|
LatexCommand ref
|
|
reference "eq:SNR"
|
|
|
|
\end_inset
|
|
|
|
)-(
|
|
\begin_inset CommandInset ref
|
|
LatexCommand ref
|
|
reference "eq:Eb_Es"
|
|
|
|
\end_inset
|
|
|
|
),
|
|
\begin_inset Formula $\mathrm{SNR}_{2500}$
|
|
\end_inset
|
|
|
|
can be written in terms of
|
|
\begin_inset Formula $E_{b}/N_{o}$
|
|
\end_inset
|
|
|
|
:
|
|
\begin_inset Formula
|
|
\[
|
|
\mathrm{SNR}_{2500}=1.23\times10^{-3}\frac{E_{b}}{N_{o}}.
|
|
\]
|
|
|
|
\end_inset
|
|
|
|
If all quantities are expressed in dB, then:
|
|
\end_layout
|
|
|
|
\begin_layout Standard
|
|
\begin_inset Formula
|
|
\[
|
|
\mathrm{SNR}_{2500}=(E_{b}/N_{o})_{\mathrm{dB}}-29.1\,\mathrm{dB}.
|
|
\]
|
|
|
|
\end_inset
|
|
|
|
|
|
\end_layout
|
|
|
|
\end_body
|
|
\end_document
|