#!/usr/bin/env python
#
# This work is licensed under the Creative Commons Attribution-ShareAlike
# 3.0 Unported License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-sa/3.0/ or send a letter to
# Creative Commons, 444 Castro Street, Suite 900, Mountain View,
# California, 94041, USA.
# NOTE: This program uses a configuration file specified on the command line.
#       If none is specified, dmrlink.cfg in the same directory as this file
#       will be tried. Finally, if that does not exist, this process will
#       terminate.
from __future__ import print_function
import ConfigParser
import argparse
import sys
import binascii
import csv
import os
import logging
import signal
from logging.config import dictConfig
from hmac import new as hmac_new
from binascii import b2a_hex as h
from hashlib import sha1
from socket import inet_ntoa as IPAddr
from socket import inet_aton as IPHexStr
from socket import gethostbyname
from twisted.internet.protocol import DatagramProtocol
from twisted.internet import reactor
from twisted.internet import task
from random import randint
from time import time
from json import dumps as json_dump
from cPickle import dump as pickle_dump
__author__ = 'Cortney T. Buffington, N0MJS'
__copyright__ = 'Copyright (c) 2013 - 2015 Cortney T. Buffington, N0MJS and the K0USY Group'
__credits__ = 'Adam Fast, KC0YLK, Dave K, and he who wishes not to be named'
__license__ = 'Creative Commons Attribution-ShareAlike 3.0 Unported'
__version__ = '0.27b'
__maintainer__ = 'Cort Buffington, N0MJS'
__email__ = 'n0mjs@me.com'
__status__ = 'beta'
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--config', action='store', dest='CFG_FILE', help='/full/path/to/config.file (usually dmrlink.cfg)')
cli_args = parser.parse_args()
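# Example invocation (hypothetical path):
#   python dmrlink.py -c /opt/dmrlink/dmrlink.cfg
# If -c/--config is omitted, dmrlink.cfg next to this file is tried (see below).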
#************************************************
# PARSE THE CONFIG FILE AND BUILD STRUCTURE
#************************************************
NETWORK = {}
networks = {}
config = ConfigParser.ConfigParser()
if not cli_args.CFG_FILE:
cli_args.CFG_FILE = os.path.dirname(os.path.abspath(__file__))+'/dmrlink.cfg'
try:
if not config.read(cli_args.CFG_FILE):
sys.exit('Configuration file \''+cli_args.CFG_FILE+'\' is not a valid configuration file! Exiting...')
except:
sys.exit('Configuration file \''+cli_args.CFG_FILE+'\' is not a valid configuration file! Exiting...')
try:
for section in config.sections():
if section == 'GLOBAL':
# Process GLOBAL items in the configuration
PATH = config.get(section, 'PATH')
elif section == 'REPORTS':
# Process REPORTS items in the configuration
REPORTS = {
'REPORT_NETWORKS': config.get(section, 'REPORT_NETWORKS'),
'REPORT_INTERVAL': config.getint(section, 'REPORT_INTERVAL'),
'REPORT_PATH': config.get(section, 'REPORT_PATH'),
'PRINT_PEERS_INC_MODE': config.getboolean(section, 'PRINT_PEERS_INC_MODE'),
'PRINT_PEERS_INC_FLAGS': config.getboolean(section, 'PRINT_PEERS_INC_FLAGS')
}
elif section == 'LOGGER':
# Process LOGGER items in the configuration
LOGGER = {
'LOG_FILE': config.get(section, 'LOG_FILE'),
'LOG_HANDLERS': config.get(section, 'LOG_HANDLERS'),
'LOG_LEVEL': config.get(section, 'LOG_LEVEL'),
'LOG_NAME': config.get(section, 'LOG_NAME')
}
elif config.getboolean(section, 'ENABLED'):
        # All other sections define individual IPSC Networks we connect to
# Each IPSC network config will contain the following three sections
NETWORK.update({section: {'LOCAL': {}, 'MASTER': {}, 'PEERS': {}}})
# LOCAL means we need to know this stuff to be a peer in the network
NETWORK[section]['LOCAL'].update({
# In case we want to keep config, but not actually connect to the network
'ENABLED': config.getboolean(section, 'ENABLED'),
# These items are used to create the MODE byte
'PEER_OPER': config.getboolean(section, 'PEER_OPER'),
'IPSC_MODE': config.get(section, 'IPSC_MODE'),
'TS1_LINK': config.getboolean(section, 'TS1_LINK'),
'TS2_LINK': config.getboolean(section, 'TS2_LINK'),
'MODE': '',
# These items are used to create the multi-byte FLAGS field
'AUTH_ENABLED': config.getboolean(section, 'AUTH_ENABLED'),
'CSBK_CALL': config.getboolean(section, 'CSBK_CALL'),
'RCM': config.getboolean(section, 'RCM'),
'CON_APP': config.getboolean(section, 'CON_APP'),
'XNL_CALL': config.getboolean(section, 'XNL_CALL'),
'XNL_MASTER': config.getboolean(section, 'XNL_MASTER'),
'DATA_CALL': config.getboolean(section, 'DATA_CALL'),
'VOICE_CALL': config.getboolean(section, 'VOICE_CALL'),
'MASTER_PEER': config.getboolean(section, 'MASTER_PEER'),
'FLAGS': '',
# Things we need to know to connect and be a peer in this IPSC
'RADIO_ID': hex(int(config.get(section, 'RADIO_ID')))[2:].rjust(8,'0').decode('hex'),
'IP': gethostbyname(config.get(section, 'IP')),
'PORT': config.getint(section, 'PORT'),
'ALIVE_TIMER': config.getint(section, 'ALIVE_TIMER'),
'MAX_MISSED': config.getint(section, 'MAX_MISSED'),
'AUTH_KEY': (config.get(section, 'AUTH_KEY').rjust(40,'0')).decode('hex'),
'NUM_PEERS': 0,
})
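            # Worked example (assumed config values): RADIO_ID = 12345 becomes the packed
            # 4-byte ID '\x00\x00\x30\x39', and AUTH_KEY = '0123' is left-padded to 40 hex
            # digits and decoded into the 20-byte binary key used for HMAC-SHA1 hashing.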
# Master means things we need to know about the master peer of the network
NETWORK[section]['MASTER'].update({
'RADIO_ID': '\x00\x00\x00\x00',
'MODE': '\x00',
'MODE_DECODE': '',
'FLAGS': '\x00\x00\x00\x00',
'FLAGS_DECODE': '',
'STATUS': {
'CONNECTED': False,
'PEER_LIST': False,
'KEEP_ALIVES_SENT': 0,
'KEEP_ALIVES_MISSED': 0,
'KEEP_ALIVES_OUTSTANDING': 0,
'KEEP_ALIVES_RECEIVED': 0,
'KEEP_ALIVE_RX_TIME': 0
},
'IP': '',
'PORT': ''
})
if not NETWORK[section]['LOCAL']['MASTER_PEER']:
NETWORK[section]['MASTER'].update({
'IP': gethostbyname(config.get(section, 'MASTER_IP')),
'PORT': config.getint(section, 'MASTER_PORT')
})
# Temporary locations for building MODE and FLAG data
MODE_BYTE = 0
FLAG_1 = 0
FLAG_2 = 0
# Construct and store the MODE field
if NETWORK[section]['LOCAL']['PEER_OPER']:
MODE_BYTE |= 1 << 6
if NETWORK[section]['LOCAL']['IPSC_MODE'] == 'ANALOG':
MODE_BYTE |= 1 << 4
elif NETWORK[section]['LOCAL']['IPSC_MODE'] == 'DIGITAL':
MODE_BYTE |= 1 << 5
if NETWORK[section]['LOCAL']['TS1_LINK']:
MODE_BYTE |= 1 << 3
else:
MODE_BYTE |= 1 << 2
if NETWORK[section]['LOCAL']['TS2_LINK']:
MODE_BYTE |= 1 << 1
else:
MODE_BYTE |= 1 << 0
NETWORK[section]['LOCAL']['MODE'] = chr(MODE_BYTE)
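            # Worked example (assumed config values): PEER_OPER=True, IPSC_MODE='DIGITAL',
            # TS1_LINK=True and TS2_LINK=True yield
            #   MODE_BYTE = 0x40 | 0x20 | 0x08 | 0x02 = 0x6A, so MODE = '\x6a'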
# Construct and store the FLAGS field
if NETWORK[section]['LOCAL']['CSBK_CALL']:
FLAG_1 |= 1 << 7
if NETWORK[section]['LOCAL']['RCM']:
FLAG_1 |= 1 << 6
if NETWORK[section]['LOCAL']['CON_APP']:
FLAG_1 |= 1 << 5
if NETWORK[section]['LOCAL']['XNL_CALL']:
FLAG_2 |= 1 << 7
if NETWORK[section]['LOCAL']['XNL_CALL'] and NETWORK[section]['LOCAL']['XNL_MASTER']:
FLAG_2 |= 1 << 6
elif NETWORK[section]['LOCAL']['XNL_CALL'] and not NETWORK[section]['LOCAL']['XNL_MASTER']:
FLAG_2 |= 1 << 5
if NETWORK[section]['LOCAL']['AUTH_ENABLED']:
FLAG_2 |= 1 << 4
if NETWORK[section]['LOCAL']['DATA_CALL']:
FLAG_2 |= 1 << 3
if NETWORK[section]['LOCAL']['VOICE_CALL']:
FLAG_2 |= 1 << 2
if NETWORK[section]['LOCAL']['MASTER_PEER']:
FLAG_2 |= 1 << 0
NETWORK[section]['LOCAL']['FLAGS'] = '\x00\x00'+chr(FLAG_1)+chr(FLAG_2)
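            # Worked example (assumed config values): RCM=True, CON_APP=True, AUTH_ENABLED=True,
            # DATA_CALL=True and VOICE_CALL=True (all others False) yield FLAG_1 = 0x60 and
            # FLAG_2 = 0x1C, so FLAGS = '\x00\x00\x60\x1c'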
except:
sys.exit('Could not parse configuration file, exiting...')
#************************************************
# CONFIGURE THE SYSTEM LOGGER
#************************************************
dictConfig({
'version': 1,
'disable_existing_loggers': False,
'filters': {
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
'timed': {
'format': '%(levelname)s %(asctime)s %(message)s'
},
'simple': {
'format': '%(levelname)s %(message)s'
},
'syslog': {
'format': '%(name)s (%(process)d): %(levelname)s %(message)s'
}
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
'console-timed': {
'class': 'logging.StreamHandler',
'formatter': 'timed'
},
'file': {
'class': 'logging.FileHandler',
'formatter': 'simple',
'filename': LOGGER['LOG_FILE'],
},
'file-timed': {
'class': 'logging.FileHandler',
'formatter': 'timed',
'filename': LOGGER['LOG_FILE'],
},
'syslog': {
'class': 'logging.handlers.SysLogHandler',
'formatter': 'syslog',
}
},
'loggers': {
LOGGER['LOG_NAME']: {
'handlers': LOGGER['LOG_HANDLERS'].split(','),
'level': LOGGER['LOG_LEVEL'],
'propagate': True,
}
}
})
logger = logging.getLogger(LOGGER['LOG_NAME'])
#************************************************
# IMPORTING OTHER FILES - '#include'
#************************************************
# Import IPSC message types and version information
#
try:
from ipsc.ipsc_message_types import *
except ImportError:
sys.exit('IPSC message types file not found or invalid')
# Import IPSC flag mask values
#
try:
from ipsc.ipsc_mask import *
except ImportError:
sys.exit('IPSC mask values file not found or invalid')
# Import the Alias files for numeric ids. This is split to save
# time making lookups in one huge dictionary
#
curdir = os.path.dirname(__file__)
subscriber_ids = {}
peer_ids = {}
talkgroup_ids = {}
try:
with open(PATH+'subscriber_ids.csv', 'rU') as subscriber_ids_csv:
subscribers = csv.reader(subscriber_ids_csv, dialect='excel', delimiter=',')
for row in subscribers:
subscriber_ids[int(row[1])] = (row[0])
except IOError:
logger.warning('subscriber_ids.csv not found: Subscriber aliases will not be available')
try:
with open(PATH+'peer_ids.csv', 'rU') as peer_ids_csv:
peers = csv.reader(peer_ids_csv, dialect='excel', delimiter=',')
for row in peers:
peer_ids[int(row[1])] = (row[0])
except IOError:
logger.warning('peer_ids.csv not found: Peer aliases will not be available')
try:
with open(PATH+'talkgroup_ids.csv', 'rU') as talkgroup_ids_csv:
talkgroups = csv.reader(talkgroup_ids_csv, dialect='excel', delimiter=',')
for row in talkgroups:
talkgroup_ids[int(row[1])] = (row[0])
except IOError:
logger.warning('talkgroup_ids.csv not found: Talkgroup aliases will not be available')
#************************************************
# UTILITY FUNCTIONS FOR INTERNAL USE
#************************************************
# Create a 2 byte hex string from an integer
#
def hex_str_2(_int_id):
try:
return hex(_int_id)[2:].rjust(4,'0').decode('hex')
except TypeError:
logger.error('hex_str_2: invalid integer length')
# Create a 3 byte hex string from an integer
#
def hex_str_3(_int_id):
try:
return hex(_int_id)[2:].rjust(6,'0').decode('hex')
except TypeError:
logger.error('hex_str_3: invalid integer length')
# Create a 4 byte hex string from an integer
#
def hex_str_4(_int_id):
try:
return hex(_int_id)[2:].rjust(8,'0').decode('hex')
except TypeError:
logger.error('hex_str_4: invalid integer length')
# Convert a hex string to an int (radio ID, etc.)
#
def int_id(_hex_string):
return int(h(_hex_string), 16)
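# Example (Python 2 byte-string semantics): hex_str_3(3120101) returns '\x2f\x9b\xe5'
# and int_id('\x2f\x9b\xe5') returns 3120101 -- these helpers convert between the
# packed big-endian byte strings used on the wire and plain integers.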
# Re-Write Source Radio-ID (DMR NAT)
#
def dmr_nat(_data, _src_id, _nat_id):
_data = _data.replace(_src_id, _nat_id)
return _data
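# Example: dmr_nat(_data, '\x00\x00\x01', '\x00\x00\x02') rewrites every occurrence
# of packed subscriber ID 1 in the payload with ID 2 (a plain byte-string replace,
# so both IDs must already be in the 3-byte packed form).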
# Lookup text data for numeric IDs
#
def get_info(_id, _dict):
if _id in _dict:
return _dict[_id]
return _id
# Determine if the provided peer ID is valid for the provided network
#
def valid_peer(_peer_list, _peerid):
    return _peerid in _peer_list
# Determine if the provided master ID is valid for the provided network
#
def valid_master(_network, _peerid):
    return NETWORK[_network]['MASTER']['RADIO_ID'] == _peerid
# De-register a peer from an IPSC by removing its information
#
def de_register_peer(_network, _peerid):
# Iterate for the peer in our data
if _peerid in NETWORK[_network]['PEERS'].keys():
del NETWORK[_network]['PEERS'][_peerid]
logger.info('(%s) Peer De-Registration Requested for: %s', _network, int_id(_peerid))
return
else:
logger.warning('(%s) Peer De-Registration Requested for: %s, but we don\'t have a listing for this peer', _network, int_id(_peerid))
pass
# Process the MODE byte in registration/peer list packets for determining master and peer capabilities
#
def process_mode_byte(_hex_mode):
_mode = int(h(_hex_mode), 16)
# Determine whether or not the peer is operational
_peer_op = bool(_mode & PEER_OP_MSK)
# Determine whether or not timeslot 1 is linked
_ts1 = bool(_mode & IPSC_TS1_MSK)
# Determine whether or not timeslot 2 is linked
_ts2 = bool(_mode & IPSC_TS2_MSK)
# Determine the operational mode of the peer
if _mode & PEER_MODE_MSK == PEER_MODE_MSK:
_peer_mode = 'UNKNOWN'
elif not _mode & PEER_MODE_MSK:
_peer_mode = 'NO_RADIO'
elif _mode & PEER_MODE_ANALOG:
_peer_mode = 'ANALOG'
elif _mode & PEER_MODE_DIGITAL:
_peer_mode = 'DIGITAL'
return {
'PEER_OP': _peer_op,
'PEER_MODE': _peer_mode,
'TS_1': _ts1,
'TS_2': _ts2
}
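# Example: the MODE byte 0x6A built in the configuration section above (peer
# operational, digital, both timeslots linked) should decode here to
# {'PEER_OP': True, 'PEER_MODE': 'DIGITAL', 'TS_1': True, 'TS_2': True},
# assuming the usual mask values supplied by ipsc_mask.py.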
# Process the FLAGS bytes in registration replies for determining what services are available
#
def process_flags_bytes(_hex_flags):
_byte3 = int(h(_hex_flags[2]), 16)
_byte4 = int(h(_hex_flags[3]), 16)
_csbk = bool(_byte3 & CSBK_MSK)
_rpt_mon = bool(_byte3 & RPT_MON_MSK)
_con_app = bool(_byte3 & CON_APP_MSK)
_xnl_con = bool(_byte4 & XNL_STAT_MSK)
_xnl_master = bool(_byte4 & XNL_MSTR_MSK)
_xnl_slave = bool(_byte4 & XNL_SLAVE_MSK)
_auth = bool(_byte4 & PKT_AUTH_MSK)
_data = bool(_byte4 & DATA_CALL_MSK)
_voice = bool(_byte4 & VOICE_CALL_MSK)
_master = bool(_byte4 & MSTR_PEER_MSK)
return {
'CSBK': _csbk,
'RCM': _rpt_mon,
'CON_APP': _con_app,
'XNL_CON': _xnl_con,
'XNL_MASTER': _xnl_master,
'XNL_SLAVE': _xnl_slave,
'AUTH': _auth,
'DATA': _data,
'VOICE': _voice,
'MASTER': _master
}
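# Note: only bytes 3 and 4 of the 4-byte FLAGS field (_hex_flags[2] and _hex_flags[3])
# are decoded here; they mirror the FLAG_1/FLAG_2 bytes built in the configuration
# section above.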
# Take a received peer list and the network it belongs to, process and populate the
# data structure in my_ipsc_config with the results, and return a simple list of peers.
#
def process_peer_list(_data, _network):
# Create a temporary peer list to track who we should have in our list -- used to find old peers we should remove.
_temp_peers = []
# Determine the length of the peer list for the parsing iterator
_peer_list_length = int(h(_data[5:7]), 16)
# Record the number of peers in the data structure... we'll use it later (11 bytes per peer entry)
NETWORK[_network]['LOCAL']['NUM_PEERS'] = _peer_list_length/11
logger.info('(%s) Peer List Received from Master: %s peers in this IPSC', _network, NETWORK[_network]['LOCAL']['NUM_PEERS'])
# Iterate each peer entry in the peer list. Skip the header, then pull the next peer, the next, etc.
for i in range(7, _peer_list_length +7, 11):
# Extract various elements from each entry...
_hex_radio_id = (_data[i:i+4])
_hex_address = (_data[i+4:i+8])
_ip_address = IPAddr(_hex_address)
_hex_port = (_data[i+8:i+10])
_port = int(h(_hex_port), 16)
_hex_mode = (_data[i+10:i+11])
# Add this peer to a temporary PeerID list - used to remove any old peers no longer with us
_temp_peers.append(_hex_radio_id)
# This is done elsewhere for the master too, so we use a separate function
_decoded_mode = process_mode_byte(_hex_mode)
# If this entry WAS already in our list, update everything except the stats
# in case this was a re-registration with a different mode, flags, etc.
if _hex_radio_id in NETWORK[_network]['PEERS'].keys():
NETWORK[_network]['PEERS'][_hex_radio_id]['IP'] = _ip_address
NETWORK[_network]['PEERS'][_hex_radio_id]['PORT'] = _port
NETWORK[_network]['PEERS'][_hex_radio_id]['MODE'] = _hex_mode
NETWORK[_network]['PEERS'][_hex_radio_id]['MODE_DECODE'] = _decoded_mode
NETWORK[_network]['PEERS'][_hex_radio_id]['FLAGS'] = ''
NETWORK[_network]['PEERS'][_hex_radio_id]['FLAGS_DECODE'] = ''
logger.debug('(%s) Peer Updated: %s', _network, NETWORK[_network]['PEERS'][_hex_radio_id])
# If this entry was NOT already in our list, add it.
if _hex_radio_id not in NETWORK[_network]['PEERS'].keys():
NETWORK[_network]['PEERS'][_hex_radio_id] = {
'IP': _ip_address,
'PORT': _port,
'MODE': _hex_mode,
'MODE_DECODE': _decoded_mode,
'FLAGS': '',
'FLAGS_DECODE': '',
'STATUS': {
'CONNECTED': False,
'KEEP_ALIVES_SENT': 0,
'KEEP_ALIVES_MISSED': 0,
'KEEP_ALIVES_OUTSTANDING': 0,
'KEEP_ALIVES_RECEIVED': 0,
'KEEP_ALIVE_RX_TIME': 0
}
}
logger.debug('(%s) Peer Added: %s', _network, NETWORK[_network]['PEERS'][_hex_radio_id])
# Finally, check to see if there's a peer already in our list that was not in this peer list
# and if so, delete it.
for peer in NETWORK[_network]['PEERS'].keys():
if peer not in _temp_peers:
de_register_peer(_network, peer)
logger.warning('(%s) Peer Deleted (not in new peer list): %s', _network, int_id(peer))
# Build a peer list - used when a peer registers, re-registers or times out
#
def build_peer_list(_peers):
concatenated_peers = ''
for peer in _peers:
hex_ip = IPHexStr(_peers[peer]['IP'])
hex_port = hex_str_2(_peers[peer]['PORT'])
mode = _peers[peer]['MODE']
concatenated_peers += peer + hex_ip + hex_port + mode
peer_list = hex_str_2(len(concatenated_peers)) + concatenated_peers
return peer_list
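# Worked example (assumed values): a single peer with RADIO_ID 300 (0x0000012C),
# IP 192.168.1.1, port 50000 and mode byte 0x6A serializes as
#   '\x00\x0b' + '\x00\x00\x01\x2c' + '\xc0\xa8\x01\x01' + '\xc3\x50' + '\x6a'
# i.e. a 2-byte length (11 = one 11-byte entry) followed by the entry itself.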
# Gratuitous print-out of the peer list. Pretty much debug stuff.
#
def print_peer_list(_network):
_peers = NETWORK[_network]['PEERS']
_status = NETWORK[_network]['MASTER']['STATUS']['PEER_LIST']
#print('Peer List Status for {}: {}' .format(_network, _status))
if _status and not NETWORK[_network]['PEERS']:
print('We are the only peer for: %s' % _network)
print('')
return
print('Peer List for: %s' % _network)
for peer in _peers.keys():
_this_peer = _peers[peer]
_this_peer_stat = _this_peer['STATUS']
if peer == NETWORK[_network]['LOCAL']['RADIO_ID']:
me = '(self)'
else:
me = ''
print('\tRADIO ID: {} {}' .format(int_id(peer), me))
print('\t\tIP Address: {}:{}' .format(_this_peer['IP'], _this_peer['PORT']))
if _this_peer['MODE_DECODE'] and REPORTS['PRINT_PEERS_INC_MODE']:
print('\t\tMode Values:')
for name, value in _this_peer['MODE_DECODE'].items():
print('\t\t\t{}: {}' .format(name, value))
if _this_peer['FLAGS_DECODE'] and REPORTS['PRINT_PEERS_INC_FLAGS']:
print('\t\tService Flags:')
for name, value in _this_peer['FLAGS_DECODE'].items():
print('\t\t\t{}: {}' .format(name, value))
print('\t\tStatus: {}, KeepAlives Sent: {}, KeepAlives Outstanding: {}, KeepAlives Missed: {}' .format(_this_peer_stat['CONNECTED'], _this_peer_stat['KEEP_ALIVES_SENT'], _this_peer_stat['KEEP_ALIVES_OUTSTANDING'], _this_peer_stat['KEEP_ALIVES_MISSED']))
print('\t\t KeepAlives Received: {}, Last KeepAlive Received at: {}' .format(_this_peer_stat['KEEP_ALIVES_RECEIVED'], _this_peer_stat['KEEP_ALIVE_RX_TIME']))
print('')
# Gratuitous print-out of Master info. Pretty much debug stuff.
#
def print_master(_network):
if NETWORK[_network]['LOCAL']['MASTER_PEER']:
print('DMRlink is the Master for %s' % _network)
else:
_master = NETWORK[_network]['MASTER']
print('Master for %s' % _network)
print('\tRADIO ID: {}' .format(int(h(_master['RADIO_ID']), 16)))
if _master['MODE_DECODE'] and REPORTS['PRINT_PEERS_INC_MODE']:
print('\t\tMode Values:')
for name, value in _master['MODE_DECODE'].items():
print('\t\t\t{}: {}' .format(name, value))
if _master['FLAGS_DECODE'] and REPORTS['PRINT_PEERS_INC_FLAGS']:
print('\t\tService Flags:')
for name, value in _master['FLAGS_DECODE'].items():
print('\t\t\t{}: {}' .format(name, value))
print('\t\tStatus: {}, KeepAlives Sent: {}, KeepAlives Outstanding: {}, KeepAlives Missed: {}' .format(_master['STATUS']['CONNECTED'], _master['STATUS']['KEEP_ALIVES_SENT'], _master['STATUS']['KEEP_ALIVES_OUTSTANDING'], _master['STATUS']['KEEP_ALIVES_MISSED']))
print('\t\t KeepAlives Received: {}, Last KeepAlive Received at: {}' .format(_master['STATUS']['KEEP_ALIVES_RECEIVED'], _master['STATUS']['KEEP_ALIVE_RX_TIME']))
# Timed loop used for reporting IPSC status
#
# REPORT BASED ON THE TYPE SELECTED IN THE MAIN CONFIG FILE
if REPORTS['REPORT_NETWORKS'] == 'PICKLE':
def reporting_loop():
logger.debug('Periodic Reporting Loop Started (PICKLE)')
try:
with open(REPORTS['REPORT_PATH']+'dmrlink_stats.pickle', 'wb') as file:
pickle_dump(NETWORK, file, 2)
file.close()
except IOError as detail:
logger.error('I/O Error: %s', detail)
elif REPORTS['REPORT_NETWORKS'] == 'JSON':
def reporting_loop():
logger.info('Periodic Reporting Loop Started (JSON)')
elif REPORTS['REPORT_NETWORKS'] == 'REDIS':
def reporting_loop():
logger.debug('Periodic Reporting Loop Started (REDIS)')
elif REPORTS['REPORT_NETWORKS'] == 'PRINT':
    def reporting_loop():
        logger.debug('Periodic Reporting Loop Started (PRINT)')
        for network in NETWORK:
            print_master(network)
            print_peer_list(network)
else:
def reporting_loop():
logger.debug('Periodic Reporting Loop Started (NULL)')
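# Example: with REPORT_NETWORKS = PICKLE and REPORT_PATH = /tmp/ (assumed values),
# the full NETWORK dictionary is pickled to /tmp/dmrlink_stats.pickle every
# REPORT_INTERVAL seconds; an external tool can load it with cPickle.load() to
# inspect master and peer status.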
# Shut ourselves down gracefully with the IPSC peers.
#
def handler(_signal, _frame):
logger.info('*** DMRLINK IS TERMINATING WITH SIGNAL %s ***', str(_signal))
for network in networks:
this_ipsc = networks[network]
logger.info('De-Registering from IPSC %s', network)
de_reg_req_pkt = this_ipsc.hashed_packet(this_ipsc._local['AUTH_KEY'], this_ipsc.DE_REG_REQ_PKT)
this_ipsc.send_to_ipsc(de_reg_req_pkt)
reactor.stop()
# Set signal handlers so that we can gracefully exit if need be
for sig in [signal.SIGTERM, signal.SIGINT, signal.SIGQUIT]:
signal.signal(sig, handler)
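# Example: a SIGINT (Ctrl-C) or a plain "kill <pid>" now invokes handler() above,
# which sends a DE_REG_REQ_PKT to every configured IPSC before stopping the reactor.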
#************************************************
#******** ***********
#******** IPSC Network 'Engine' ***********
#******** ***********
#************************************************
#************************************************
# Base Class (used nearly all of the time)
#************************************************
class IPSC(DatagramProtocol):
#************************************************
# IPSC INSTANCE INSTANTIATION
#************************************************
# Modify the initializer to set up our environment and build the packets
# we need to maintain connections
#
def __init__(self, *args, **kwargs):
if len(args) == 1:
# Housekeeping: create references to the configuration and status data for this IPSC instance.
# Some configuration objects that are used frequently and have lengthy names are shortened
# such as (self._master_sock) expands to (self._config['MASTER']['IP'], self._config['MASTER']['PORT']).
# Note that many of them reference each other... this is the Pythonic way.
#
self._network = args[0]
self._config = NETWORK[self._network]
#
self._local = self._config['LOCAL']
self._local_id = self._local['RADIO_ID']
#
self._master = self._config['MASTER']
self._master_stat = self._master['STATUS']
self._master_sock = self._master['IP'], self._master['PORT']
#
self._peers = self._config['PEERS']
#
        # This is a regular list to store peers for the IPSC. At times, parsing a simple list is much less
        # expensive than iterating a list of dictionaries... Maybe I'll find a better way in the future. Also,
        # we have to know when we have a new peer list, so a variable to indicate we do (or don't).
        #
args = ()
# Packet 'constructors' - builds the necessary control packets for this IPSC instance.
# This isn't really necessary for anything other than readability (reduction of code golf)
#
# General Items
self.TS_FLAGS = (self._local['MODE'] + self._local['FLAGS'])
#
# Peer Link Maintenance Packets
self.MASTER_REG_REQ_PKT = (MASTER_REG_REQ + self._local_id + self.TS_FLAGS + IPSC_VER)
self.MASTER_ALIVE_PKT = (MASTER_ALIVE_REQ + self._local_id + self.TS_FLAGS + IPSC_VER)
self.PEER_LIST_REQ_PKT = (PEER_LIST_REQ + self._local_id)
self.PEER_REG_REQ_PKT = (PEER_REG_REQ + self._local_id + IPSC_VER)
self.PEER_REG_REPLY_PKT = (PEER_REG_REPLY + self._local_id + IPSC_VER)
self.PEER_ALIVE_REQ_PKT = (PEER_ALIVE_REQ + self._local_id + self.TS_FLAGS)
self.PEER_ALIVE_REPLY_PKT = (PEER_ALIVE_REPLY + self._local_id + self.TS_FLAGS)
#
# Master Link Maintenance Packets
# self.MASTER_REG_REPLY_PKT is not static and must be generated when it is sent
self.MASTER_ALIVE_REPLY_PKT = (MASTER_ALIVE_REPLY + self._local_id + self.TS_FLAGS + IPSC_VER)
self.PEER_LIST_REPLY_PKT = (PEER_LIST_REPLY + self._local_id)
#
# General Link Maintenance Packets
self.DE_REG_REQ_PKT = (DE_REG_REQ + self._local_id)
self.DE_REG_REPLY_PKT = (DE_REG_REPLY + self._local_id)
#
logger.info('(%s) IPSC Instance Created: %s, %s:%s', self._network, int_id(self._local['RADIO_ID']), self._local['IP'], self._local['PORT'])
else:
# If we didn't get called correctly, log it!
#
            logger.error('IPSC Instance Could Not be Created... Exiting')
sys.exit()
        # Choose which set of functions to use - authenticated or not
if self._local['AUTH_ENABLED']:
self.hashed_packet = self.auth_hashed_packet
self.strip_hash = self.auth_strip_hash
self.validate_auth = self.auth_validate_auth
else:
self.hashed_packet = self.unauth_hashed_packet
self.strip_hash = self.unauth_strip_hash
self.validate_auth = self.unauth_validate_auth
#************************************************
# CALLBACK FUNCTIONS FOR USER PACKET TYPES
#************************************************
def call_mon_status(self, _network, _data):
logger.debug('(%s) Repeater Call Monitor Origin Packet Received: %s',_network, h(_data))
def call_mon_rpt(self, _network, _data):
logger.debug('(%s) Repeater Call Monitor Repeating Packet Received: %s', _network, h(_data))
def call_mon_nack(self, _network, _data):
logger.debug('(%s) Repeater Call Monitor NACK Packet Received: %s', _network, h(_data))
def xcmp_xnl(self, _network, _data):
logger.debug('(%s) XCMP/XNL Packet Received: %s', _network, h(_data))
def repeater_wake_up(self, _network, _data):
logger.debug('(%s) Repeater Wake-Up Packet Received: %s', _network, h(_data))
def group_voice(self, _network, _src_sub, _dst_sub, _ts, _end, _peerid, _data):
logger.debug('(%s) Group Voice Packet Received From: %s, IPSC Peer %s, Destination %s', _network, int_id(_src_sub), int_id(_peerid), int_id(_dst_sub))
def private_voice(self, _network, _src_sub, _dst_sub, _ts, _end, _peerid, _data):
logger.debug('(%s) Private Voice Packet Received From: %s, IPSC Peer %s, Destination %s', _network, int_id(_src_sub), int_id(_peerid), int_id(_dst_sub))
def group_data(self, _network, _src_sub, _dst_sub, _ts, _end, _peerid, _data):
logger.debug('(%s) Group Data Packet Received From: %s, IPSC Peer %s, Destination %s', _network, int_id(_src_sub), int_id(_peerid), int_id(_dst_sub))
def private_data(self, _network, _src_sub, _dst_sub, _ts, _end, _peerid, _data):
logger.debug('(%s) Private Data Packet Received From: %s, IPSC Peer %s, Destination %s', _network, int_id(_src_sub), int_id(_peerid), int_id(_dst_sub))
def unknown_message(self, _network, _packettype, _peerid, _data):
logger.error('(%s) Unknown Message - Type: %s From: %s Packet: %s', _network, h(_packettype), int_id(_peerid), h(_data))
#************************************************
# IPSC SPECIFIC MAINTENANCE FUNCTIONS
#************************************************
# Simple function to send packets - handy to have it all in one place for debugging
#
def send_packet(self, _packet, (_host, _port)):
self.transport.write(_packet, (_host, _port))
        # USE THE FOLLOWING ONLY UNDER DIRE CIRCUMSTANCES -- PERFORMANCE IS ADVERSELY AFFECTED!
#logger.debug('(%s) TX Packet to %s on port %s: %s', self._network, _host, _port, h(_packet))
# Accept a complete packet, ready to be sent, and send it to all active peers + master in an IPSC
#
def send_to_ipsc(self, _packet):
# Send to the Master
if self._master['STATUS']['CONNECTED']:
self.send_packet(_packet, (self._master['IP'], self._master['PORT']))
# Send to each connected Peer
for peer in self._peers.keys():
if self._peers[peer]['STATUS']['CONNECTED']:
self.send_packet(_packet, (self._peers[peer]['IP'], self._peers[peer]['PORT']))
    # FUNCTIONS FOR IPSC MAINTENANCE ACTIVITIES WE RESPOND TO
# SOMEONE HAS SENT US A KEEP ALIVE - WE MUST ANSWER IT
def peer_alive_req(self, _data, _peerid, _host, _port):
_hex_mode = (_data[5])
_hex_flags = (_data[6:10])
_decoded_mode = process_mode_byte(_hex_mode)
_decoded_flags = process_flags_bytes(_hex_flags)
self._peers[_peerid]['MODE'] = _hex_mode
self._peers[_peerid]['MODE_DECODE'] = _decoded_mode
self._peers[_peerid]['FLAGS'] = _hex_flags
self._peers[_peerid]['FLAGS_DECODE'] = _decoded_flags
# Generate a hashed packet from our template and send it.
peer_alive_reply_packet = self.hashed_packet(self._local['AUTH_KEY'], self.PEER_ALIVE_REPLY_PKT)
self.send_packet(peer_alive_reply_packet, (_host, _port))
self.reset_keep_alive(_peerid) # Might as well reset our own counter, we know it's out there...
logger.debug('(%s) Keep-Alive reply sent to Peer %s, %s:%s', self._network, int_id(_peerid), _host, _port)
# SOMEONE WANTS TO REGISTER WITH US - WE'RE COOL WITH THAT
def peer_reg_req(self, _peerid, _host, _port):
peer_reg_reply_packet = self.hashed_packet(self._local['AUTH_KEY'], self.PEER_REG_REPLY_PKT)
self.send_packet(peer_reg_reply_packet, (_host, _port))
logger.info('(%s) Peer Registration Request From: %s, %s:%s', self._network, int_id(_peerid), _host, _port)
# SOMEONE HAS ANSWERED OUR KEEP-ALIVE REQUEST - KEEP TRACK OF IT
def peer_alive_reply(self, _peerid):
self.reset_keep_alive(_peerid)
self._peers[_peerid]['STATUS']['KEEP_ALIVES_RECEIVED'] += 1
self._peers[_peerid]['STATUS']['KEEP_ALIVE_RX_TIME'] = int(time())
logger.debug('(%s) Keep-Alive Reply (we sent the request) Received from Peer %s, %s:%s', self._network, int_id(_peerid), self._peers[_peerid]['IP'], self._peers[_peerid]['PORT'])
    # SOMEONE HAS ANSWERED OUR REQUEST TO REGISTER WITH THEM - KEEP TRACK OF IT
def peer_reg_reply(self, _peerid):
if _peerid in self._peers.keys():
self._peers[_peerid]['STATUS']['CONNECTED'] = True
logger.info('(%s) Registration Reply From: %s, %s:%s', self._network, int_id(_peerid), self._peers[_peerid]['IP'], self._peers[_peerid]['PORT'])
# OUR MASTER HAS ANSWERED OUR KEEP-ALIVE REQUEST - KEEP TRACK OF IT
def master_alive_reply(self, _peerid):
self.reset_keep_alive(_peerid)
self._master['STATUS']['KEEP_ALIVES_RECEIVED'] += 1
self._master['STATUS']['KEEP_ALIVE_RX_TIME'] = int(time())
logger.debug('(%s) Keep-Alive Reply (we sent the request) Received from the Master %s, %s:%s', self._network, int_id(_peerid), self._master['IP'], self._master['PORT'])
# OUR MASTER HAS SENT US A PEER LIST - PROCESS IT
def peer_list_reply(self, _data, _peerid):
NETWORK[self._network]['MASTER']['STATUS']['PEER_LIST'] = True
if len(_data) > 18:
process_peer_list(_data, self._network)
        logger.debug('(%s) Peer List Reply Received From Master %s, %s:%s', self._network, int_id(_peerid), self._master['IP'], self._master['PORT'])
# OUR MASTER HAS ANSWERED OUR REQUEST TO REGISTER - LOTS OF INFORMATION TO TRACK
def master_reg_reply(self, _data, _peerid):
_hex_mode = _data[5]
_hex_flags = _data[6:10]
_num_peers = _data[10:12]
_decoded_mode = process_mode_byte(_hex_mode)
_decoded_flags = process_flags_bytes(_hex_flags)
self._local['NUM_PEERS'] = int(h(_num_peers), 16)
self._master['RADIO_ID'] = _peerid
self._master['MODE'] = _hex_mode
self._master['MODE_DECODE'] = _decoded_mode
self._master['FLAGS'] = _hex_flags
self._master['FLAGS_DECODE'] = _decoded_flags
self._master_stat['CONNECTED'] = True
self._master_stat['KEEP_ALIVES_OUTSTANDING'] = 0
logger.warning('(%s) Registration response (we requested reg) from the Master: %s, %s:%s (%s peers)', self._network, int_id(_peerid), self._master['IP'], self._master['PORT'], self._local['NUM_PEERS'])
# WE ARE MASTER AND SOMEONE HAS REQUESTED REGISTRATION FROM US - ANSWER IT
def master_reg_req(self, _data, _peerid, _host, _port):
        _ip_address = _host
_hex_mode = _data[5]
_hex_flags = _data[6:10]
_decoded_mode = process_mode_byte(_hex_mode)
_decoded_flags = process_flags_bytes(_hex_flags)
self.MASTER_REG_REPLY_PKT = (MASTER_REG_REPLY + self._local_id + self.TS_FLAGS + hex_str_2(self._local['NUM_PEERS']) + IPSC_VER)
master_reg_reply_packet = self.hashed_packet(self._local['AUTH_KEY'], self.MASTER_REG_REPLY_PKT)
self.send_packet(master_reg_reply_packet, (_host, _port))
logger.info('(%s) Master Registration Packet Received from peer %s, %s:%s', self._network, int_id(_peerid), _host, _port)
# If this entry was NOT already in our list, add it.
if _peerid not in self._peers.keys():
self._peers[_peerid] = {
'IP': _ip_address,
'PORT': _port,
'MODE': _hex_mode,
'MODE_DECODE': _decoded_mode,
'FLAGS': _hex_flags,
'FLAGS_DECODE': _decoded_flags,
'STATUS': {
'CONNECTED': True,
'KEEP_ALIVES_SENT': 0,
'KEEP_ALIVES_MISSED': 0,
'KEEP_ALIVES_OUTSTANDING': 0,
'KEEP_ALIVES_RECEIVED': 0,
'KEEP_ALIVE_RX_TIME': int(time())
}
}
self._local['NUM_PEERS'] = len(self._peers)
logger.debug('(%s) Peer Added To Peer List: %s, %s:%s (IPSC now has %s Peers)', self._network, self._peers[_peerid], _host, _port, self._local['NUM_PEERS'])
    # WE ARE MASTER AND SOMEONE SENT US A KEEP-ALIVE - ANSWER IT, TRACK IT
2014-09-05 14:57:47 -04:00
def master_alive_req(self, _peerid, _host, _port):
if _peerid in self._peers.keys():
self._peers[_peerid]['STATUS']['KEEP_ALIVES_RECEIVED'] += 1
self._peers[_peerid]['STATUS']['KEEP_ALIVE_RX_TIME'] = int(time())
master_alive_reply_packet = self.hashed_packet(self._local['AUTH_KEY'], self.MASTER_ALIVE_REPLY_PKT)
self.send_packet(master_alive_reply_packet, (_host, _port))
logger.debug('(%s) Master Keep-Alive Request Received from peer %s, %s:%s', self._network, int_id(_peerid), _host, _port)
else:
logger.warning('(%s) Master Keep-Alive Request Received from *UNREGISTERED* peer %s, %s:%s', self._network, int_id(_peerid), _host, _port)
# WE ARE MASTER AND A PEER HAS REQUESTED A PEER LIST - SEND THEM ONE
2014-09-05 14:57:47 -04:00
def peer_list_req(self, _peerid):
if _peerid in self._peers.keys():
logger.debug('(%s) Peer List Request from peer %s', self._network, int_id(_peerid))
peer_list_packet = self.PEER_LIST_REPLY_PKT + build_peer_list(self._peers)
peer_list_packet = self.hashed_packet(self._local['AUTH_KEY'], peer_list_packet)
self.send_to_ipsc(peer_list_packet)
else:
logger.warning('(%s) Peer List Request Received from *UNREGISTERED* peer %s', self._network, int_id(_peerid))
# Reset the outstanding keep-alive counter for _peerid...
# Used when receiving acks OR when we see traffic from a repeater, since they ignore keep-alives when transmitting
#
def reset_keep_alive(self, _peerid):
if _peerid in self._peers.keys():
self._peers[_peerid]['STATUS']['KEEP_ALIVES_OUTSTANDING'] = 0
self._peers[_peerid]['STATUS']['KEEP_ALIVE_RX_TIME'] = int(time())
if _peerid == self._master['RADIO_ID']:
self._master_stat['KEEP_ALIVES_OUTSTANDING'] = 0
# THE NEXT SECTION DEFINES FUNCTIONS THAT MUST BE DIFFERENT FOR HASHED AND UNHASHED PACKETS
# HASHED MEANS AUTHENTICATED IPSC
# UNHASHED MEANS UNAUTHENTICATED IPSC
    # NEXT THREE FUNCTIONS ARE FOR AUTHENTICATED PACKETS
# Take a packet to be SENT, calculate auth hash and return the whole thing
#
def auth_hashed_packet(self, _key, _data):
_hash = binascii.a2b_hex((hmac_new(_key,_data,sha1)).hexdigest()[:20])
return _data + _hash
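    # Note: the hash is the first 10 bytes (20 hex digits) of HMAC-SHA1(key, payload)
    # appended to the payload; auth_validate_auth() below recomputes and compares it.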
# Remove the hash from a packet and return the payload
#
def auth_strip_hash(self, _data):
return _data[:-10]
# Take a RECEIVED packet, calculate the auth hash and verify authenticity
#
def auth_validate_auth(self, _key, _data):
_payload = self.strip_hash(_data)
_hash = _data[-10:]
_chk_hash = binascii.a2b_hex((hmac_new(_key,_payload,sha1)).hexdigest()[:20])
        return _chk_hash == _hash
    # NEXT THREE FUNCTIONS ARE FOR UN-AUTHENTICATED PACKETS
# There isn't a hash to build, so just return the data
#
def unauth_hashed_packet(self, _key, _data):
return _data
# Remove the hash from a packet and return the payload... except don't
#
def unauth_strip_hash(self, _data):
return _data
# Everything is validated, so just return True
#
def unauth_validate_auth(self, _key, _data):
return True
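    # With AUTH_ENABLED False these three pass-throughs are bound in __init__, so the
    # rest of the code can call hashed_packet/strip_hash/validate_auth unconditionally.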
#************************************************
# TIMED LOOP - CONNECTION MAINTENANCE
#************************************************
# Timed loop initialization (called by the twisted reactor)
#
def startProtocol(self):
# Timed loops for:
# IPSC connection establishment and maintenance
# Reporting/Housekeeping
#
# IF WE'RE NOT THE MASTER...
if not self._local['MASTER_PEER']:
self._peer_maintenance = task.LoopingCall(self.peer_maintenance_loop)
self._peer_maintenance_loop = self._peer_maintenance.start(self._local['ALIVE_TIMER'])
#
# IF WE ARE THE MASTER...
if self._local['MASTER_PEER']:
self._master_maintenance = task.LoopingCall(self.master_maintenance_loop)
self._master_maintenance_loop = self._master_maintenance.start(self._local['ALIVE_TIMER'])
# Timed loop used for IPSC connection Maintenance when we are the MASTER
#
def master_maintenance_loop(self):
logger.debug('(%s) MASTER Connection Maintenance Loop Started', self._network)
update_time = int(time())
for peer in self._peers.keys():
keep_alive_delta = update_time - self._peers[peer]['STATUS']['KEEP_ALIVE_RX_TIME']
logger.debug('(%s) Time Since Last KeepAlive Request from Peer %s: %s seconds', self._network, int_id(peer), keep_alive_delta)
if keep_alive_delta > 120:
de_register_peer(self._network, peer)
peer_list_packet = self.PEER_LIST_REPLY_PKT + build_peer_list(self._peers)
peer_list_packet = self.hashed_packet(self._local['AUTH_KEY'], peer_list_packet)
self.send_to_ipsc(peer_list_packet)
logger.warning('(%s) Timeout Exceeded for Peer %s, De-registering', self._network, int_id(peer))
# Timed loop used for IPSC connection Maintenance when we are a PEER
#
def peer_maintenance_loop(self):
logger.debug('(%s) PEER Connection Maintenance Loop Started', self._network)
# If the master isn't connected, we have to do that before we can do anything else!
#
if not self._master_stat['CONNECTED']:
reg_packet = self.hashed_packet(self._local['AUTH_KEY'], self.MASTER_REG_REQ_PKT)
self.send_packet(reg_packet, self._master_sock)
logger.info('(%s) Registering with the Master: %s:%s', self._network, self._master['IP'], self._master['PORT'])
        # Once the master is connected, we have to send keep-alives... and make sure we get them back
elif self._master_stat['CONNECTED']:
# Send keep-alive to the master
master_alive_packet = self.hashed_packet(self._local['AUTH_KEY'], self.MASTER_ALIVE_PKT)
self.send_packet(master_alive_packet, self._master_sock)
logger.debug('(%s) Keep Alive Sent to the Master: %s, %s:%s', self._network, int_id(self._master['RADIO_ID']) ,self._master['IP'], self._master['PORT'])
# If we had a keep-alive outstanding by the time we send another, mark it missed.
if (self._master_stat['KEEP_ALIVES_OUTSTANDING']) > 0:
self._master_stat['KEEP_ALIVES_MISSED'] += 1
logger.info('(%s) Master Keep-Alive Missed: %s:%s', self._network, self._master['IP'], self._master['PORT'])
# If we have missed too many keep-alives, de-register the master and start over.
if self._master_stat['KEEP_ALIVES_OUTSTANDING'] >= self._local['MAX_MISSED']:
self._master_stat['CONNECTED'] = False
self._master_stat['KEEP_ALIVES_OUTSTANDING'] = 0
logger.error('(%s) Maximum Master Keep-Alives Missed -- De-registering the Master: %s:%s', self._network, self._master['IP'], self._master['PORT'])
# Update our stats before we move on...
self._master_stat['KEEP_ALIVES_SENT'] += 1
self._master_stat['KEEP_ALIVES_OUTSTANDING'] += 1
else:
# This is bad. If we get this message, we need to reset the state and try again
            logger.error('->> (%s) Master in UNKNOWN STATE: %s:%s', self._network, self._master['IP'], self._master['PORT'])
self._master_stat['CONNECTED'] = False
# If the master is connected and we don't have a peer-list yet....
#
        if self._master_stat['CONNECTED'] and not self._master_stat['PEER_LIST']:
# Ask the master for a peer-list
if self._local['NUM_PEERS']:
peer_list_req_packet = self.hashed_packet(self._local['AUTH_KEY'], self.PEER_LIST_REQ_PKT)
self.send_packet(peer_list_req_packet, self._master_sock)
logger.info('(%s), No Peer List - Requesting One From the Master', self._network)
else:
self._master_stat['PEER_LIST'] = True
logger.debug('(%s), Skip asking for a Peer List, we are the only Peer', self._network)
# If we do have a peer-list, we need to register with the peers and send keep-alives...
#
if self._master_stat['PEER_LIST']:
# Iterate the list of peers... so we do this for each one.
for peer in self._peers.keys():
# We will show up in the peer list, but shouldn't try to talk to ourselves.
if peer == self._local_id:
continue
# If we haven't registered to a peer, send a registration
if not self._peers[peer]['STATUS']['CONNECTED']:
peer_reg_packet = self.hashed_packet(self._local['AUTH_KEY'], self.PEER_REG_REQ_PKT)
self.send_packet(peer_reg_packet, (self._peers[peer]['IP'], self._peers[peer]['PORT']))
logger.info('(%s) Registering with Peer %s, %s:%s', self._network, int_id(peer), self._peers[peer]['IP'], self._peers[peer]['PORT'])
# If we have registered with the peer, then send a keep-alive
elif self._peers[peer]['STATUS']['CONNECTED']:
peer_alive_req_packet = self.hashed_packet(self._local['AUTH_KEY'], self.PEER_ALIVE_REQ_PKT)
self.send_packet(peer_alive_req_packet, (self._peers[peer]['IP'], self._peers[peer]['PORT']))
logger.debug('(%s) Keep-Alive Sent to the Peer %s, %s:%s', self._network, int_id(peer), self._peers[peer]['IP'], self._peers[peer]['PORT'])
# If we have a keep-alive outstanding by the time we send another, mark it missed.
if self._peers[peer]['STATUS']['KEEP_ALIVES_OUTSTANDING'] > 0:
self._peers[peer]['STATUS']['KEEP_ALIVES_MISSED'] += 1
logger.info('(%s) Peer Keep-Alive Missed for %s, %s:%s', self._network, int_id(peer), self._peers[peer]['IP'], self._peers[peer]['PORT'])
# If we have missed too many keep-alives, de-register the peer and start over.
if self._peers[peer]['STATUS']['KEEP_ALIVES_OUTSTANDING'] >= self._local['MAX_MISSED']:
self._peers[peer]['STATUS']['CONNECTED'] = False
                        #del peer # Because once it's out of the dictionary, you can't use it for anything else.
logger.warning('(%s) Maximum Peer Keep-Alives Missed -- De-registering the Peer: %s, %s:%s', self._network, int_id(peer), self._peers[peer]['IP'], self._peers[peer]['PORT'])
# Update our stats before moving on...
self._peers[peer]['STATUS']['KEEP_ALIVES_SENT'] += 1
self._peers[peer]['STATUS']['KEEP_ALIVES_OUTSTANDING'] += 1
#************************************************
# MESSAGE RECEIVED - TAKE ACTION
#************************************************
# Actions for received packets by type: For every packet received, there are some things that we need to do:
# Decode some of the info
# Check for auth and authenticate the packet
# Strip the hash from the end... we don't need it anymore
#
# Once they're done, we move on to the processing or callbacks for each packet type.
#
# Callbacks are iterated in the order of "more likely" to "less likely" to reduce processing time
#
def datagramReceived(self, data, (host, port)):
# Loop timing test, uncomment the next two lines. Use for testing only.
#_pkt_id = randint(0,10000)
#_pkt_time = time()
_packettype = data[0:1]
_peerid = data[1:5]
_ipsc_seq = data[5:6]
# AUTHENTICATE THE PACKET
if not self.validate_auth(self._local['AUTH_KEY'], data):
logger.warning('(%s) AuthError: IPSC packet failed authentication. Type %s: Peer: %s, %s:%s', self._network, h(_packettype), int_id(_peerid), host, port)
return
# REMOVE SHA-1 AUTHENTICATION HASH: WE NO LONGER NEED IT
data = self.strip_hash(data)
# PACKETS THAT WE RECEIVE FROM ANY VALID PEER OR VALID MASTER
if _packettype in ANY_PEER_REQUIRED:
            if not (valid_master(self._network, _peerid) or valid_peer(self._peers.keys(), _peerid)):
logger.warning('(%s) PeerError: Peer not in peer-list: %s, %s:%s', self._network, int_id(_peerid), host, port)
return
# ORIGINATED BY SUBSCRIBER UNITS - a.k.a someone transmitted
if _packettype in USER_PACKETS:
                # Extract the IPSC header fields not already extracted above
_src_sub = data[6:9]
_dst_sub = data[9:12]
_call_type = data[12:13]
_unknown_1 = data[13:17]
_call_info = int_id(data[17:18])
_ts = bool(_call_info & TS_CALL_MSK)
_end = bool(_call_info & END_MSK)
# Extract RTP Header Fields
'''
Coming soon kids!!!
                Looks like version, padding, extension, CSRC, payload type and SSRC never change.
The things we might care about are below.
_rtp_byte_1 = int_id(data[18:19])
_rtp_byte_2 = int_id(data[19:20])
_rtp_seq = int_id(data[20:22])
_rtp_tmstmp = int_id(data[22:26])
_rtp_ssid = int_id(data[26:30])
# Extract RTP Payload Data Fields
_payload_type = int_id(data[30:31])
'''
# User Voice and Data Call Types:
if _packettype == GROUP_VOICE:
self.reset_keep_alive(_peerid)
self.group_voice(self._network, _src_sub, _dst_sub, _ts, _end, _peerid, data)
# Loop timing test, uncomment the next two lines. Use for testing only.
#_pkt_proc_time = (time() - _pkt_time) * 1000
#logger.info('TIMING: Group voice packet ID %s took %s ms', _pkt_id, _pkt_proc_time)
return
elif _packettype == PVT_VOICE:
self.reset_keep_alive(_peerid)
self.private_voice(self._network, _src_sub, _dst_sub, _ts, _end, _peerid, data)
return
elif _packettype == GROUP_DATA:
self.reset_keep_alive(_peerid)
self.group_data(self._network, _src_sub, _dst_sub, _ts, _end, _peerid, data)
return
elif _packettype == PVT_DATA:
self.reset_keep_alive(_peerid)
self.private_data(self._network, _src_sub, _dst_sub, _ts, _end, _peerid, data)
return
return
# MOTOROLA XCMP/XNL CONTROL PROTOCOL: We don't process these (yet)
elif _packettype == XCMP_XNL:
self.xcmp_xnl(self._network, data)
return
# ORIGINATED BY PEERS, NOT IPSC MAINTENANCE: Call monitoring is all we've found here so far
elif _packettype == CALL_MON_STATUS:
self.call_mon_status(self._network, data)
return
elif _packettype == CALL_MON_RPT:
self.call_mon_rpt(self._network, data)
return
elif _packettype == CALL_MON_NACK:
self.call_mon_nack(self._network, data)
return
# IPSC CONNECTION MAINTENANCE MESSAGES
elif _packettype == DE_REG_REQ:
de_register_peer(self._network, _peerid)
logger.warning('(%s) Peer De-Registration Request From: %s, %s:%s', self._network, int_id(_peerid), host, port)
return
elif _packettype == DE_REG_REPLY:
logger.warning('(%s) Peer De-Registration Reply From: %s, %s:%s', self._network, int_id(_peerid), host, port)
return
elif _packettype == RPT_WAKE_UP:
self.repeater_wake_up(self._network, data)
logger.debug('(%s) Repeater Wake-Up Packet From: %s, %s:%s', self._network, int_id(_peerid), host, port)
return
return
#
# THE FOLLOWING PACKETS ARE RECEIVED ONLY IF WE ARE OPERATING AS A PEER
#
# ONLY ACCEPT FROM A PREVIOUSLY VALIDATED PEER
if _packettype in PEER_REQUIRED:
if not valid_peer(self._peers.keys(), _peerid):
logger.warning('(%s) PeerError: Peer not in peer-list: %s, %s:%s', self._network, int_id(_peerid), host, port)
return
# REQUESTS FROM PEERS: WE MUST REPLY IMMEDIATELY FOR IPSC MAINTENANCE
if _packettype == PEER_ALIVE_REQ:
self.peer_alive_req(data, _peerid, host, port)
return
elif _packettype == PEER_REG_REQ:
self.peer_reg_req(_peerid, host, port)
return
# ANSWERS FROM REQUESTS WE SENT TO PEERS: WE DO NOT REPLY
elif _packettype == PEER_ALIVE_REPLY:
self.peer_alive_reply(_peerid)
return
elif _packettype == PEER_REG_REPLY:
self.peer_reg_reply(_peerid)
return
return
# PACKETS ONLY ACCEPTED FROM OUR MASTER
# PACKETS WE ONLY ACCEPT IF WE HAVE FINISHED REGISTERING WITH OUR MASTER
if _packettype in MASTER_REQUIRED:
if not valid_master(self._network, _peerid):
logger.warning('(%s) MasterError: %s, %s:%s is not the master peer', self._network, int_id(_peerid), host, port)
return
# ANSWERS FROM REQUESTS WE SENT TO THE MASTER: WE DO NOT REPLY
if _packettype == MASTER_ALIVE_REPLY:
self.master_alive_reply(_peerid)
return
elif _packettype == PEER_LIST_REPLY:
self.peer_list_reply(data, _peerid)
return
return
# THIS MEANS WE HAVE SUCCESSFULLY REGISTERED TO OUR MASTER - RECORD MASTER INFORMATION
elif _packettype == MASTER_REG_REPLY:
self.master_reg_reply(data, _peerid)
return
        # THE FOLLOWING PACKETS ARE RECEIVED ONLY IF WE ARE OPERATING AS A MASTER
# REQUESTS FROM PEERS: WE MUST REPLY IMMEDIATELY FOR IPSC MAINTENANCE
# REQUEST TO REGISTER TO THE IPSC
elif _packettype == MASTER_REG_REQ:
self.master_reg_req(data, _peerid, host, port)
return
# REQUEST FOR A KEEP-ALIVE REPLY (WE KNOW THE PEER IS STILL ALIVE TOO)
elif _packettype == MASTER_ALIVE_REQ:
self.master_alive_req(_peerid, host, port)
return
# REQUEST FOR A PEER LIST
elif _packettype == PEER_LIST_REQ:
self.peer_list_req(_peerid)
return
        # PACKET IS OF AN UNKNOWN TYPE. LOG IT AND IDENTIFY IT!
else:
self.unknown_message(self._network, _packettype, _peerid, data)
return
#************************************************
# MAIN PROGRAM LOOP STARTS HERE
#************************************************
if __name__ == '__main__':
logger.info('DMRlink \'dmrlink.py\' (c) 2013 - 2015 N0MJS & the K0USY Group - SYSTEM STARTING...')
    # INITIALIZE AN IPSC OBJECT (SELF SUSTAINING) FOR EACH CONFIGURED IPSC
networks = {}
for ipsc_network in NETWORK:
if NETWORK[ipsc_network]['LOCAL']['ENABLED']:
networks[ipsc_network] = IPSC(ipsc_network)
reactor.listenUDP(NETWORK[ipsc_network]['LOCAL']['PORT'], networks[ipsc_network], interface=NETWORK[ipsc_network]['LOCAL']['IP'])
# INITIALIZE THE REPORTING LOOP IF CONFIGURED
if REPORTS['REPORT_NETWORKS']:
reporting = task.LoopingCall(reporting_loop)
reporting.start(REPORTS['REPORT_INTERVAL'])
reactor.run()