DMRlink/dmrlink.py


#!/usr/bin/env python
#
# This work is licensed under the Creative Commons Attribution-ShareAlike
# 3.0 Unported License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-sa/3.0/ or send a letter to
# Creative Commons, 444 Castro Street, Suite 900, Mountain View,
# California, 94041, USA.
# NOTE: This program uses a configuration file specified on the command line.
# If none is specified, then dmrlink.cfg in the same directory as this
# file will be tried. Finally, if that does not exist, this process
# will terminate.
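#
# For example, either invocation below will work (the explicit path is hypothetical):
#   python dmrlink.py
#   python dmrlink.py -c /opt/dmrlink/dmrlink.cfg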
from __future__ import print_function
import ConfigParser
import argparse
import sys
import binascii
import csv
import os
import logging
from logging.config import dictConfig
from hmac import new as hmac_new
from binascii import b2a_hex as h
from hashlib import sha1
from socket import inet_ntoa as IPAddr
from twisted.internet.protocol import DatagramProtocol
from twisted.internet import reactor
from twisted.internet import task
__author__ = 'Cortney T. Buffington, N0MJS'
__copyright__ = 'Copyright (c) 2013, 2014 Cortney T. Buffington, N0MJS and the K0USY Group'
__credits__ = 'Adam Fast, KC0YLK, Dave K, and he who wishes not to be named'
__license__ = 'Creative Commons Attribution-ShareAlike 3.0 Unported'
__version__ = '0.2a'
__maintainer__ = 'Cort Buffington, N0MJS'
__email__ = 'n0mjs@me.com'
__status__ = 'Production'
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--config', action='store', dest='CFG_FILE', help='/full/path/to/config.file (usually dmrlink.cfg)')
cli_args = parser.parse_args()
#************************************************
# PARSE THE CONFIG FILE AND BUILD STRUCTURE
#************************************************
NETWORK = {}
networks = {}
config = ConfigParser.ConfigParser()
if not cli_args.CFG_FILE:
cli_args.CFG_FILE = os.path.dirname(os.path.abspath(__file__))+'/dmrlink.cfg'
try:
if not config.read(cli_args.CFG_FILE):
sys.exit('Configuration file \''+cli_args.CFG_FILE+'\' is not a valid configuration file! Exiting...')
except:
sys.exit('Configuration file \''+cli_args.CFG_FILE+'\' is not a valid configuration file! Exiting...')
try:
for section in config.sections():
if section == 'GLOBAL':
# Process GLOBAL items in the configuration
PATH = config.get(section, 'PATH')
elif section == 'REPORTS':
# Process REPORTS items in the configuration
REPORTS = {
'REPORT_PEERS': config.getboolean(section, 'REPORT_PEERS'),
'PEER_REPORT_INC_MODE': config.getboolean(section, 'PEER_REPORT_INC_MODE'),
'PEER_REPORT_INC_FLAGS': config.getboolean(section, 'PEER_REPORT_INC_FLAGS')
}
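# A matching stanza in dmrlink.cfg would look something like this (illustrative values):
#
#   [REPORTS]
#   REPORT_PEERS: True
#   PEER_REPORT_INC_MODE: False
#   PEER_REPORT_INC_FLAGS: False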
elif section == 'LOGGER':
# Process LOGGER items in the configuration
LOGGER = {
'LOG_FILE': config.get(section, 'LOG_FILE'),
'LOG_HANDLERS': config.get(section, 'LOG_HANDLERS'),
'LOG_LEVEL': config.get(section, 'LOG_LEVEL')
}
else:
# All other sections define individual IPSC Networks we connect to
# Each IPSC network config will contain the following three sections
NETWORK.update({section: {'LOCAL': {}, 'MASTER': {}, 'PEERS': {}}})
# LOCAL means we need to know this stuff to be a peer in the network
NETWORK[section]['LOCAL'].update({
# In case we want to keep config, but not actually connect to the network
'ENABLED': config.getboolean(section, 'ENABLED'),
# These items are used to create the MODE byte
'PEER_OPER': config.getboolean(section, 'PEER_OPER'),
'IPSC_MODE': config.get(section, 'IPSC_MODE'),
'TS1_LINK': config.getboolean(section, 'TS1_LINK'),
'TS2_LINK': config.getboolean(section, 'TS2_LINK'),
'MODE': '',
# These items are used to create the multi-byte FLAGS field
'AUTH_ENABLED': config.getboolean(section, 'AUTH_ENABLED'),
'CSBK_CALL': config.getboolean(section, 'CSBK_CALL'),
'RCM': config.getboolean(section, 'RCM'),
'CON_APP': config.getboolean(section, 'CON_APP'),
'XNL_CALL': config.getboolean(section, 'XNL_CALL'),
'XNL_MASTER': config.getboolean(section, 'XNL_MASTER'),
'DATA_CALL': config.getboolean(section, 'DATA_CALL'),
'VOICE_CALL': config.getboolean(section, 'VOICE_CALL'),
'MASTER_PEER': config.getboolean(section, 'MASTER_PEER'),
'FLAGS': '',
# Things we need to know to connect and be a peer in this IPSC
'RADIO_ID': hex(int(config.get(section, 'RADIO_ID')))[2:].rjust(8,'0').decode('hex'),
'PORT': config.getint(section, 'PORT'),
'ALIVE_TIMER': config.getint(section, 'ALIVE_TIMER'),
'MAX_MISSED': config.getint(section, 'MAX_MISSED'),
'AUTH_KEY': (config.get(section, 'AUTH_KEY').rjust(40,'0')).decode('hex'),
'NUM_PEERS': 0,
})
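# Worked example of the two conversions above: a configured RADIO_ID of 1 becomes
# hex(1)[2:] = '1', right-justified to '00000001', then .decode('hex') yields the
# 4-byte string '\x00\x00\x00\x01'. An AUTH_KEY is likewise zero-padded to 40 hex
# digits and decoded into a 20-byte key for the HMAC routines below.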
# Master means things we need to know about the master peer of the network
NETWORK[section]['MASTER'].update({
'RADIO_ID': '\x00\x00\x00\x00',
'MODE': '\x00',
'MODE_DECODE': '',
'FLAGS': '\x00\x00\x00\x00',
'FLAGS_DECODE': '',
'STATUS': {
'CONNECTED': False,
'PEER_LIST': False,
'KEEP_ALIVES_SENT': 0,
'KEEP_ALIVES_MISSED': 0,
'KEEP_ALIVES_OUTSTANDING': 0
},
'IP': config.get(section, 'MASTER_IP'),
'PORT': config.getint(section, 'MASTER_PORT')
})
# Temporary locations for building MODE and FLAG data
MODE_BYTE = 0
FLAG_1 = 0
FLAG_2 = 0
# Construct and store the MODE field
if NETWORK[section]['LOCAL']['PEER_OPER']:
MODE_BYTE |= 1 << 6
if NETWORK[section]['LOCAL']['IPSC_MODE'] == 'ANALOG':
MODE_BYTE |= 1 << 4
elif NETWORK[section]['LOCAL']['IPSC_MODE'] == 'DIGITAL':
MODE_BYTE |= 1 << 5
if NETWORK[section]['LOCAL']['TS1_LINK']:
MODE_BYTE |= 1 << 3
else:
MODE_BYTE |= 1 << 2
if NETWORK[section]['LOCAL']['TS2_LINK']:
MODE_BYTE |= 1 << 1
else:
MODE_BYTE |= 1 << 0
NETWORK[section]['LOCAL']['MODE'] = chr(MODE_BYTE)
# Construct and store the FLAGS field
if NETWORK[section]['LOCAL']['CSBK_CALL']:
FLAG_1 |= 1 << 7
if NETWORK[section]['LOCAL']['RCM']:
FLAG_1 |= 1 << 6
if NETWORK[section]['LOCAL']['CON_APP']:
FLAG_1 |= 1 << 5
if NETWORK[section]['LOCAL']['XNL_CALL']:
FLAG_2 |= 1 << 7
if NETWORK[section]['LOCAL']['XNL_CALL'] and NETWORK[section]['LOCAL']['XNL_MASTER']:
FLAG_2 |= 1 << 6
elif NETWORK[section]['LOCAL']['XNL_CALL'] and not NETWORK[section]['LOCAL']['XNL_MASTER']:
FLAG_2 |= 1 << 5
if NETWORK[section]['LOCAL']['AUTH_ENABLED']:
FLAG_2 |= 1 << 4
if NETWORK[section]['LOCAL']['DATA_CALL']:
FLAG_2 |= 1 << 3
if NETWORK[section]['LOCAL']['VOICE_CALL']:
FLAG_2 |= 1 << 2
if NETWORK[section]['LOCAL']['MASTER_PEER']:
FLAG_2 |= 1 << 0
NETWORK[section]['LOCAL']['FLAGS'] = '\x00\x00'+chr(FLAG_1)+chr(FLAG_2)
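# Worked example of the MODE/FLAGS construction above (values follow directly from
# the bit positions used): PEER_OPER=True, IPSC_MODE=DIGITAL, TS1_LINK=True and
# TS2_LINK=True give MODE_BYTE = 0x40 | 0x20 | 0x08 | 0x02 = 0x6A. With only
# AUTH_ENABLED, DATA_CALL and VOICE_CALL set, FLAG_1 = 0x00 and
# FLAG_2 = 0x10 | 0x08 | 0x04 = 0x1C, so FLAGS becomes '\x00\x00\x00\x1c'.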
except:
sys.exit('Could not parse configuration file, exiting...')
#************************************************
# CONFIGURE THE SYSTEM LOGGER
#************************************************
dictConfig({
'version': 1,
'disable_existing_loggers': False,
'filters': {
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
'timed': {
'format': '%(levelname)s %(asctime)s %(message)s'
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
'console-timed': {
'class': 'logging.StreamHandler',
'formatter': 'timed'
},
'file': {
'class': 'logging.FileHandler',
'formatter': 'simple',
'filename': LOGGER['LOG_FILE'],
},
'file-timed': {
'class': 'logging.FileHandler',
'formatter': 'timed',
'filename': LOGGER['LOG_FILE'],
},
'syslog': {
'class': 'logging.handlers.SysLogHandler',
'formatter': 'verbose',
}
},
'loggers': {
'dmrlink': {
'handlers': LOGGER['LOG_HANDLERS'].split(','),
'level': LOGGER['LOG_LEVEL'],
'propagate': True,
}
}
})
logger = logging.getLogger('dmrlink')
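# The LOGGER values consumed above come from dmrlink.cfg; a minimal stanza along
# these lines (illustrative values) satisfies the three lookups:
#
#   [LOGGER]
#   LOG_FILE: /tmp/dmrlink.log
#   LOG_HANDLERS: console-timed
#   LOG_LEVEL: INFO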
#************************************************
# IMPORTING OTHER FILES - '#include'
#************************************************
# Import IPSC message types and version information
#
try:
from ipsc.ipsc_message_types import *
except ImportError:
sys.exit('IPSC message types file not found or invalid')
# Import IPSC flag mask values
#
try:
from ipsc.ipsc_mask import *
except ImportError:
sys.exit('IPSC mask values file not found or invalid')
# Import the Alias files for numeric ids. This is split to save
# time making lookups in one huge dictionary
#
curdir = os.path.dirname(__file__)
subscriber_ids = {}
peer_ids = {}
talkgroup_ids = {}
try:
with open(PATH+'subscriber_ids.csv', 'rU') as subscriber_ids_csv:
subscribers = csv.reader(subscriber_ids_csv, dialect='excel', delimiter=',')
for row in subscribers:
subscriber_ids[int(row[1])] = (row[0])
except IOError:
logger.warning('subscriber_ids.csv not found: Subscriber aliases will not be available')
try:
with open(PATH+'peer_ids.csv', 'rU') as peer_ids_csv:
peers = csv.reader(peer_ids_csv, dialect='excel', delimiter=',')
for row in peers:
peer_ids[int(row[1])] = (row[0])
except IOError:
logger.warning('peer_ids.csv not found: Peer aliases will not be available')
try:
with open(PATH+'talkgroup_ids.csv', 'rU') as talkgroup_ids_csv:
talkgroups = csv.reader(talkgroup_ids_csv, dialect='excel', delimiter=',')
for row in talkgroups:
talkgroup_ids[int(row[1])] = (row[0])
except IOError:
logger.warning('talkgroup_ids.csv not found: Talkgroup aliases will not be available')
#************************************************
# UTILITY FUNCTIONS FOR INTERNAL USE
#************************************************
# Create a 3 byte TGID or UID from an integer
#
def hex_id(_int_id):
return hex(_int_id)[2:].rjust(6,'0').decode('hex')
# Convert a hex string to an int (radio ID, etc.)
#
def int_id(_hex_string):
return int(h(_hex_string), 16)
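# Worked example (Python 2 byte strings): hex_id(3120101) returns '\x2f\x9c\xe5',
# and int_id('\x2f\x9c\xe5') returns 3120101, so the two functions are inverses
# for 3-byte IDs.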
# Re-Write Source Radio-ID (DMR NAT)
#
def dmr_nat(_data, _src_id, _nat_id):
_data = _data.replace(_src_id, _nat_id)
return _data
# Lookup text data for numeric IDs
#
def get_info(_id, _dict):
if _id in _dict:
return _dict[_id]
return _id
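# e.g. get_info(3120101, subscriber_ids) returns the alias string if one was loaded
# from subscriber_ids.csv, otherwise it hands back 3120101 unchanged.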
# Determine if the provided peer ID is valid for the provided network
#
def valid_peer(_peer_list, _peerid):
if _peerid in _peer_list:
return True
return False
# Determine if the provided master ID is valid for the provided network
#
def valid_master(_network, _peerid):
if NETWORK[_network]['MASTER']['RADIO_ID'] == _peerid:
return True
else:
return False
# Accept a complete packet, ready to be sent, and send it to all active peers + master in an IPSC
#
def send_to_ipsc(_target, _packet):
_network = NETWORK[_target]
_network_instance = networks[_target]
_peers = _network['PEERS']
# Send to the Master
if _network['MASTER']['STATUS']['CONNECTED']:
_network_instance.transport.write(_packet, (_network['MASTER']['IP'], _network['MASTER']['PORT']))
# Send to each connected Peer
for peer in _peers.keys():
if _peers[peer]['STATUS']['CONNECTED']:
_network_instance.transport.write(_packet, (_peers[peer]['IP'], _peers[peer]['PORT']))
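# Example usage (hypothetical network name and payload): once 'IPSC1' is connected,
#   send_to_ipsc('IPSC1', networks['IPSC1'].hashed_packet(
#       NETWORK['IPSC1']['LOCAL']['AUTH_KEY'], GROUP_VOICE + some_payload))
# would send the packet to the master and every connected peer in that IPSC.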
# De-register a peer from an IPSC by removing its information
#
def de_register_peer(_network, _peerid):
# Iterate for the peer in our data
if _peerid in NETWORK[_network]['PEERS'].keys():
del NETWORK[_network]['PEERS'][_peerid]
logger.info('(%s) Peer De-Registration Requested for: %s', _network, h(_peerid))
return
else:
logger.warning('(%s) Peer De-Registration Requested for: %s, but we don\'t have a listing for this peer', _network, h(_peerid))
pass
# Process the MODE byte in registration/peer list packets for determining master and peer capabilities
#
def process_mode_byte(_hex_mode):
_mode = int(h(_hex_mode), 16)
# Determine whether or not the peer is operational
_peer_op = bool(_mode & PEER_OP_MSK)
# Determine whether or not timeslot 1 is linked
_ts1 = bool(_mode & IPSC_TS1_MSK)
# Determine whether or not timeslot 2 is linked
_ts2 = bool(_mode & IPSC_TS2_MSK)
# Determine the operational mode of the peer
if _mode & PEER_MODE_MSK == PEER_MODE_MSK:
_peer_mode = 'UNKNOWN'
elif not _mode & PEER_MODE_MSK:
_peer_mode = 'NO_RADIO'
elif _mode & PEER_MODE_ANALOG:
_peer_mode = 'ANALOG'
elif _mode & PEER_MODE_DIGITAL:
_peer_mode = 'DIGITAL'
return {
'PEER_OP': _peer_op,
'PEER_MODE': _peer_mode,
'TS_1': _ts1,
'TS_2': _ts2
}
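# Illustrative decode (assuming the ipsc_mask constants mirror the bit layout used
# to build the local MODE byte above): process_mode_byte('\x6a') reports an
# operational peer, PEER_MODE 'DIGITAL', with both timeslots linked.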
# Process the FLAGS bytes in registration replies for determining what services are available
#
def process_flags_bytes(_hex_flags):
_byte3 = int(h(_hex_flags[2]), 16)
_byte4 = int(h(_hex_flags[3]), 16)
_csbk = bool(_byte3 & CSBK_MSK)
_rpt_mon = bool(_byte3 & RPT_MON_MSK)
_con_app = bool(_byte3 & CON_APP_MSK)
_xnl_con = bool(_byte4 & XNL_STAT_MSK)
_xnl_master = bool(_byte4 & XNL_MSTR_MSK)
_xnl_slave = bool(_byte4 & XNL_SLAVE_MSK)
_auth = bool(_byte4 & PKT_AUTH_MSK)
_data = bool(_byte4 & DATA_CALL_MSK)
_voice = bool(_byte4 & VOICE_CALL_MSK)
_master = bool(_byte4 & MSTR_PEER_MSK)
return {
'CSBK': _csbk,
'RCM': _rpt_mon,
'CON_APP': _con_app,
'XNL_CON': _xnl_con,
'XNL_MASTER': _xnl_master,
'XNL_SLAVE': _xnl_slave,
'AUTH': _auth,
'DATA': _data,
'VOICE': _voice,
'MASTER': _master
}
# Take a received peer list and the network it belongs to, process and populate the
# data structure in my_ipsc_config with the results, and return a simple list of peers.
#
def process_peer_list(_data, _network):
# Create a temporary peer list to track who we should have in our list -- used to find old peers we should remove.
_temp_peers = []
# Determine the length of the peer list for the parsing iterator
_peer_list_length = int(h(_data[5:7]), 16)
# Record the number of peers in the data structure... we'll use it later (11 bytes per peer entry)
NETWORK[_network]['LOCAL']['NUM_PEERS'] = _peer_list_length/11
logger.info('(%s) Peer List Received from Master: %s peers in this IPSC', _network, NETWORK[_network]['LOCAL']['NUM_PEERS'])
# Iterate each peer entry in the peer list. Skip the header, then pull the next peer, the next, etc.
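# Each 11-byte entry is laid out as parsed below: a 4-byte radio ID, a 4-byte IP
# address, a 2-byte UDP port and a single MODE byte.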
for i in range(7, _peer_list_length +7, 11):
# Extract various elements from each entry...
_hex_radio_id = (_data[i:i+4])
_hex_address = (_data[i+4:i+8])
_ip_address = IPAddr(_hex_address)
_hex_port = (_data[i+8:i+10])
_port = int(h(_hex_port), 16)
_hex_mode = (_data[i+10:i+11])
# Add this peer to a temporary PeerID list - used to remove any old peers no longer with us
_temp_peers.append(_hex_radio_id)
# This is done elsewhere for the master too, so we use a separate function
_decoded_mode = process_mode_byte(_hex_mode)
# If this entry was NOT already in our list, add it.
if _hex_radio_id not in NETWORK[_network]['PEERS'].keys():
NETWORK[_network]['PEERS'][_hex_radio_id] = {
'IP': _ip_address,
'PORT': _port,
'MODE': _hex_mode,
'MODE_DECODE': _decoded_mode,
'FLAGS': '',
'FLAGS_DECODE': '',
'STATUS': {
'CONNECTED': False,
'KEEP_ALIVES_SENT': 0,
'KEEP_ALIVES_MISSED': 0,
'KEEP_ALIVES_OUTSTANDING': 0
}
}
logger.debug('(%s) Peer Added: %s', _network, NETWORK[_network]['PEERS'][_hex_radio_id])
# Finally, check to see if there's a peer already in our list that was not in this peer list
# and if so, delete it.
for peerid in NETWORK[_network]['PEERS'].keys():
if peerid not in _temp_peers:
de_register_peer(_network, peerid)
logger.warning('(%s) Peer Deleted (not in new peer list): %s', _network, h(peerid))
# Gratuitous print-out of the peer list.. Pretty much debug stuff.
#
def print_peer_list(_network):
_peers = NETWORK[_network]['PEERS']
_status = NETWORK[_network]['MASTER']['STATUS']['PEER_LIST']
#print('Peer List Status for {}: {}' .format(_network, _status))
if _status and not NETWORK[_network]['PEERS']:
print('We are the only peer for: %s' % _network)
print('')
return
print('Peer List for: %s' % _network)
for peer in _peers.keys():
_this_peer = _peers[peer]
_this_peer_stat = _this_peer['STATUS']
if peer == NETWORK[_network]['LOCAL']['RADIO_ID']:
me = '(self)'
else:
me = ''
print('\tRADIO ID: {} {}' .format(int(h(peer), 16), me))
print('\t\tIP Address: {}:{}' .format(_this_peer['IP'], _this_peer['PORT']))
if _this_peer['MODE_DECODE'] and REPORTS['PEER_REPORT_INC_MODE']:
print('\t\tMode Values:')
for name, value in _this_peer['MODE_DECODE'].items():
print('\t\t\t{}: {}' .format(name, value))
if _this_peer['FLAGS_DECODE'] and REPORTS['PEER_REPORT_INC_FLAGS']:
print('\t\tService Flags:')
for name, value in _this_peer['FLAGS_DECODE'].items():
print('\t\t\t{}: {}' .format(name, value))
print('\t\tStatus: {}, KeepAlives Sent: {}, KeepAlives Outstanding: {}, KeepAlives Missed: {}' .format(_this_peer_stat['CONNECTED'], _this_peer_stat['KEEP_ALIVES_SENT'], _this_peer_stat['KEEP_ALIVES_OUTSTANDING'], _this_peer_stat['KEEP_ALIVES_MISSED']))
print('')
# Gratuitous print-out of Master info.. Pretty much debug stuff.
#
def print_master(_network):
if NETWORK[_network]['LOCAL']['MASTER_PEER']:
print('DMRlink is the Master for %s' % _network)
else:
_master = NETWORK[_network]['MASTER']
print('Master for %s' % _network)
print('\tRADIO ID: {}' .format(int(h(_master['RADIO_ID']), 16)))
if _master['MODE_DECODE'] and REPORTS['PEER_REPORT_INC_MODE']:
print('\t\tMode Values:')
for name, value in _master['MODE_DECODE'].items():
print('\t\t\t{}: {}' .format(name, value))
if _master['FLAGS_DECODE'] and REPORTS['PEER_REPORT_INC_FLAGS']:
print('\t\tService Flags:')
for name, value in _master['FLAGS_DECODE'].items():
print('\t\t\t{}: {}' .format(name, value))
print('\t\tStatus: {}, KeepAlives Sent: {}, KeepAlives Outstanding: {}, KeepAlives Missed: {}' .format(_master['STATUS']['CONNECTED'], _master['STATUS']['KEEP_ALIVES_SENT'], _master['STATUS']['KEEP_ALIVES_OUTSTANDING'], _master['STATUS']['KEEP_ALIVES_MISSED']))
#************************************************
#******** ***********
#******** IPSC Network 'Engine' ***********
#******** ***********
#************************************************
#************************************************
# Base Class (used nearly all of the time)
#************************************************
class IPSC(DatagramProtocol):
# Modify the initializer to set up our environment and build the packets
# we need to maintain connections
#
def __init__(self, *args, **kwargs):
if len(args) == 1:
# Housekeeping: create references to the configuration and status data for this IPSC instance.
# Some configuration objects that are used frequently and have lengthy names are shortened
# such as (self._master_sock) expands to (self._config['MASTER']['IP'], self._config['MASTER']['PORT']).
# Note that many of them reference each other... this is the Pythonic way.
#
self._network = args[0]
self._config = NETWORK[self._network]
#
self._local = self._config['LOCAL']
self._local_id = self._local['RADIO_ID']
#
self._master = self._config['MASTER']
self._master_stat = self._master['STATUS']
self._master_sock = self._master['IP'], self._master['PORT']
#
self._peers = self._config['PEERS']
#
# This is a regular list to store peers for the IPSC. At times, parsing a simple list is much less
# spendy than iterating a list of dictionaries... Maybe I'll find a better way in the future. Also,
# we have to know when we have a new peer list, so a variable to indicate we do (or don't).
#
args = ()
# Packet 'constructors' - builds the necessary control packets for this IPSC instance.
# This isn't really necessary for anything other than readability (reduction of code golf)
#
# General Items
self.TS_FLAGS = (self._local['MODE'] + self._local['FLAGS'])
#
# Peer Link Maintenance Packets
self.MASTER_REG_REQ_PKT = (MASTER_REG_REQ + self._local_id + self.TS_FLAGS + IPSC_VER)
self.MASTER_ALIVE_PKT = (MASTER_ALIVE_REQ + self._local_id + self.TS_FLAGS + IPSC_VER)
self.PEER_LIST_REQ_PKT = (PEER_LIST_REQ + self._local_id)
self.PEER_REG_REQ_PKT = (PEER_REG_REQ + self._local_id + IPSC_VER)
self.PEER_REG_REPLY_PKT = (PEER_REG_REPLY + self._local_id + IPSC_VER)
self.PEER_ALIVE_REQ_PKT = (PEER_ALIVE_REQ + self._local_id + self.TS_FLAGS)
self.PEER_ALIVE_REPLY_PKT = (PEER_ALIVE_REPLY + self._local_id + self.TS_FLAGS)
#
# Master Link Maintenance Packets
self.MASTER_REG_REPLY_PKT = (MASTER_REG_REPLY + self._local_id + self.TS_FLAGS + str(self._local['NUM_PEERS']) + IPSC_VER)
self.MASTER_ALIVE_REPLY_PKT = (MASTER_ALIVE_REPLY + self._local_id + self.TS_FLAGS + IPSC_VER)
self.PEER_LIST_REPLY_PKT = (PEER_LIST_REPLY + self._local_id)
#
logger.info('(%s) IPSC Instance Created', self._network)
else:
# If we didn't get called correctly, log it!
#
logger.error('IPSC Instance Could Not be Created... Exiting')
sys.exit()
# Choose which set of functions to use - authenticated or not
if self._local['AUTH_ENABLED']:
self.hashed_packet = self.auth_hashed_packet
self.strip_hash = self.auth_strip_hash
self.validate_auth = self.auth_validate_auth
else:
self.hashed_packet = self.unauth_hashed_packet
self.strip_hash = self.unauth_strip_hash
self.validate_auth = self.unauth_validate_auth
# This is called by REACTOR when it starts, We use it to set up the timed
# loop for each instance of the IPSC engine
#
def startProtocol(self):
# Timed loops for:
# IPSC connection establishment and maintenance
# Reporting/Housekeeping
#
if not self._local['MASTER_PEER']:
self._maintenance = task.LoopingCall(self.maintenance_loop)
self._maintenance_loop = self._maintenance.start(self._local['ALIVE_TIMER'])
#
self._reporting = task.LoopingCall(self.reporting_loop)
self._reporting_loop = self._reporting.start(10)
#************************************************
# CALLBACK FUNCTIONS FOR USER PACKET TYPES
#************************************************
def call_mon_origin(self, _network, _data):
logger.debug('(%s) Repeater Call Monitor Origin Packet Received: %s',_network, h(_data))
def call_mon_rpt(self, _network, _data):
logger.debug('(%s) Repeater Call Monitor Repeating Packet Received: %s', _network, h(_data))
def call_mon_nack(self, _network, _data):
logger.debug('(%s) Repeater Call Monitor NACK Packet Received: %s', _network, h(_data))
def xcmp_xnl(self, _network, _data):
logger.debug('(%s) XCMP/XNL Packet Received: %s', _network, h(_data))
def repeater_wake_up(self, _network, _data):
logger.debug('(%s) Repeater Wake-Up Packet Received: %s', _network, h(_data))
def group_voice(self, _network, _src_sub, _dst_sub, _ts, _end, _peerid, _data):
logger.debug('(%s) Group Voice Packet Received From: %s, IPSC Peer %s, Destination %s', _network, _src_sub, _peerid, _dst_sub)
def private_voice(self, _network, _src_sub, _dst_sub, _ts, _end, _peerid, _data):
logger.debug('(%s) Private Voice Packet Received From: %s, IPSC Peer %s, Destination %s', _network, _src_sub, _peerid, _dst_sub)
def group_data(self, _network, _src_sub, _dst_sub, _ts, _end, _peerid, _data):
logger.debug('(%s) Group Data Packet Received From: %s, IPSC Peer %s, Destination %s', _network, _src_sub, _peerid, _dst_sub)
def private_data(self, _network, _src_sub, _dst_sub, _ts, _end, _peerid, _data):
logger.debug('(%s) Private Data Packet Received From: %s, IPSC Peer %s, Destination %s', _network, _src_sub, _peerid, _dst_sub)
def unknown_message(self, _network, _packettype, _peerid, _data):
_packettype = h(_packettype)
logger.error('(%s) Unknown message type encountered\n\tPacket Type: %s\n\tFrom: %s\n\tPacket: %s', _network, _packettype, _peerid, h(_data))
# Reset the outstanding keep-alive counter for _peerid...
# Used when receiving acks OR when we see traffic from a repeater, since they ignore keep-alives when transmitting
#
def reset_keep_alive(self, _peerid):
if _peerid in self._peers.keys():
self._peers[_peerid]['STATUS']['KEEP_ALIVES_OUTSTANDING'] = 0
if _peerid == self._master['RADIO_ID']:
self._master_stat['KEEP_ALIVES_OUTSTANDING'] = 0
#
# NEXT THREE FUNCTIONS ARE FOR AUTHENTICATED PACKETS
#
# Take a packet to be SENT, calculate auth hash and return the whole thing
#
def auth_hashed_packet(self, _key, _data):
_hash = binascii.a2b_hex((hmac_new(_key,_data,sha1)).hexdigest()[:20])
return _data + _hash
# Remove the hash from a packet and return the payload
#
def auth_strip_hash(self, _data):
return _data[:-10]
# Take a RECEIVED packet, calculate the auth hash and verify authenticity
#
def auth_validate_auth(self, _key, _data):
_payload = self.strip_hash(_data)
_hash = _data[-10:]
_chk_hash = binascii.a2b_hex((hmac_new(_key,_payload,sha1)).hexdigest()[:20])
if _chk_hash == _hash:
return True
else:
return False
#
# NEXT THREE FUNCTIONS ARE FOR UN-AUTHENTICATED PACKETS
#
# There isn't a hash to build, so just return the data
#
def unauth_hashed_packet(self, _key, _data):
return _data
# Remove the hash from a packet and return the payload... except don't
#
def unauth_strip_hash(self, _data):
return _data
# Everything is validated, so just return True
#
def unauth_validate_auth(self, _key, _data):
return True
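# Standalone sketch of the authentication scheme implemented above (the key and
# payload bytes are made up): the trailing 10 bytes of an authenticated IPSC packet
# are the first 10 bytes of HMAC-SHA1(AUTH_KEY, payload).
#
#   >>> key = '12345'.rjust(40, '0').decode('hex')      # built the same way as AUTH_KEY
#   >>> payload = '\x90\x00\x00\x00\x01'                # hypothetical 5-byte IPSC header
#   >>> digest = binascii.a2b_hex(hmac_new(key, payload, sha1).hexdigest()[:20])
#   >>> len(digest)
#   10
#   >>> (payload + digest)[-10:] == digest              # what auth_validate_auth checks
#   True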
#************************************************
# TIMED LOOP - MY CONNECTION MAINTENANCE
#************************************************
def reporting_loop(self):
# Right now, without this, we really don't know anything is happening.
logger.debug('(%s) Periodic Reporting Loop Started', self._network)
if REPORTS['REPORT_PEERS']:
print_master(self._network)
print_peer_list(self._network)
def maintenance_loop(self):
logger.debug('(%s) Periodic Connection Maintenance Loop Started', self._network)
# If the master isn't connected, we have to do that before we can do anything else!
#
if not self._master_stat['CONNECTED']:
reg_packet = self.hashed_packet(self._local['AUTH_KEY'], self.MASTER_REG_REQ_PKT)
self.transport.write(reg_packet, self._master_sock)
logger.info('(%s) Registering with the Master', self._network)
# Once the master is connected, we have to send keep-alives.. and make sure we get them back
elif self._master_stat['CONNECTED']:
# Send keep-alive to the master
master_alive_packet = self.hashed_packet(self._local['AUTH_KEY'], self.MASTER_ALIVE_PKT)
self.transport.write(master_alive_packet, self._master_sock)
logger.debug('(%s) Keep Alive Sent to the Master', self._network)
# If we had a keep-alive outstanding by the time we send another, mark it missed.
if (self._master_stat['KEEP_ALIVES_OUTSTANDING']) > 0:
self._master_stat['KEEP_ALIVES_MISSED'] += 1
logger.info('(%s) Master Keep-Alive Missed', self._network)
# If we have missed too many keep-alives, de-register the master and start over.
if self._master_stat['KEEP_ALIVES_OUTSTANDING'] >= self._local['MAX_MISSED']:
self._master_stat['CONNECTED'] = False
self._master_stat['KEEP_ALIVES_OUTSTANDING'] = 0
logger.error('(%s) Maximum Master Keep-Alives Missed -- De-registering the Master', self._network)
# Update our stats before we move on...
self._master_stat['KEEP_ALIVES_SENT'] += 1
self._master_stat['KEEP_ALIVES_OUTSTANDING'] += 1
else:
# This is bad. If we get this message, we need to reset the state and try again
logger.error('->> (%s) Master in UNKNOWN STATE: %s:%s', self._network, self._master_sock[0], self._master_sock[1])
self._master_stat['CONNECTED'] = False
# If the master is connected and we don't have a peer-list yet....
#
if (self._master_stat['CONNECTED'] == True) and (self._master_stat['PEER_LIST'] == False):
# Ask the master for a peer-list
peer_list_req_packet = self.hashed_packet(self._local['AUTH_KEY'], self.PEER_LIST_REQ_PKT)
self.transport.write(peer_list_req_packet, self._master_sock)
logger.info('(%s) No Peer List - Requesting One From the Master', self._network)
# If we do have a peer-list, we need to register with the peers and send keep-alives...
#
if self._master_stat['PEER_LIST']:
# Iterate the list of peers... so we do this for each one.
for peer_id in self._peers.keys():
peer = self._peers[peer_id]
# We will show up in the peer list, but shouldn't try to talk to ourselves.
if peer_id == self._local_id:
continue
# If we haven't registered to a peer, send a registration
if not peer['STATUS']['CONNECTED']:
peer_reg_packet = self.hashed_packet(self._local['AUTH_KEY'], self.PEER_REG_REQ_PKT)
self.transport.write(peer_reg_packet, (peer['IP'], peer['PORT']))
logger.info('(%s) Registering with Peer %s', self._network, int_id(peer_id))
# If we have registered with the peer, then send a keep-alive
elif peer['STATUS']['CONNECTED']:
peer_alive_req_packet = self.hashed_packet(self._local['AUTH_KEY'], self.PEER_ALIVE_REQ_PKT)
self.transport.write(peer_alive_req_packet, (peer['IP'], peer['PORT']))
logger.debug('(%s) Keep-Alive Sent to the Peer %s', self._network, int_id(peer_id))
# If we have a keep-alive outstanding by the time we send another, mark it missed.
if peer['STATUS']['KEEP_ALIVES_OUTSTANDING'] > 0:
peer['STATUS']['KEEP_ALIVES_MISSED'] += 1
logger.info('(%s) Peer Keep-Alive Missed for %s', self._network, int_id(peer_id))
# If we have missed too many keep-alives, de-register the peer and start over.
if peer['STATUS']['KEEP_ALIVES_OUTSTANDING'] >= self._local['MAX_MISSED']:
peer['STATUS']['CONNECTED'] = False
#del peer # Because once it's out of the dictionary, you can't use it for anything else.
logger.warning('(%s) Maximum Peer Keep-Alives Missed -- De-registering the Peer: %s', self._network, int_id(peer_id))
# Update our stats before moving on...
peer['STATUS']['KEEP_ALIVES_SENT'] += 1
peer['STATUS']['KEEP_ALIVES_OUTSTANDING'] += 1
# For public display of information, etc. - anything not part of internal logging/diagnostics
#
def _notify_event(self, network, event, info):
"""
Used internally whenever an event happens that may be useful to notify the outside world about.
Arguments:
network: string, network name to look up in config
event: string, basic description
info: dict, in the interest of accomplishing as much as possible without code changes.
The dict will typically contain a peer_id so the origin of the event is known.
"""
pass
#************************************************
# RECEIVED DATAGRAM - ACT IMMEDIATELY!!!
#************************************************
# Actions for received packets by type: For every packet received, there are some things that we need to do:
# Decode some of the info
# Check for auth and authenticate the packet
# Strip the hash from the end... we don't need it anymore
#
# Once they're done, we move on to the processing or callbacks for each packet type.
#
def datagramReceived(self, data, (host, port)):
_packettype = data[0:1]
_peerid = data[1:5]
# Authenticate the packet
if not self.validate_auth(self._local['AUTH_KEY'], data):
logger.warning('(%s) AuthError: IPSC packet failed authentication. Type %s: Peer ID: %s', self._network, h(_packettype), int_id(_peerid))
return
# Strip the hash, we won't need it anymore
data = self.strip_hash(data)
# Packets types that must be originated from a peer (including master peer)
if _packettype in ANY_PEER_REQUIRED:
if not (valid_master(self._network, _peerid) or valid_peer(self._peers.keys(), _peerid)):
logger.warning('(%s) PeerError: Peer not in peer-list: %s', self._network, int_id(_peerid))
return
# User, as in "subscriber" generated packets - a.k.a. someone transmitted
if _packettype in USER_PACKETS:
# Extract commonly used items from the packet header
_src_sub = data[6:9]
_dst_sub = data[9:12]
_call = int_id(data[17:18])
_ts = bool(_call & TS_CALL_MSK)
_end = bool(_call & END_MSK)
# User Voice and Data Call Types:
if _packettype == GROUP_VOICE:
self.reset_keep_alive(_peerid)
self.group_voice(self._network, _src_sub, _dst_sub, _ts, _end, _peerid, data)
self._notify_event(self._network, 'group_voice', {'peer_id': int_id(_peerid)})
return
elif _packettype == PVT_VOICE:
self.reset_keep_alive(_peerid)
self.private_voice(self._network, _src_sub, _dst_sub, _ts, _end, _peerid, data)
self._notify_event(self._network, 'private_voice', {'peer_id': int_id(_peerid)})
return
elif _packettype == GROUP_DATA:
self.reset_keep_alive(_peerid)
self.group_data(self._network, _src_sub, _dst_sub, _ts, _end, _peerid, data)
self._notify_event(self._network, 'group_data', {'peer_id': int_id(_peerid)})
return
elif _packettype == PVT_DATA:
self.reset_keep_alive(_peerid)
self.private_data(self._network, _src_sub, _dst_sub, _ts, _end, _peerid, data)
self._notify_event(self._network, 'private_data', {'peer_id': int_id(_peerid)})
return
return
# Other peer-required types that we don't do much or anything with yet
elif _packettype == XCMP_XNL:
self.xcmp_xnl(self._network, data)
return
elif _packettype == CALL_MON_ORIGIN:
self.call_mon_origin(self._network, data)
return
elif _packettype == CALL_MON_RPT:
self.call_mon_rpt(self._network, data)
return
elif _packettype == CALL_MON_NACK:
self.call_mon_nack(self._network, data)
return
# Connection maintenance packets that fall into this category
elif _packettype == DE_REG_REQ:
de_register_peer(self._network, _peerid)
logger.warning('(%s) Peer De-Registration Request From: %s', self._network, int_id(_peerid))
return
elif _packettype == DE_REG_REPLY:
logger.warning('(%s) Peer De-Registration Reply From: %s', self._network, int_id(_peerid))
return
elif _packettype == RPT_WAKE_UP:
self.repeater_wake_up(self._network, data)
logger.debug('(%s) Repeater Wake-Up Packet From: %s', self._network, int_id(_peerid))
return
return
# Packets types that must be originated from a peer
if _packettype in PEER_REQUIRED:
if not valid_peer(self._peers.keys(), _peerid):
logger.warning('(%s) PeerError: Peer %s not in peer-list', self._network, int_id(_peerid))
return
# Packets we send...
if _packettype == PEER_ALIVE_REQ:
_hex_mode = (data[5])
_hex_flags = (data[6:10])
_decoded_mode = process_mode_byte(_hex_mode)
_decoded_flags = process_flags_bytes(_hex_flags)
self._peers[_peerid]['MODE'] = _hex_mode
self._peers[_peerid]['MODE_DECODE'] = _decoded_mode
self._peers[_peerid]['FLAGS'] = _hex_flags
self._peers[_peerid]['FLAGS_DECODE'] = _decoded_flags
# Generate a hashed packet from our template and send it.
peer_alive_reply_packet = self.hashed_packet(self._local['AUTH_KEY'], self.PEER_ALIVE_REPLY_PKT)
self.transport.write(peer_alive_reply_packet, (host, port))
self.reset_keep_alive(_peerid) # Might as well reset our own counter, we know it's out there...
logger.debug('(%s) Keep-Alive reply sent to Peer %s', self._network, int_id(_peerid))
return
elif _packettype == PEER_REG_REQ:
peer_reg_reply_packet = self.hashed_packet(self._local['AUTH_KEY'], self.PEER_REG_REPLY_PKT)
self.transport.write(peer_reg_reply_packet, (host, port))
logger.info('(%s) Peer Registration Request From: %s', self._network, int_id(_peerid))
return
# Packets we receive...
elif _packettype == PEER_ALIVE_REPLY:
self.reset_keep_alive(_peerid)
logger.debug('(%s) Keep-Alive Reply (we sent the request) Received from Peer %s', self._network, int_id(_peerid))
return
elif _packettype == PEER_REG_REPLY:
if _peerid in self._peers.keys():
self._peers[_peerid]['STATUS']['CONNECTED'] = True
logger.info('(%s) Registration Reply From: %s', self._network, int_id(_peerid))
return
return
# Packets types that must be originated from a Master
# Packets we receive...
if _packettype in MASTER_REQUIRED:
if not valid_master(self._network, _peerid):
logger.warning('(%s) MasterError: %s is not the master peer', self._network, int_id(_peerid))
return
if _packettype == MASTER_ALIVE_REPLY:
self.reset_keep_alive(_peerid)
logger.debug('(%s) Keep-Alive Reply (we sent the request) Received from the Master %s', self._network, int_id(_peerid))
return
elif _packettype == PEER_LIST_REPLY:
NETWORK[self._network]['MASTER']['STATUS']['PEER_LIST'] = True
if len(data) > 18:
process_peer_list(data, self._network)
logger.debug('(%s) Peer List Reply Received From Master %s', self._network, int_id(_peerid))
return
return
# When we hear from the master, record its ID, flag that we're connected, and reset the dead counter.
elif _packettype == MASTER_REG_REPLY:
_hex_mode = data[5]
_hex_flags = data[6:10]
_num_peers = data[10:12]
_decoded_mode = process_mode_byte(_hex_mode)
_decoded_flags = process_flags_bytes(_hex_flags)
self._local['NUM_PEERS'] = int(h(_num_peers), 16)
self._master['RADIO_ID'] = _peerid
self._master['MODE'] = _hex_mode
self._master['MODE_DECODE'] = _decoded_mode
self._master['FLAGS'] = _hex_flags
self._master['FLAGS_DECODE'] = _decoded_flags
self._master_stat['CONNECTED'] = True
self._master_stat['KEEP_ALIVES_OUTSTANDING'] = 0
logger.warning('(%s) Registration response (we requested reg) from the Master %s (%s peers)', self._network, int_id(_peerid), self._local['NUM_PEERS'])
return
# This is if we operate as a master... work in progress
elif _packettype == MASTER_REG_REQ:
_hex_mode = data[5]
_hex_flags = data[6:10]
_decoded_mode = process_mode_byte(_hex_mode)
_decoded_flags = process_flags_bytes(_hex_flags)
''' BELOW STOLEN FROM MASTER_REG_REPLY - MUST PARSE TO SEE IF THE PEER EXISTS AND IF NOT, THEN
WE NEED TO ADD IT TO THE PEER LIST WE KEEP IN OUR DATA STRUCTURE... WORK STARTS HERE
self._master['RADIO_ID'] = _peerid
self._master['MODE'] = _hex_mode
self._master['MODE_DECODE'] = _decoded_mode
self._master['FLAGS'] = _hex_flags
self._master['FLAGS_DECODE'] = _decoded_flags
self._master_stat['CONNECTED'] = True
self._master_stat['KEEP_ALIVES_OUTSTANDING'] = 0
'''
master_reg_reply_packet = self.hashed_packet(self._local['AUTH_KEY'], self.MASTER_REG_REPLY_PKT)
self.transport.write(master_reg_reply_packet, (host, port))
logger.debug('(%s) Master Registration Packet Received from peer %s', self._network, int_id(_peerid))
return
elif _packettype == MASTER_ALIVE_REQ:
master_alive_reply_packet = self.hashed_packet(self._local['AUTH_KEY'], self.MASTER_ALIVE_REPLY_PKT)
self.transport.write(master_alive_reply_packet, (host, port))
logger.debug('(%s) Master Keep-Alive Request Received from peer %s', self._network, int_id(_peerid))
return
elif _packettype == PEER_LIST_REQ:
logger.debug('(%s) Peer List Request Received from peer %s', self._network, int_id(_peerid))
return
# If there's a packet type we don't know about, it should be logged so we can figure it out and take an appropriate action!
else:
self.unknown_message(self._network, _packettype, _peerid, data)
return
#************************************************
# MAIN PROGRAM LOOP STARTS HERE
#************************************************
if __name__ == '__main__':
logger.info('DMRlink \'dmrlink.py\' (c) 2013, 2014 N0MJS & the K0USY Group - SYSTEM STARTING...')
networks = {}
for ipsc_network in NETWORK:
if NETWORK[ipsc_network]['LOCAL']['ENABLED']:
networks[ipsc_network] = IPSC(ipsc_network)
reactor.listenUDP(NETWORK[ipsc_network]['LOCAL']['PORT'], networks[ipsc_network])
reactor.run()
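#
# A minimal sketch of how an application might build on this module (the class and
# callback body are hypothetical): subclass IPSC, override only the callbacks of
# interest, and let the base class handle the IPSC connection maintenance.
#
#   from dmrlink import IPSC, NETWORK, networks, int_id
#   from twisted.internet import reactor
#
#   class logIPSC(IPSC):
#       def group_voice(self, _network, _src_sub, _dst_sub, _ts, _end, _peerid, _data):
#           print('Group voice on {} from sub {}'.format(_network, int_id(_src_sub)))
#
#   if __name__ == '__main__':
#       for ipsc_network in NETWORK:
#           if NETWORK[ipsc_network]['LOCAL']['ENABLED']:
#               networks[ipsc_network] = logIPSC(ipsc_network)
#               reactor.listenUDP(NETWORK[ipsc_network]['LOCAL']['PORT'], networks[ipsc_network])
#       reactor.run()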