# Copyright (c) 2013 Cortney T. Buffington, N0MJS and the K0USY Group. n0mjs@me.com
#
# This work is licensed under the Creative Commons Attribution-ShareAlike
# 3.0 Unported License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-sa/3.0/ or send a letter to
# Creative Commons, 444 Castro Street, Suite 900, Mountain View,
# California, 94041, USA.

from __future__ import print_function
from twisted.internet.protocol import DatagramProtocol
from twisted.internet import reactor
from twisted.internet import task
from binascii import b2a_hex as h
import ConfigParser
import os
import sys
import argparse
import binascii
import hmac
import hashlib
import socket
import csv
import re
import time

#************************************************
# IMPORTING OTHER FILES - '#include'
#************************************************

# Import system logger configuration
#
try:
    from ipsc.ipsc_logger import logger
except ImportError:
    sys.exit('System logger configuration not found or invalid')

# Import IPSC message types and version information
#
try:
    from ipsc.ipsc_message_types import *
except ImportError:
    sys.exit('IPSC message types file not found or invalid')

# Import IPSC flag mask values
#
try:
    from ipsc.ipsc_mask import *
except ImportError:
    sys.exit('IPSC mask values file not found or invalid')

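# Note: the two wildcard imports above are what provide the packet-type
# constants (e.g. GROUP_VOICE, PEER_ALIVE_REQ, MASTER_REG_REPLY, IPSC_VER)
# and the bit masks (e.g. PEER_OP_MSK, IPSC_TS1_MSK, TS_CALL_MSK, END_MSK)
# referenced throughout the rest of this module.
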
# Import the alias files for numeric IDs. This is split into separate
# dictionaries to save time over making lookups in one huge dictionary
#
subscriber_ids = {}
peer_ids = {}
talkgroup_ids = {}

try:
    with open('./subscriber_ids.csv', 'rU') as subscriber_ids_csv:
        subscribers = csv.reader(subscriber_ids_csv, dialect='excel', delimiter=',')
        for row in subscribers:
            subscriber_ids[int(row[1])] = (row[0])
except IOError:
    logger.warning('subscriber_ids.csv not found: Subscriber aliases will not be available')

try:
    with open('./peer_ids.csv', 'rU') as peer_ids_csv:
        peers = csv.reader(peer_ids_csv, dialect='excel', delimiter=',')
        for row in peers:
            peer_ids[int(row[1])] = (row[0])
except IOError:
    logger.warning('peer_ids.csv not found: Peer aliases will not be available')

try:
    with open('./talkgroup_ids.csv', 'rU') as talkgroup_ids_csv:
        talkgroups = csv.reader(talkgroup_ids_csv, dialect='excel', delimiter=',')
        for row in talkgroups:
            talkgroup_ids[int(row[1])] = (row[0])
except IOError:
    logger.warning('talkgroup_ids.csv not found: Talkgroup aliases will not be available')

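# Each alias CSV is assumed to be a plain two-column file with the alias text
# in the first column and the decimal ID in the second, for example
# (hypothetical entries):
#
#   Repeater-1,312000
#   N0MJS,3120101
#
# A row that does not fit that shape (e.g. a non-numeric second column) will
# raise an uncaught exception here and stop start-up.
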
#************************************************
# PARSE THE CONFIG FILE AND BUILD STRUCTURE
#************************************************

networks = {}
NETWORK = {}

config = ConfigParser.ConfigParser()
config.read('./dmrlink.cfg')

try:
    for section in config.sections():
        if section == 'GLOBAL':
            pass
        else:
            NETWORK.update({section: {'LOCAL': {}, 'MASTER': {}, 'PEERS': {}}})
            NETWORK[section]['LOCAL'].update({
                'MODE': '',
                'PEER_OPER': True,
                'PEER_MODE': 'DIGITAL',
                'FLAGS': '',
                'MAX_MISSED': 10,
                'NUM_PEERS': 0,
                'STATUS': {
                    'ACTIVE': False
                },
                'ENABLED': config.getboolean(section, 'ENABLED'),
                'TS1_LINK': config.getboolean(section, 'TS1_LINK'),
                'TS2_LINK': config.getboolean(section, 'TS2_LINK'),
                'AUTH_ENABLED': config.getboolean(section, 'AUTH_ENABLED'),
                'RADIO_ID': hex(int(config.get(section, 'RADIO_ID')))[2:].rjust(8,'0').decode('hex'),
                'PORT': config.getint(section, 'PORT'),
                'ALIVE_TIMER': config.getint(section, 'ALIVE_TIMER'),
                'AUTH_KEY': (config.get(section, 'AUTH_KEY').rjust(40,'0')).decode('hex'),
            })
            NETWORK[section]['MASTER'].update({
                'RADIO_ID': '\x00\x00\x00\x00',
                'MODE': '\x00',
                'PEER_OPER': False,
                'PEER_MODE': '',
                'TS1_LINK': False,
                'TS2_LINK': False,
                'FLAGS': '\x00\x00\x00\x00',
                'STATUS': {
                    'CONNECTED': False,
                    'PEER_LIST': False,
                    'KEEP_ALIVES_SENT': 0,
                    'KEEP_ALIVES_MISSED': 0,
                    'KEEP_ALIVES_OUTSTANDING': 0
                },
                'IP': config.get(section, 'MASTER_IP'),
                'PORT': config.getint(section, 'MASTER_PORT')
            })

            if NETWORK[section]['LOCAL']['AUTH_ENABLED']:
                # 0x1C - Voice and Data calls only, 0xDC - Voice, Data and XCMP/XNL
                NETWORK[section]['LOCAL']['FLAGS'] = '\x00\x00\x00\x1C'
                #NETWORK[section]['LOCAL']['FLAGS'] = '\x00\x00\x00\xDC'
            else:
                NETWORK[section]['LOCAL']['FLAGS'] = '\x00\x00\x00\x0C'

            if not NETWORK[section]['LOCAL']['TS1_LINK'] and not NETWORK[section]['LOCAL']['TS2_LINK']:
                NETWORK[section]['LOCAL']['MODE'] = '\x65'
            elif NETWORK[section]['LOCAL']['TS1_LINK'] and not NETWORK[section]['LOCAL']['TS2_LINK']:
                NETWORK[section]['LOCAL']['MODE'] = '\x66'
            elif not NETWORK[section]['LOCAL']['TS1_LINK'] and NETWORK[section]['LOCAL']['TS2_LINK']:
                NETWORK[section]['LOCAL']['MODE'] = '\x69'
            else:
                NETWORK[section]['LOCAL']['MODE'] = '\x6A'
except:
    logger.critical('Could not parse configuration file, exiting...')
    sys.exit('Could not parse configuration file, exiting...')

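# For reference, every section of dmrlink.cfg other than [GLOBAL] describes one
# IPSC network and must supply the keys read above. A minimal sketch, with
# purely illustrative values:
#
#   [IPSC1]
#   ENABLED: True
#   TS1_LINK: True
#   TS2_LINK: True
#   AUTH_ENABLED: True
#   AUTH_KEY: 0123456789abcdef
#   RADIO_ID: 312000
#   PORT: 50000
#   ALIVE_TIMER: 5
#   MASTER_IP: 1.2.3.4
#   MASTER_PORT: 50000
#
# RADIO_ID is a decimal integer (packed to 4 bytes), AUTH_KEY is a hex string
# (zero-padded to 20 bytes), ALIVE_TIMER is the maintenance-loop interval in
# seconds, and the MODE byte chosen above ('\x65'/'\x66'/'\x69'/'\x6A') encodes
# which timeslots this peer links; the bit definitions live in ipsc/ipsc_mask.py.
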
#************************************************
# UTILITY FUNCTIONS FOR INTERNAL USE
#************************************************

# Convert a hex string to an int (radio ID, etc.)
#
def int_id(_hex_string):
    return int(h(_hex_string), 16)

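# Example: int_id('\x00\x2f\x9b\xe5') -> int('002f9be5', 16) -> 3120101
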
# Re-Write Source Radio-ID (DMR NAT)
#
def dmr_nat(_data, _nat_id):
    src_radio_id = _data[6:9]
    _data = _data.replace(src_radio_id, _nat_id)
    return _data

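# The source subscriber ID sits at bytes 6-8 of a user packet (see
# datagramReceived below), and the literal string replacement above rewrites
# every occurrence of that 3-byte ID in the frame without treating it as a
# regular expression. Hypothetical example: dmr_nat(data, '\x2f\x9b\xe5')
# makes the traffic appear to originate from subscriber 3120101.
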
# Lookup text data for numeric IDs
#
def get_info(_id, _dict):
    if _id in _dict:
        return _dict[_id]
    return _id


# Determine if the provided peer ID is valid for the provided network
#
def valid_peer(_peer_list, _peerid):
    if _peerid in _peer_list:
        return True
    return False


# Determine if the provided master ID is valid for the provided network
#
def valid_master(_network, _peerid):
    if NETWORK[_network]['MASTER']['RADIO_ID'] == _peerid:
        return True
    else:
        return False

# Accept a complete packet, ready to be sent, and send it to all active peers + master in an IPSC
#
def send_to_ipsc(_target, _packet):
    # Send to the Master
    networks[_target].transport.write(_packet, (NETWORK[_target]['MASTER']['IP'], NETWORK[_target]['MASTER']['PORT']))
    # Send to each connected Peer
    for peer in NETWORK[_target]['PEERS'].values():
        if peer['STATUS']['CONNECTED'] == True:
            networks[_target].transport.write(_packet, (peer['IP'], peer['PORT']))

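# Hypothetical use from application code once the reactor is running, where
# 'IPSC1' is a section name from dmrlink.cfg and payload is a fully formed
# IPSC frame:
#
#   send_to_ipsc('IPSC1', networks['IPSC1'].hashed_packet(
#       NETWORK['IPSC1']['LOCAL']['AUTH_KEY'], payload))
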
# De-register a peer from an IPSC by removing its information
#
def de_register_peer(_network, _peerid):
    # Look for the peer in our data structure, and remove it if we have it
    if _peerid in NETWORK[_network]['PEERS'].keys():
        del NETWORK[_network]['PEERS'][_peerid]
        logger.info('(%s) Peer De-Registration Requested for: %s', _network, h(_peerid))
        return
    else:
        logger.warning('(%s) Peer De-Registration Requested for: %s, but we don\'t have a listing for this peer', _network, h(_peerid))
        pass

# Take a received peer list and the network it belongs to, then parse it and
# populate that network's 'PEERS' entry in NETWORK with the results.
#
def process_peer_list(_data, _network):
    # Determine the length of the peer list for the parsing iterator
    _peer_list_length = int(h(_data[5:7]), 16)
    # Record the number of peers in the data structure... we'll use it later (11 bytes per peer entry)
    NETWORK[_network]['LOCAL']['NUM_PEERS'] = _peer_list_length/11
    logger.info('(%s) Peer List Received from Master: %s peers in this IPSC', _network, _peer_list_length/11)

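    # Each entry in the peer list is 11 bytes: a 4-byte radio ID, a 4-byte
    # IPv4 address, a 2-byte UDP port and a 1-byte mode/flags field, which is
    # why the loop below steps through the payload 11 bytes at a time starting
    # after the 7-byte header.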
    # Iterate each peer entry in the peer list. Skip the header, then pull the next peer, the next, etc.
    for i in range(7, (_peer_list_length)+7, 11):
        # Extract various elements from each entry...
        _hex_radio_id = (_data[i:i+4])
        _hex_address = (_data[i+4:i+8])
        _ip_address = socket.inet_ntoa(_hex_address)
        _hex_port = (_data[i+8:i+10])
        _port = int(h(_hex_port), 16)
        _hex_mode = (_data[i+10:i+11])
        _mode = int(h(_hex_mode), 16)
        # mask individual Mode parameters
        _link_op = _mode & PEER_OP_MSK
        _link_mode = _mode & PEER_MODE_MSK
        _ts1 = _mode & IPSC_TS1_MSK
        _ts2 = _mode & IPSC_TS2_MSK

        # Determine whether or not the peer is operational
        if _link_op == 0b01000000:
            _peer_op = True
        else:
            _peer_op = False

        # Determine the operational mode of the peer
        if _link_mode == 0b00000000:
            _peer_mode = 'NO_RADIO'
        elif _link_mode == 0b00010000:
            _peer_mode = 'ANALOG'
        elif _link_mode == 0b00100000:
            _peer_mode = 'DIGITAL'
        else:
            _peer_mode = 'NO_RADIO'

        # Determine whether or not timeslot 1 is linked
        if _ts1 == 0b00001000:
            _ts1 = True
        else:
            _ts1 = False

        # Determine whether or not timeslot 2 is linked
        if _ts2 == 0b00000010:
            _ts2 = True
        else:
            _ts2 = False

        # If this entry was NOT already in our list, add it.
        # Note: the PEERS dictionary is keyed by the raw 4-byte radio ID so we
        #       can identify a peer quickly when packets arrive.
        if _hex_radio_id not in NETWORK[_network]['PEERS'].keys():
            NETWORK[_network]['PEERS'][_hex_radio_id] = {
                'IP': _ip_address,
                'PORT': _port,
                'MODE': _hex_mode,
                'PEER_OPER': _peer_op,
                'PEER_MODE': _peer_mode,
                'TS1_LINK': _ts1,
                'TS2_LINK': _ts2,
                'STATUS': {
                    'CONNECTED': False,
                    'KEEP_ALIVES_SENT': 0,
                    'KEEP_ALIVES_MISSED': 0,
                    'KEEP_ALIVES_OUTSTANDING': 0
                }
            }

# Gratuitous print-out of the peer list... pretty much debug stuff.
#
def print_peer_list(_network):
    _peers = NETWORK[_network]['PEERS']

    _status = NETWORK[_network]['MASTER']['STATUS']['PEER_LIST']
    #print('Peer List Status for {}: {}' .format(_network, _status))

    if _status and not NETWORK[_network]['PEERS']:
        print('We are the only peer for: %s' % _network)
        print('')
        return

    print('Peer List for: %s' % _network)
    for peer in _peers.keys():
        _this_peer = _peers[peer]
        _this_peer_stat = _this_peer['STATUS']

        if peer == NETWORK[_network]['LOCAL']['RADIO_ID']:
            me = '(self)'
        else:
            me = ''

        print('\tRADIO ID: {} {}' .format(int(h(peer), 16), me))
        print('\t\tIP Address: {}:{}' .format(_this_peer['IP'], _this_peer['PORT']))
        print('\t\tOperational: {}, Mode: {}, TS1 Link: {}, TS2 Link: {}' .format(_this_peer['PEER_OPER'], _this_peer['PEER_MODE'], _this_peer['TS1_LINK'], _this_peer['TS2_LINK']))
        print('\t\tStatus: {}, KeepAlives Sent: {}, KeepAlives Outstanding: {}, KeepAlives Missed: {}' .format(_this_peer_stat['CONNECTED'], _this_peer_stat['KEEP_ALIVES_SENT'], _this_peer_stat['KEEP_ALIVES_OUTSTANDING'], _this_peer_stat['KEEP_ALIVES_MISSED']))

    print('')

# Gratuitous print-out of Master info... pretty much debug stuff.
#
def print_master(_network):
    _master = NETWORK[_network]['MASTER']
    print('Master for %s' % _network)
    print('\tRADIO ID: {}' .format(int(h(_master['RADIO_ID']), 16)))
    print('\t\tIP Address: {}:{}' .format(_master['IP'], _master['PORT']))
    print('\t\tOperational: {}, Mode: {}, TS1 Link: {}, TS2 Link: {}' .format(_master['PEER_OPER'], _master['PEER_MODE'], _master['TS1_LINK'], _master['TS2_LINK']))
    print('\t\tStatus: {}, KeepAlives Sent: {}, KeepAlives Outstanding: {}, KeepAlives Missed: {}' .format(_master['STATUS']['CONNECTED'], _master['STATUS']['KEEP_ALIVES_SENT'], _master['STATUS']['KEEP_ALIVES_OUTSTANDING'], _master['STATUS']['KEEP_ALIVES_MISSED']))


#************************************************
#********                                ***********
#********     IPSC Network 'Engine'      ***********
#********                                ***********
#************************************************

#************************************************
# Base Class (used nearly all of the time)
#************************************************

class IPSC(DatagramProtocol):

    # Modify the initializer to set up our environment and build the packets
    # we need to maintain connections
    #
    def __init__(self, *args, **kwargs):
        if len(args) == 1:
            # Housekeeping: create references to the configuration and status data for this IPSC instance.
            # Some configuration objects that are used frequently and have lengthy names are shortened,
            # such as (self._master_sock), which expands to (self._config['MASTER']['IP'], self._config['MASTER']['PORT']).
            # Note that many of them reference each other... this is the Pythonic way.
            #
            self._network = args[0]
            self._config = NETWORK[self._network]
            #
            self._local = self._config['LOCAL']
            self._local_stat = self._local['STATUS']
            self._local_id = self._local['RADIO_ID']
            #
            self._master = self._config['MASTER']
            self._master_stat = self._master['STATUS']
            self._master_sock = self._master['IP'], self._master['PORT']
            #
            self._peers = self._config['PEERS']
            #
            # At times, parsing a simple structure is much less spendy than iterating
            # a list of dictionaries... maybe I'll find a better way in the future.
            # We also have to know when we have a new peer list, so a flag indicates
            # whether we do (or don't): the master's 'PEER_LIST' status entry.
            #
            args = ()

            # Packet 'constructors' - build the necessary control packets for this IPSC instance.
            # This isn't really necessary for anything other than readability (reduction of code golf)
            #
            self.TS_FLAGS = (self._local['MODE'] + self._local['FLAGS'])
            self.MASTER_REG_REQ_PKT = (MASTER_REG_REQ + self._local_id + self.TS_FLAGS + IPSC_VER)
            self.MASTER_ALIVE_PKT = (MASTER_ALIVE_REQ + self._local_id + self.TS_FLAGS + IPSC_VER)
            self.PEER_LIST_REQ_PKT = (PEER_LIST_REQ + self._local_id)
            self.PEER_REG_REQ_PKT = (PEER_REG_REQ + self._local_id + IPSC_VER)
            self.PEER_REG_REPLY_PKT = (PEER_REG_REPLY + self._local_id + IPSC_VER)
            self.PEER_ALIVE_REQ_PKT = (PEER_ALIVE_REQ + self._local_id + self.TS_FLAGS)
            self.PEER_ALIVE_REPLY_PKT = (PEER_ALIVE_REPLY + self._local_id + self.TS_FLAGS)
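            # These are just the raw payloads; when authentication is enabled the
            # 10-byte HMAC trailer is appended by hashed_packet() each time one of
            # them is actually transmitted (see maintenance_loop and datagramReceived).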
            logger.info('(%s) IPSC Instance Created', self._network)
        else:
            # If we didn't get called correctly, log it!
            #
            logger.error('IPSC Instance Could Not be Created... Exiting')
            sys.exit()

    # This is called by REACTOR when it starts. We use it to set up the timed
    # loop for each instance of the IPSC engine.
    #
    def startProtocol(self):
        # Timed loops for:
        #   IPSC connection establishment and maintenance
        #   Reporting/Housekeeping
        #
        self._maintenance = task.LoopingCall(self.maintenance_loop)
        self._maintenance_loop = self._maintenance.start(self._local['ALIVE_TIMER'])
        #
        self._reporting = task.LoopingCall(self.reporting_loop)
        self._reporting_loop = self._reporting.start(10)

    #************************************************
    # CALLBACK FUNCTIONS FOR USER PACKET TYPES
    #************************************************

    def call_ctl_1(self, _network, _data):
        print('({}) Call Control Type 1 Packet Received' .format(_network))

    def call_ctl_2(self, _network, _data):
        print('({}) Call Control Type 2 Packet Received' .format(_network))

    def call_ctl_3(self, _network, _data):
        print('({}) Call Control Type 3 Packet Received' .format(_network))

    def xcmp_xnl(self, _network, _data):
        print('({}) XCMP/XNL Packet Received' .format(_network))

    def group_voice(self, _network, _src_sub, _dst_sub, _ts, _end, _peerid, _data):
        _dst_sub = get_info(int_id(_dst_sub), talkgroup_ids)
        _peerid = get_info(int_id(_peerid), peer_ids)
        _src_sub = get_info(int_id(_src_sub), subscriber_ids)
        print('({}) Group Voice Packet Received From: {}, IPSC Peer {}, Destination {}' .format(_network, _src_sub, _peerid, _dst_sub))

    def private_voice(self, _network, _src_sub, _dst_sub, _ts, _end, _peerid, _data):
        _dst_sub = get_info(int_id(_dst_sub), subscriber_ids)
        _peerid = get_info(int_id(_peerid), peer_ids)
        _src_sub = get_info(int_id(_src_sub), subscriber_ids)
        print('({}) Private Voice Packet Received From: {}, IPSC Peer {}, Destination {}' .format(_network, _src_sub, _peerid, _dst_sub))

    def group_data(self, _network, _src_sub, _dst_sub, _ts, _end, _peerid, _data):
        _dst_sub = get_info(int_id(_dst_sub), talkgroup_ids)
        _peerid = get_info(int_id(_peerid), peer_ids)
        _src_sub = get_info(int_id(_src_sub), subscriber_ids)
        print('({}) Group Data Packet Received From: {}, IPSC Peer {}, Destination {}' .format(_network, _src_sub, _peerid, _dst_sub))

    def private_data(self, _network, _src_sub, _dst_sub, _ts, _end, _peerid, _data):
        _dst_sub = get_info(int_id(_dst_sub), subscriber_ids)
        _peerid = get_info(int_id(_peerid), peer_ids)
        _src_sub = get_info(int_id(_src_sub), subscriber_ids)
        print('({}) Private Data Packet Received From: {}, IPSC Peer {}, Destination {}' .format(_network, _src_sub, _peerid, _dst_sub))

    def unknown_message(self, _network, _packettype, _peerid, _data):
        _time = time.strftime('%m/%d/%y %H:%M:%S')
        _packettype = h(_packettype)
        _peerid = get_info(int_id(_peerid), peer_ids)
        print('{} ({}) Unknown message type encountered\n\tPacket Type: {}\n\tFrom: {}' .format(_time, _network, _packettype, _peerid))
        print('\t', h(_data))

    # Take a packet to be SENT, calculate the auth hash and return the whole thing
    #
    def hashed_packet(self, _key, _data):
        _hash = binascii.a2b_hex((hmac.new(_key,_data,hashlib.sha1)).hexdigest()[:20])
        return (_data + _hash)

    # Remove the hash from a packet and return the payload
    #
    def strip_hash(self, _data):
        return _data[:-10]

    # Take a RECEIVED packet, calculate the auth hash and verify authenticity
    #
    def validate_auth(self, _key, _data):
        _payload = self.strip_hash(_data)
        _hash = _data[-10:]
        _chk_hash = binascii.a2b_hex((hmac.new(_key,_payload,hashlib.sha1)).hexdigest()[:20])

        if _chk_hash == _hash:
            return True
        else:
            return False

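    # Authentication note: every authenticated IPSC frame carries a trailer that
    # is the first 10 bytes (20 hex digits) of HMAC-SHA1 over the payload, keyed
    # with the network's AUTH_KEY. hashed_packet() appends it on send and
    # validate_auth() recomputes and compares it on receive.
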
    #************************************************
    # TIMED LOOP - MY CONNECTION MAINTENANCE
    #************************************************

    def reporting_loop(self):
        # Right now, without this, we really don't know anything is happening.
        # print_master(self._network)
        # print_peer_list(self._network)
        logger.debug('(%s) Periodic Reporting Loop Started', self._network)
        pass

    def maintenance_loop(self):
        # If the master isn't connected, we have to do that before we can do anything else!
        #
        if self._master_stat['CONNECTED'] == False:
            reg_packet = self.hashed_packet(self._local['AUTH_KEY'], self.MASTER_REG_REQ_PKT)
            self.transport.write(reg_packet, (self._master_sock))

        # Once the master is connected, we have to send keep-alives... and make sure we get them back
        elif (self._master_stat['CONNECTED'] == True):
            # Send keep-alive to the master
            master_alive_packet = self.hashed_packet(self._local['AUTH_KEY'], self.MASTER_ALIVE_PKT)
            self.transport.write(master_alive_packet, (self._master_sock))

            # If we had a keep-alive outstanding by the time we send another, mark it missed.
            if (self._master_stat['KEEP_ALIVES_OUTSTANDING']) > 0:
                self._master_stat['KEEP_ALIVES_MISSED'] += 1
                logger.info('(%s) Master Keep-Alive Missed', self._network)

            # If we have missed too many keep-alives, de-register the master and start over.
            if self._master_stat['KEEP_ALIVES_OUTSTANDING'] >= self._local['MAX_MISSED']:
                self._master_stat['CONNECTED'] = False
                logger.error('(%s) Maximum Master Keep-Alives Missed -- De-registering the Master', self._network)

            # Update our stats before we move on...
            self._master_stat['KEEP_ALIVES_SENT'] += 1
            self._master_stat['KEEP_ALIVES_OUTSTANDING'] += 1

        else:
            # This is bad. If we get here, we need to reset the state and try again
            logger.error('->> (%s) Master in UNKNOWN STATE: %s:%s', self._network, self._master_sock[0], self._master_sock[1])
            self._master_stat['CONNECTED'] = False

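        # Keep-alive bookkeeping: KEEP_ALIVES_OUTSTANDING is incremented every
        # time a keep-alive is sent and zeroed when the matching *_ALIVE_REPLY
        # arrives in datagramReceived(), so a non-zero value here means the
        # previous keep-alive was never answered.
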
        # If the master is connected and we don't have a peer-list yet....
        #
        if ((self._master_stat['CONNECTED'] == True) and (self._master_stat['PEER_LIST'] == False)):
            # Ask the master for a peer-list
            peer_list_req_packet = self.hashed_packet(self._local['AUTH_KEY'], self.PEER_LIST_REQ_PKT)
            self.transport.write(peer_list_req_packet, (self._master_sock))
            logger.info('(%s) No Peer List - Requesting One From the Master', self._network)

        # If we do have a peer-list, we need to register with the peers and send keep-alives...
        #
        if (self._master_stat['PEER_LIST'] == True):
            # Iterate the list of peers... so we do this for each one.
            for peer_id in self._peers.keys():
                peer = self._peers[peer_id]

                # We will show up in the peer list, but shouldn't try to talk to ourselves.
                if peer_id == self._local_id:
                    continue

                # If we haven't registered to a peer, send a registration
                if peer['STATUS']['CONNECTED'] == False:
                    peer_reg_packet = self.hashed_packet(self._local['AUTH_KEY'], self.PEER_REG_REQ_PKT)
                    self.transport.write(peer_reg_packet, (peer['IP'], peer['PORT']))
                    logger.info('(%s) Registering with Peer %s', self._network, int_id(peer_id))

                # If we have registered with the peer, then send a keep-alive
                elif peer['STATUS']['CONNECTED'] == True:
                    peer_alive_req_packet = self.hashed_packet(self._local['AUTH_KEY'], self.PEER_ALIVE_REQ_PKT)
                    self.transport.write(peer_alive_req_packet, (peer['IP'], peer['PORT']))

                    # If we have a keep-alive outstanding by the time we send another, mark it missed.
                    if peer['STATUS']['KEEP_ALIVES_OUTSTANDING'] > 0:
                        peer['STATUS']['KEEP_ALIVES_MISSED'] += 1
                        logger.info('(%s) Peer Keep-Alive Missed for %s', self._network, int_id(peer_id))

                    # If we have missed too many keep-alives, de-register the peer and start over.
                    if peer['STATUS']['KEEP_ALIVES_OUTSTANDING'] >= self._local['MAX_MISSED']:
                        peer['STATUS']['CONNECTED'] = False
                        del self._peers[peer_id]    # Because once it's out of the dictionary, we can't use it for anything else.
                        logger.warning('(%s) Maximum Peer Keep-Alives Missed -- De-registering the Peer: %s', self._network, int_id(peer_id))
                        continue

                    # Update our stats before moving on...
                    peer['STATUS']['KEEP_ALIVES_SENT'] += 1
                    peer['STATUS']['KEEP_ALIVES_OUTSTANDING'] += 1

    # For public display of information, etc. - anything not part of internal logging/diagnostics
    #
    def _notify_event(self, network, event, info):
        """
        Used internally whenever an event happens that may be useful to notify the outside world about.
        Arguments:
            network: string, network name to look up in config
            event:   string, basic description
            info:    dict, in the interest of accomplishing as much as possible without code changes.
                     The dict will typically contain a peer_id so the origin of the event is known.
        """
        pass

    #************************************************
    # RECEIVED DATAGRAM - ACT IMMEDIATELY!!!
    #************************************************

    # Actions for received packets by type: for every packet received, there are some things that we need to do:
    #   Decode some of the info
    #   Check for auth and authenticate the packet
    #   Strip the hash from the end... we don't need it anymore
    #
    # Once those are done, we move on to the processing or callbacks for each packet type.
    #
    def datagramReceived(self, data, (host, port)):
        _packettype = data[0:1]
        _peerid = data[1:5]

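        # Header layout used below: byte 0 is the packet type, bytes 1-4 are the
        # originating peer's radio ID and, for user packets, bytes 6-8 carry the
        # source subscriber, bytes 9-11 the destination, and byte 17 the call
        # information bits (timeslot and end-of-call). The last 10 bytes of an
        # authenticated frame are the HMAC trailer stripped just below.
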
        # Authenticate the packet
        if self.validate_auth(self._local['AUTH_KEY'], data) == False:
            logger.warning('(%s) AuthError: IPSC packet failed authentication. Type %s: Peer ID: %s', self._network, h(_packettype), int(h(_peerid), 16))
            return

        # Strip the hash, we won't need it anymore
        data = self.strip_hash(data)

        # Packet types that must be originated from a peer (including the master peer)
        if (_packettype in ANY_PEER_REQUIRED):
            if not (valid_master(self._network, _peerid) or valid_peer(self._peers.keys(), _peerid)):
                logger.warning('(%s) PeerError: Peer not in peer-list: %s', self._network, int(h(_peerid), 16))
                return

            # User, as in "subscriber", generated packets - a.k.a. someone transmitted
            if (_packettype in USER_PACKETS):
                # Extract commonly used items from the packet header
                _src_sub = data[6:9]
                _dst_sub = data[9:12]
                _call = int_id(data[17:18])
                _ts = bool(_call & TS_CALL_MSK)
                _end = bool(_call & END_MSK)

                # User Voice and Data Call Types:
                if (_packettype == GROUP_VOICE):
                    self._notify_event(self._network, 'group_voice', {'peer_id': int(h(_peerid), 16)})
                    self.group_voice(self._network, _src_sub, _dst_sub, _ts, _end, _peerid, data)
                    return

                elif (_packettype == PVT_VOICE):
                    self._notify_event(self._network, 'private_voice', {'peer_id': int(h(_peerid), 16)})
                    self.private_voice(self._network, _src_sub, _dst_sub, _ts, _end, _peerid, data)
                    return

                elif (_packettype == GROUP_DATA):
                    self._notify_event(self._network, 'group_data', {'peer_id': int(h(_peerid), 16)})
                    self.group_data(self._network, _src_sub, _dst_sub, _ts, _end, _peerid, data)
                    return

                elif (_packettype == PVT_DATA):
                    self._notify_event(self._network, 'private_data', {'peer_id': int(h(_peerid), 16)})
                    self.private_data(self._network, _src_sub, _dst_sub, _ts, _end, _peerid, data)
                    return
                return

            # Other peer-required types that we don't do much or anything with yet
            elif (_packettype == XCMP_XNL):
                self.xcmp_xnl(self._network, data)
                return

            elif (_packettype == CALL_CTL_1):
                self.call_ctl_1(self._network, data)
                return

            elif (_packettype == CALL_CTL_2):
                self.call_ctl_2(self._network, data)
                return

            elif (_packettype == CALL_CTL_3):
                self.call_ctl_3(self._network, data)
                return

            # Connection maintenance packets that fall into this category
            elif (_packettype == DE_REG_REQ):
                de_register_peer(self._network, _peerid)
                logger.warning('(%s) Peer De-Registration Request From: %s:%s', self._network, host, port)
                return

            elif (_packettype == DE_REG_REPLY):
                logger.warning('(%s) Peer De-Registration Reply From: %s:%s', self._network, host, port)
                return

            elif (_packettype == RPT_WAKE_UP):
                logger.debug('(%s) Repeater Wake-Up Packet From: %s:%s', self._network, host, port)
                return
            return

        # Packet types that must be originated from a peer
        if (_packettype in PEER_REQUIRED):
            if valid_peer(self._peers.keys(), _peerid) == False:
                logger.warning('(%s) PeerError: Peer %s not in peer-list: %s', self._network, int(h(_peerid), 16), self._peers.keys())
                return

            # Packets we send...
            if (_packettype == PEER_ALIVE_REQ):
                # Generate a hashed packet from our template and send it.
                peer_alive_reply_packet = self.hashed_packet(self._local['AUTH_KEY'], self.PEER_ALIVE_REPLY_PKT)
                self.transport.write(peer_alive_reply_packet, (host, port))
                return

            elif (_packettype == PEER_REG_REQ):
                peer_reg_reply_packet = self.hashed_packet(self._local['AUTH_KEY'], self.PEER_REG_REPLY_PKT)
                self.transport.write(peer_reg_reply_packet, (host, port))
                return

            # Packets we receive...
            elif (_packettype == PEER_ALIVE_REPLY):
                if _peerid in self._peers.keys():
                    self._peers[_peerid]['STATUS']['KEEP_ALIVES_OUTSTANDING'] = 0
                return

            elif (_packettype == PEER_REG_REPLY):
                if _peerid in self._peers.keys():
                    self._peers[_peerid]['STATUS']['CONNECTED'] = True
                return
            return

        # Packet types that must be originated from the Master
        # Packets we receive...
        if (_packettype in MASTER_REQUIRED):
            if valid_master(self._network, _peerid) == False:
                logger.warning('(%s) MasterError: %s is not the master peer', self._network, int(h(_peerid), 16))
                return

            if (_packettype == MASTER_ALIVE_REPLY):
                # This action is so simple, it doesn't require a callback function: the master is responding, we're good.
                self._master_stat['KEEP_ALIVES_OUTSTANDING'] = 0
                return

            elif (_packettype == PEER_LIST_REPLY):
                NETWORK[self._network]['MASTER']['STATUS']['PEER_LIST'] = True
                if len(data) > 18:
                    process_peer_list(data, self._network)
                return
            return

        # When we hear from the master, record its ID, flag that we're connected, and reset the dead counter.
        elif (_packettype == MASTER_REG_REPLY):
            self._master['RADIO_ID'] = _peerid
            self._master_stat['CONNECTED'] = True
            self._master_stat['KEEP_ALIVES_OUTSTANDING'] = 0
            return

        # We know about these types, but absolutely don't take an action
        elif (_packettype == MASTER_REG_REQ):
            # We can't operate as a master as of now, so we should never receive one of these.
            logger.debug('(%s) Master Registration Packet Received - WE ARE NOT A MASTER!', self._network)
            return

        # If there's a packet type we don't know about, it should be logged so we can figure it out and take an appropriate action!
        else:
            self.unknown_message(self._network, _packettype, _peerid, data)
            return


#************************************************
# Derived Class
#   used in the rare event of an
#   unauthenticated IPSC network.
#************************************************

class UnauthIPSC(IPSC):

    # There isn't a hash to build, so just return the data
    #
    def hashed_packet(self, _key, _data):
        return _data

    # There is no hash to remove, so just return the data as-is
    #
    def strip_hash(self, _data):
        return _data

    # Everything is validated, so just return True
    #
    def validate_auth(self, _key, _data):
        return True

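# Selection between IPSC and UnauthIPSC happens in the __main__ block below:
# any network whose AUTH_ENABLED option is False in dmrlink.cfg gets this
# unauthenticated variant, so its frames are sent and accepted without the
# HMAC trailer.
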
#************************************************
# MAIN PROGRAM LOOP STARTS HERE
#************************************************

if __name__ == '__main__':
    logger.info('DMRlink \'dmrlink.py\' (c) 2013 N0MJS & the K0USY Group - SYSTEM STARTING...')
    networks = {}
    for ipsc_network in NETWORK:
        if (NETWORK[ipsc_network]['LOCAL']['ENABLED']):
            if NETWORK[ipsc_network]['LOCAL']['AUTH_ENABLED'] == True:
                networks[ipsc_network] = IPSC(ipsc_network)
            else:
                networks[ipsc_network] = UnauthIPSC(ipsc_network)
            reactor.listenUDP(NETWORK[ipsc_network]['LOCAL']['PORT'], networks[ipsc_network])
    reactor.run()
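
# A note on running this module (a sketch, not from the original docs): this is
# Python 2 / Twisted code, and it expects dmrlink.cfg (plus, optionally, the
# subscriber_ids.csv, peer_ids.csv and talkgroup_ids.csv alias files) in the
# current working directory, so a typical invocation is simply:
#
#   python dmrlink.py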