#!/usr/bin/python
#
# datapath-update --
#
#
#
#
#
#
#

import sys
import getopt
import os
import subprocess
import re
import math
import time
import logging
import logging.handlers
import traceback

import cumulus.platforms

# initialize the module-wide logger: all messages go to syslog via /dev/log
# (a "global" statement at module scope is a no-op, so the old one was removed)
logger = logging.getLogger('datapath-update')
fmt = logging.Formatter(fmt='%(name)s %(levelname)s: %(message)s')
handler = logging.handlers.SysLogHandler('/dev/log')
handler.setFormatter(fmt)
logger.setLevel(logging.INFO)
#logger.setLevel(logging.DEBUG)
logger.addHandler(handler)

# ==============================================================================
#
#                           V A L U E __ P A I R
#
# ==============================================================================
class ValuePair(object) :
    """A named value with an optional render format ('none' or 'hex')."""

    def __init__ (self, name, value, format='none') :
        self.name, self.value, self.format = name, value, format

    def get_name(self) :
        return self.name

    def get_value(self) :
        return self.value

    def set_value(self, value) :
        self.value = value

# ==============================================================================
#
#                              R E G I S T E R
#
# ==============================================================================
class Register(object) :
    """A device register line: a set-op string, positional values, a list of
    name=value pairs (ValuePair) and an optional trailing comment, printable
    in SDK 'set' syntax."""

    def __init__ (self, name, set_op_str, value, value_pair, comment) :
        self.name        = name
        self.set_op_str  = set_op_str
        self.value       = value        # list of positional values
        self.value_pair  = value_pair   # list of ValuePair objects
        self.modify      = []           # follow-on modify entries (Register)
        self.comment     = comment

    def add_value(self, value) :
        self.value.append(value)

    def set_value(self, value, index) :
        # grow the value list with zeros so 'index' is addressable
        for i in range(len(self.value), index+1) :
            self.value.append(0)
        self.value[index] = value

    def get_value(self, index) :
        return self.value[index]

    def get_name(self) :
        return self.name

    def add_set_op_str(self, set_op_str) :
        self.set_op_str = set_op_str

    def add_modify(self, modify) :
        self.modify.append(modify)

    def print_entry (self, register, file) :
        """Write one register line for 'register' to 'file'."""
        file.write("%s %s" % (register.set_op_str, register.name))
        for value in register.value :
            file.write(" %s" % value)
        for pair in register.value_pair :
            if pair.format == 'hex' :
                file.write(" %s=0x%x" % (pair.get_name(), pair.get_value()))
            else :
                file.write(" %s=%s" % (pair.get_name(), pair.get_value()))
        if register.comment != None :
            file.write('    # %s\n' % register.comment)
        else :
            file.write('\n')

    def print_register (self, file) :
        """Write this register followed by all of its modify entries."""
        self.print_entry(self, file)
        for mod in self.modify :
            # BUG FIX: was self.generate_print(mod, file), a method that
            # does not exist; modify entries are printed with print_entry,
            # matching RegisterTemplate.print_register below.
            self.print_entry(mod, file)


# ==============================================================================
#
#                       R E G I S T E R __ T E M P L A T E
#
# ==============================================================================
class RegisterTemplate(Register) :
    """A register set-op written as an SDK 'for' loop over port index $I."""

    def __init__ (self, name, set_op_str, loop_list, value_list, value_pair_list) :
        # loop_list is (start, end, step) for the generated SDK for-loop
        self.loop_list = loop_list
        super(RegisterTemplate,self).__init__(name, set_op_str, value_list, value_pair_list, None)

    def print_entry (self, register, file) :
        """Write one templated register line to 'file'.

        BUG FIX: the value_pair loop and the closing quote were nested
        inside the value loop, so the closing "'" and every name=value pair
        were emitted once per positional value; values were also written
        with no separating space.
        """
        file.write("for I=%s,%s,%s '%s %s $I" % (register.loop_list[0],
                                                 register.loop_list[1],
                                                 register.loop_list[2],
                                                 register.set_op_str,
                                                 register.name))
        for value in register.value :
            file.write(" %s" % value)
        for pair in register.value_pair :
            file.write(" %s=%s" % (pair.get_name(), pair.get_value()))
        file.write("'\n")

    def print_register (self, file) :
        self.print_entry(self, file)
        for mod in self.modify :
            self.print_entry(mod, file)

# ==============================================================================
#
#                         B U F F E R __ D E S C
#
# ==============================================================================
class BufferDesc(object):
    """Tracks the chip buffer cell budget (per direction) plus the various
    buffer-limit tables that the configuration code fills in later."""

    def __init__ (self) :
        # running cell accounting, one counter dict per direction
        self.cell_count = {
            'ingress': {"pg_min"          : 0,
                        "pg_hdrm"         : 0,
                        "service_pool"    : 0,
                        "shared"          : 0,
                        "global_headroom" : 0,
                        "available"       : 0},
            'egress':  {"minimum"   : 0,
                        "available" : 0},
        }

        # limit tables populated by the config manager
        self.pg_buffer_limit          = {}
        self.ing_sp_buffer_limit      = {}
        self.ing_sp_buffer_offset     = {}
        self.shared_buffer_limit      = 0
        self.eg_sp_buffer_limit       = {}
        self.queue_buffer_limit       = {}
        self.queue_buffer_unlimited   = {}
        self.queue_buffer_color_aware = {}

    def init_desc (self, config_mgr) :
        # config_mgr supplies report_error() for allocation failures
        self.config_mgr = config_mgr

    def init_available_cell_count (self, direction, cell_count) :
        self.cell_count[direction]['available'] = cell_count

    def allocate_cells (self, direction, counter, cell_count) :
        """Move cell_count cells from 'available' into 'counter'.

        Returns 0 on success, -1 (after reporting an error) on failure.
        """
        counters = self.cell_count.get(direction)
        if counters is None or counter not in counters :
            self.config_mgr.report_error('direction %s or counter %s is not valid' % (direction, counter))
            return -1
        if counters['available'] < cell_count :
            self.config_mgr.report_error('%s %s: %d cells not available (%d remaining)' % (direction,
                                                                                           counter,
                                                                                           cell_count,
                                                                                           counters['available']))
            return -1
        counters['available'] -= cell_count
        counters[counter] += cell_count
        return 0

    def get_remaining_cells (self, direction) :
        return self.cell_count[direction]['available']

# ==============================================================================
#
#                              P O R T __ D E S C
#
# ==============================================================================
class PortDesc(object):
    """Port inventory for one chip: per-bandwidth port counts and weights,
    SDK port labels, and the logical/physical/MMU port maps."""

    def __init__(self, chip) :

        self.chip               = chip
        self.port_count         = 0
        self.port_bw_count      = {}    # bw -> number of ports at that bw
        self.port_bw_weight     = {}    # bw -> weight relative to slowest bw
        self.port_bw            = []    # distinct bandwidths, discovery order
        self.port_bw_range_dict = {}    # bw -> list of (start, end) ranges
        self.port_map           = {'pipe0' : {}, 'pipe1': {}, 'logical' : {}}
        # maintain a list of HSPs (high-speed ports) per-pipe
        self.hsp_port_map       = {'pipe0' : [], 'pipe1': []}
        self.hsp_bw             = chip.hsp_bw
        self.sdk_port_label_list = []
        self.label_2_bw          = {}
        self.label_2_logical     = {}

    def get_port_count (self) :
        return self.port_count

    def _add_port_range(self, port_bw, start_port, end_port) :
        """Account for a contiguous run of ports at bandwidth port_bw."""
        # account for the new ports
        range_port_count = end_port - start_port + 1
        self.port_count += range_port_count
        if port_bw not in self.port_bw_count :
            self.port_bw_count[port_bw] = 0
            self.port_bw.append(port_bw)
        self.port_bw_count[port_bw] += range_port_count

        # record the (start, end) range under its bandwidth
        if port_bw not in self.port_bw_range_dict :
            self.port_bw_range_dict[port_bw] = []
        self.port_bw_range_dict[port_bw].append((start_port, end_port))

    def get_port_bw_range_list (self, port_bw) :
        return self.port_bw_range_dict[port_bw]

    def get_weighted_port_count (self) :
        # computed at the end of read_port_config()
        return self.weighted_port_count

    def get_port_bw (self) :
        return self.port_bw

    def get_weighted_per_port_cells (self, memory_cells, port_bw) :
        """Return one port's bandwidth-weighted share of memory_cells.

        A port_bw of 0 (the CPU port) gets the full cell count.
        (Removed an unused local 'weight'.)
        """
        if port_bw == 0 :
            return memory_cells
        weighted_cell_count = int((memory_cells / self.weighted_port_count) * self.port_bw_weight[port_bw])
        return weighted_cell_count

    def _set_port_map(self, logical_port, phy_port, bw) :
        """Record a logical<->physical mapping in the per-pipe and
        per-logical-port tables."""
        # physical ports below 65 belong to pipe0, the rest to pipe1
        base_phy_port_y = 65
        pipe = 'pipe1'
        if phy_port < base_phy_port_y :
            pipe = 'pipe0'
        if bw not in self.port_map[pipe]:
            self.port_map[pipe][bw] = []

        if self.hsp_bw != None and bw >= self.hsp_bw :
            self.hsp_port_map[pipe].append(logical_port)

        #XXX do HSPs need to be a part of the per-bw list?
        self.port_map[pipe][bw].append({'logical' : logical_port,
                                        'phy' : phy_port})
        self.port_map['logical'][logical_port] = {'phy_port' : phy_port,
                                                  'bw' : bw, 'mmu' : -1,
                                                  'uc_queue' : -1}

    def is_hsp_port(self, logical_port):
        """True when the logical port runs at or above the HSP bandwidth."""
        if self.hsp_bw == None :
            return False
        bw = self.port_map['logical'][logical_port]['bw']
        return bw >= self.hsp_bw

    def _set_pipe_mmu_ports(self, pipe, mmu_port, pipe_uc_queue_base) :
        """Assign MMU port numbers and unicast queue bases for one pipe.

        HSP ports are numbered first (sorted by logical port) and get 10 UC
        queues each; remaining ports follow in descending-bw / ascending
        phy-port order and get 12 UC queues each.
        """
        is_t2plus = False
        # BUG FIX: was cumulus.platform.TridentTwoPlusChip -- the module is
        # imported as cumulus.platforms at the top of this file, so the old
        # spelling raised AttributeError at runtime.
        if isinstance(self.chip, cumulus.platforms.TridentTwoPlusChip) :
            is_t2plus = True

        # HSP ports get the lowest MMU port numbers in the pipe (sorted
        # by logical port number across all port speeds). Also HSP ports
        # have 10 UC queues.
        if is_t2plus:
            # uc queue base is calculated at a fixed offset for each port
            mmu_offset = mmu_port
            if mmu_port >= 64 :
                mmu_offset = mmu_port - 64
            uc_queue_base = pipe_uc_queue_base + (mmu_offset * 10)
        else :
            uc_queue_base = pipe_uc_queue_base
        for logical_port in self.hsp_port_map[pipe]:
            self.port_map['logical'][logical_port]['mmu_port'] = mmu_port
            self.port_map['logical'][logical_port]['uc_queue'] = uc_queue_base
            mmu_port += 1
            uc_queue_base += 10

        bw_list = self.port_map[pipe].keys()
        bw_list.sort(reverse=True)

        # MMU port assignment for non-hsp UC queues
        # non-hsp ports have 12 UC queues
        for bw in bw_list :
            phy_port_dict = {}
            for port_info in self.port_map[pipe][bw] :
                phy_port_dict[port_info['phy']] = port_info['logical']
            phy_port_list = phy_port_dict.keys()
            phy_port_list.sort()

            if is_t2plus:
                # uc queue base is calculated at a fixed offset for each port
                mmu_offset = mmu_port
                if mmu_port >= 64 :
                    mmu_offset = mmu_port - 64
                uc_queue_base = pipe_uc_queue_base + (mmu_offset * 12)
            else :
                # the base queue must be divisible by 4 to support PFC
                # (per comment in SDK 6.4.8)
                uc_queue_base = (uc_queue_base + 3) & (~3)
            for phy_port in phy_port_list :
                logical_port = phy_port_dict[phy_port]
                # hsp ports have already been assigned mmu ports
                if not self.is_hsp_port(logical_port):
                    self.port_map['logical'][logical_port]['mmu_port'] = mmu_port
                    self.port_map['logical'][logical_port]['uc_queue'] = uc_queue_base
                    mmu_port += 1
                    uc_queue_base += 12

    # XXX trident2 only
    def _set_mmu_ports(self) :
        self._set_pipe_mmu_ports('pipe0', 0, 0)
        self._set_pipe_mmu_ports('pipe1', 64, 2048)

    def dump_port_map(self):
        """Write the logical->mmu/uc-queue map to stderr (debug aid)."""
        sys.stderr.write('CL portmap\n')
        logical_port_list = self.port_map['logical'].keys()
        logical_port_list.sort()
        for logical_port in logical_port_list :
            sys.stderr.write('lport %d; mmu: %d; uc_q: %d\n' %
                   (logical_port + 1,
                    self.port_map['logical'][logical_port]['mmu_port'],
                    self.port_map['logical'][logical_port]['uc_queue']))

    # ----------------------------------------------------------
    #
    #            r e a d __ p o r t __ c o n f i g
    #
    # ----------------------------------------------------------
    def read_port_config(self, port_file, bcm_config_file, linux_port_map_file):
        """Parse the SDK port files and build every port map and weight.

        port_file           -- SDK rc-style file with 'setenv <bw>Gports'
                               and 'setenv allports' lines
        bcm_config_file     -- bcm config with optional portmap_N lines
        linux_port_map_file -- maps linux swpN names to SDK xe/ge/ce labels

        Exits the process (after logging) when no ports are configured.
        """
        # linux port map
        self.linux_port_map = {}
        self.linux_port_list = []
        linux_port_map_re = re.compile('(?P<linux>(swp\d+)(s\d+)?)\s+(?P<sdk>(x|g|c)e\d+)')
        f = open(linux_port_map_file)
        for line in f:
            m = linux_port_map_re.match(line)
            if m :
                linux_port = m.group('linux')
                sdk_port = m.group('sdk')
                self.linux_port_map[linux_port] = sdk_port
                self.linux_port_list.append(linux_port)
        # BUG FIX: this file handle was previously never closed
        f.close()

        #port_range_re  = re.compile('[x|g]e(?P<start>\d+)-[g|x]e(?P<end>\d+)|[g|x]e(?P<port>\d+)')
        port_range_re  = re.compile('(?P<prefix>[c|x|g]e)(?P<start>\d+)-[c|g|x]e(?P<end>\d+)|(?P<port_prefix>[c|g|x]e)(?P<port>\d+)')
        port_bw_re     = re.compile('setenv (?P<bw>\d+)G(fab)?ports')
        allports_re    = re.compile('setenv allports')
        f = open(port_file)
        for line in f:
            bw_match       = port_bw_re.match(line)
            allports_match = allports_re.match(line)
            if bw_match or allports_match :

                if bw_match :
                    # get the port bandwidth
                    port_bw = int(bw_match.group('bw'))

                range_list = port_range_re.findall(line)
                for range_entry in range_list :
                    # parse the port range: group 3/4 is a single port,
                    # groups 0-2 are a start-end range
                    if range_entry[3] != '' :
                        prefix           = range_entry[3]
                        port_range_start = int(range_entry[4])
                        port_range_end   = int(range_entry[4])
                    else :
                        prefix           = range_entry[0]
                        port_range_start = int(range_entry[1])
                        port_range_end   = int(range_entry[2])

                    # remember the port parameters
                    for port_num in range(port_range_start, port_range_end + 1) :
                        sdk_port_label = '%s%s' % (prefix, port_num)
                        if allports_match :
                            self.sdk_port_label_list.append(sdk_port_label)
                        else :
                            self.label_2_bw[sdk_port_label] = port_bw
                    if bw_match :
                        self._add_port_range(port_bw,
                                             port_range_start,
                                             port_range_end)
        f.close()

        # add the CPU port to the sdk port info
        self.sdk_port_label_list.append('cpu0')
        self.label_2_bw['cpu0']      = 0

        # try to read the portmap info: this may not be present for some chips
        port_map_line_re = re.compile('portmap_(?P<logical_port>\d+)\.\d+=(?P<phy_port>\d+):(?P<bw>\d+)')
        f = open(bcm_config_file)
        for line in f:
            m = port_map_line_re.match(line)
            if m :
                logical_port = int(m.group('logical_port')) - 1
                phy_port = int(m.group('phy_port'))
                bw = int(m.group('bw'))
                sdk_port_idx = logical_port
                self._set_port_map(logical_port, phy_port, bw)
                if sdk_port_idx < len(self.sdk_port_label_list) :
                    sdk_port_label = self.sdk_port_label_list[sdk_port_idx]
                    self.label_2_logical[sdk_port_label] = logical_port

        f.close()
        if len(self.port_map['logical'].keys()) == 0 :
            # this mapping is fixed on this platform: start with the lowest bw ports
            logical_port = 0
            for sdk_port_label in self.sdk_port_label_list :
                # NOTE(review): port_bw here is the last bandwidth parsed
                # from port_file -- verify this is intended for platforms
                # that lack a portmap.
                self._set_port_map(logical_port, -1, int(port_bw))
                logical_port += 1

        #keep the HSP lists sorted by logical port
        self.hsp_port_map['pipe0'].sort()
        self.hsp_port_map['pipe1'].sort()

        self._set_mmu_ports()

        if self.port_count == 0 :
            # BUG FIX: was self.port_file, an attribute that is never set
            # and would raise AttributeError in this error path
            logger.error('No ports configured in %s: exiting' % port_file)
            sys.exit()

        # weight each bandwidth relative to the slowest bandwidth present
        min_bw = sys.maxint
        for bw in self.port_bw_count :
            if bw < min_bw :
                min_bw = bw
        for bw in self.port_bw_count :
            weight = bw / min_bw
            self.port_bw_weight[bw] = weight

        self.weighted_port_count = 0
        for bw in self.port_bw_count :
            self.weighted_port_count += (self.port_bw_count[bw] * self.port_bw_weight[bw])


# ==============================================================================
#
#                       P A R A M E T E R __ M A N A G E R
#
# ==============================================================================
class ParameterManager(object):
    """Base class for generators that turn chip/config state into parameter
    groups and write them to the output parameter file."""

    def __init__(self, chip, config_manager) :
        self.chip           = chip
        self.config_manager = config_manager
        self.port_desc      = config_manager.port_desc
        self.parameter_group_list = []

    def generate_output_objects (self) :
        # subclasses populate parameter_group_list here
        pass

    def print_output_objects (self, file_dict) :
        # nothing to emit when custom datapath config is disabled
        if self.config_manager.traffic.disable_custom_datapath_config == 1 :
            return
        for group in self.parameter_group_list :
            group.write_to_file(file_dict['parameter'])

# ==============================================================================
#
#             E S W __ B U F F E R __ P A R A M E T E R __ M G R
#
# ==============================================================================
class ESW_BufferParameterMgr(ParameterManager) :
    """Shared service-pool buffer-parameter generation used by both the
    ingress and egress managers."""

    def __init__(self, chip, config_manager) :
        super(ESW_BufferParameterMgr,self).__init__(chip, config_manager)

    def generate_sp_limits(self, label, sp_config) :
        """Emit size/yellow_size/red_size for each configured service pool.

        label is the pool name prefix (e.g. 'ingr'); sp_config supplies the
        per-pool configured flag and percent.
        """
        num_service_pools = self.config_manager.hardware.num_service_pools
        yellow_percent    = self.config_manager.traffic.yellow_limit_percent
        red_percent       = self.config_manager.traffic.red_limit_percent

        group = ParameterGroup('service pool size')
        for sp_id in range(num_service_pools) :
            service_pool = sp_config.pool_dict[sp_id]
            if service_pool.configured != True :
                # unconfigured pools emit no parameters
                sp_limit = 0
            else :
                sp_limit = service_pool.percent
                group.add_parameter(Parameter('buf.%spool%d.size' % (label, sp_id),
                                              sp_limit,
                                              suffix='%'))

                # NOTE(review): percent / 100 is integer division under
                # Python 2 when both operands are ints -- confirm percents
                # arrive as floats (or 100) from the config source.
                if yellow_percent != None :
                    yellow_limit = (yellow_percent / 100) * sp_limit
                else :
                    yellow_limit = sp_limit
                # BUG FIX: this add_parameter was indented inside the else
                # branch above, so yellow_size was only emitted when
                # yellow_percent was None; it is now emitted unconditionally,
                # matching the red limit handling below.
                group.add_parameter(Parameter('buf.%spool%d.yellow_size' % (label, sp_id),
                                              yellow_limit,
                                              suffix='%'))
                if red_percent != None :
                    red_limit = (red_percent / 100) * sp_limit
                else :
                    red_limit = sp_limit
                group.add_parameter(Parameter('buf.%spool%d.red_size' % (label, sp_id),
                                              red_limit,
                                              suffix='%'))

        self.parameter_group_list.append(group)

# ==============================================================================
#
#        E S W __ I N G R __ B U F F E R __ P A R A M E T E R __ M G R
#
# ==============================================================================
class ESW_IngrBufferParameterMgr(ESW_BufferParameterMgr):
    """Generates the ingress buffer parameters: per-priority-group limits,
    the pg->service-pool map, service-pool sizes, per-port pool limits and
    the maximum packet size."""

    def __init__(self, chip, config_manager) :
        super(ESW_IngrBufferParameterMgr,self).__init__(chip, config_manager)

    def generate_pg_limits (self) :
        """Emit per-port, per-priority-group guarantee, shared-limit and
        headroom cell parameters, plus the mmu_lossless flag."""
        num_priority_groups = self.config_manager.hardware.num_priority_groups
        pg_buffer_limit     = self.config_manager.buffer_desc.pg_buffer_limit
        hdrm_group          = ParameterGroup('priority group headroom')
        min_group           = ParameterGroup('per priority group guarantee')
        shared_group        = ParameterGroup('per priority group shared limit and shared resume')

        lossless_flag = 0
        for pg_id in range(num_priority_groups):
            # generate the weighted per-port, per-pg buffer allocation
            # field values for the minimum and pg headroom buffers
            if pg_id not in pg_buffer_limit :
                continue
            for sdk_port_label in self.config_manager.port_desc.sdk_port_label_list :
                bw = self.config_manager.port_desc.label_2_bw[sdk_port_label]
                # pause (lossless) handling only applies to pg 7 on front ports
                pause_flag = False
                if pg_id == 7 and sdk_port_label != 'cpu0' :
                    for pause_name, port_pause_config in self.config_manager.link_pause.set_dict.iteritems() :
                        if sdk_port_label in port_pause_config.sdk_port_list :
                            pause_flag = True
                            break

                # set the pg min cell limit
                pg_min_cell_limit = 0
                if sdk_port_label == "cpu0" :
                    pg_min_cell_limit = self.config_manager.buffer_desc.cpu_pg_min_cells
                elif pause_flag :
                    pg_min_cell_limit = port_pause_config.minimum_cell_limit
                elif pg_buffer_limit[pg_id]['pg_min'] > 0 :
                    per_port_min_cells = self.port_desc.get_weighted_per_port_cells(pg_buffer_limit[pg_id]['pg_min'], bw)
                    pg_min_cell_limit = int(per_port_min_cells)
                min_group.add_parameter(Parameter('buf.prigroup%s.guarantee_%s' % (pg_id, sdk_port_label), pg_min_cell_limit))

                # set the shared buffer limit
                # BUG FIX: these defaulted to nothing (NameError) when the
                # port had no pause config and no shared limit for this pg
                pg_shared_cell_limit = 0
                pg_shared_cell_floor = 0
                if pause_flag :
                    pg_shared_cell_limit = port_pause_config.shared_cell_limit
                    pg_shared_cell_floor = port_pause_config.shared_reset_floor
                elif pg_buffer_limit[pg_id]['shared'] > 0 :
                    pg_shared_cell_limit = pg_buffer_limit[pg_id]['shared']
                    # resume 500 cells below the limit, floored at zero
                    reset_floor = pg_shared_cell_limit - 500
                    if reset_floor < 0 :
                        reset_floor = 0
                    pg_shared_cell_floor = reset_floor
                shared_group.add_parameter(Parameter('buf.prigroup%s.pool_scale_%s' % \
                                                     (pg_id, sdk_port_label), -1))
                shared_group.add_parameter(Parameter('buf.prigroup%s.pool_limit_%s' % \
                                                     (pg_id, sdk_port_label),
                                                     pg_shared_cell_limit))
                shared_group.add_parameter(Parameter('buf.prigroup%s.pool_resume_%s' % \
                                                     (pg_id, sdk_port_label), pg_shared_cell_floor))

                # set the global headroom enable and pg headroom limit fields
                gbl_hdrm_enable    = 1
                pg_hdrm_cell_limit = 0
                if pause_flag :
                    pg_hdrm_cell_limit = port_pause_config.pg_hdrm
                    lossless_flag = 1
                elif sdk_port_label in pg_buffer_limit[pg_id]['pg_hdrm'] :
                    pg_hdrm_cell_limit = pg_buffer_limit[pg_id]['pg_hdrm'][sdk_port_label]
                    lossless_flag = 1
                hdrm_group.add_parameter(Parameter('buf.prigroup%s.device_headroom_enable_%s' % \
                                                   (pg_id, sdk_port_label), gbl_hdrm_enable))
                hdrm_group.add_parameter(Parameter('buf.prigroup%s.headroom_%s' % \
                                                   (pg_id, sdk_port_label), pg_hdrm_cell_limit))
        hdrm_group.add_parameter(Parameter('mmu_lossless', lossless_flag))

        self.parameter_group_list.append(min_group)
        self.parameter_group_list.append(shared_group)
        self.parameter_group_list.append(hdrm_group)

    def generate_sp_mapping (self) :
        """Emit the priority-group -> service-pool map as a comma list
        ordered by pg id (pools default to 0)."""
        num_priority_groups = self.config_manager.hardware.num_priority_groups

        group = ParameterGroup('service pool mapping')
        value_str = ''
        for pg_id in range(num_priority_groups) :
            if pg_id in self.config_manager.priority_group.pg2sp :
                sp_id = self.config_manager.priority_group.pg2sp[pg_id]
            else :
                sp_id = 0
            if pg_id > 0 :
                value_str += ','
            # BUG FIX: this append sat inside the pg_id > 0 guard, so pg 0's
            # pool id was dropped and the list began with a stray comma.
            value_str += '%s' % sp_id

        group.add_parameter(Parameter('buf.map.prigroup.pool', value_str))
        self.parameter_group_list.append(group)

    def generate_port_limits (self) :
        """Emit per-port ingress pool guarantees and a (large) shared limit
        for every service pool."""
        group = ParameterGroup('per-port minimum guaranteed and shared buffers')

        sp_config = self.config_manager.ingress_service_pool
        for sp_id in range(self.config_manager.hardware.num_service_pools) :
            service_pool = sp_config.pool_dict[sp_id]
            # keep the per-port limit effectively unlimited regardless of
            # whether the pool is configured
            shared_limit = 90

            # thdi_port_sp_config_x port_sp_min_limit, resume_limit, max_limit
            group.add_parameter(Parameter('buf.ingportpool%d.guarantee' % sp_id, 0))
            group.add_parameter(Parameter('buf.ingportpool%d.pool_limit' % sp_id,
                                          shared_limit, suffix='%'))
            group.add_parameter(Parameter('buf.ingportpool%d.pool_resume_limit' % sp_id, 0))
        self.parameter_group_list.append(group)

    def generate_packet_size (self) :
        """Emit the maximum packet size in cells."""
        group = ParameterGroup('maximum packet size')
        group.add_parameter(Parameter('pkt_size', self.config_manager.hardware.max_frame_cells ))
        self.parameter_group_list.append(group)

    def generate_ing_sp_limits (self) :
        self.generate_sp_limits('ingr', self.config_manager.ingress_service_pool)

    def generate_output_objects (self) :
        self.generate_packet_size()
        self.generate_port_limits()
        self.generate_pg_limits()
        self.generate_sp_mapping()
        self.generate_ing_sp_limits()

# ==============================================================================
#
#       E S W __ I N G R __ P R I __ M A P __ P A R A M E T E R __ M G R
#
# ==============================================================================
class ESW_IngrPriMapParameterMgr(ParameterManager):
    """Generates the per-port priority -> priority-group map parameters."""

    def __init__(self, chip, config_manager) :
        super(ESW_IngrPriMapParameterMgr,self).__init__(chip, config_manager)

    def generate_pg_map (self) :
        """Emit buf.map.pri.prigroup_<port> for every SDK port.

        Ports with link pause configured map every priority to pg 7; all
        other ports use the configured cos->group map (default pg 0).
        (Removed a dead 'if cos_id < 0' branch -- the loop index can never
        be negative -- and the reversed-iterate-then-sort dance, which was
        equivalent to building the lists in ascending priority order.)
        """
        max_priority_value = self.config_manager.hardware.num_priorities
        cos2group          = self.config_manager.priority_group.cos2group

        default_values = []
        pause_values   = []
        for cos_id in range(max_priority_value) :
            if cos_id in cos2group :
                default_values.append('%s' % cos2group[cos_id])
            else :
                default_values.append('0')
            pause_values.append('7')
        default_value_string = ','.join(default_values)
        pause_value_string   = ','.join(pause_values)

        group = ParameterGroup("priority to priority group mapping")
        for port_label in self.config_manager.port_desc.sdk_port_label_list :
            value_string = default_value_string
            for pause_name, port_pause_config in self.config_manager.link_pause.set_dict.iteritems() :
                if port_label in port_pause_config.sdk_port_list :
                    value_string = pause_value_string
                    break
            # port label syntax is different for csv syntax
            group.add_parameter(Parameter('buf.map.pri.prigroup_%s' % port_label, value_string))
        self.parameter_group_list.append(group)

    def generate_output_objects (self) :
        self.generate_pg_map()

# ==============================================================================
#
#         E S W __ E G R __ B U F F E R __ P A R A M E T E R __ M G R
#
# ==============================================================================
class ESW_EgrBufferParameterMgr(ESW_BufferParameterMgr) :
    """Generates the egress queue and service-pool buffer parameters."""

    def __init__(self, chip, config_manager) :
        super(ESW_EgrBufferParameterMgr,self).__init__(chip, config_manager)

    def generate_cos_map (self) :
        # No egress CoS map output; present so generate_output_objects can
        # call it unconditionally.
        pass

    # ------------------------------------------------------------------
    #
    #             g e t __ q __ c o n f i g __ v a l u e s
    #
    # ------------------------------------------------------------------
    def get_q_config_values (self, q_type, q_id) :
        """Return the buffer limit settings for queue q_id of type q_type.

        Keys in the returned dict: spid, q_group_id, limit_enable,
        shared_limit, color_limit_enable, yellow_limit, red_limit,
        dynamic_limit.
        """
        queue_buffer_limit       = self.config_manager.buffer_desc.queue_buffer_limit
        queue_buffer_unlimited   = self.config_manager.buffer_desc.queue_buffer_unlimited
        queue_buffer_color_aware = self.config_manager.buffer_desc.queue_buffer_color_aware
        values                   = {}

        sp_id = self.config_manager.priority_group.q2sp[q_type][q_id]
        values['spid'] = sp_id
        values['q_group_id'] = sp_id # not used, avoids an error message
        if queue_buffer_unlimited[q_type][q_id] == True :
            values['limit_enable'] = 0
            values['shared_limit'] = 0
        else :
            values['limit_enable'] = 1

            # allow access to the service pool buffer; color-aware queues
            # use the green limit as the overall shared limit
            if queue_buffer_color_aware[q_type][q_id] :
                buffer_type = 'shared green'
            else :
                buffer_type = 'shared'
            values['shared_limit'] = queue_buffer_limit[q_type][q_id].get(buffer_type, 0)

        values['color_limit_enable'] = 0
        values['yellow_limit']       = 0
        values['red_limit']          = 0
        if queue_buffer_unlimited[q_type][q_id] == False \
               and queue_buffer_color_aware[q_type][q_id] :
            # set the red and yellow limits
            values['color_limit_enable'] = 1
            values['yellow_limit'] = queue_buffer_limit[q_type][q_id].get('shared yellow', 0)
            values['red_limit']    = queue_buffer_limit[q_type][q_id].get('shared red', 0)

        values['dynamic_limit'] = 0

        return values

    def get_q_min_value (self, q_type, q_id, bw) :
        """Return {'min_limit': cells} guaranteed to the queue, weighted by
        the port bandwidth bw."""
        queue_buffer_limit = self.config_manager.buffer_desc.queue_buffer_limit
        q_min_cells = queue_buffer_limit[q_type][q_id].get('minimum', 0)
        return { 'min_limit' :
                 self.port_desc.get_weighted_per_port_cells(q_min_cells, bw) }

    def generate_queue_type_regs(self, q_type, swp_flag) :
        """Emit the buffer parameters for every queue of q_type.

        swp_flag True -> front-panel ports; False -> the CPU port.
        """
        if swp_flag == True :
            port_suffix = ''
        else :
            port_suffix = '_cpu0'
        for q_id in self.config_manager.priority_group.q2sp[q_type] :
            values = self.get_q_config_values (q_type, q_id)
            title_prefix = 'UC'
            q_prefix = ''
            if q_type == 'mc' :
                title_prefix = 'MC'
                q_prefix = 'm'
            elif q_type == 'cpu' :
                title_prefix = 'CPU'
                q_prefix = 'm'
            group = ParameterGroup('%s Queue %d buffers' % (title_prefix, q_id))
            if q_type == 'uc' :
                group.add_parameter(Parameter('buf.queue%d.qgroup_id%s' % (q_id, port_suffix), -1))
            if q_type != 'cpu' :
                group.add_parameter(Parameter('buf.%squeue%d.pool' % (q_prefix, q_id), values['spid']))
            if values['limit_enable'] == 1 :
                group.add_parameter(Parameter('buf.%squeue%d.discard_enable%s' % (q_prefix, q_id, port_suffix), 1))
                group.add_parameter(Parameter('buf.%squeue%d.pool_scale%s' % (q_prefix, q_id, port_suffix), -1))
                group.add_parameter(Parameter('buf.%squeue%d.pool_limit%s' % (q_prefix, q_id, port_suffix), values['shared_limit']))
                group.add_parameter(Parameter('buf.%squeue%d.pool_resume%s' % (q_prefix, q_id, port_suffix), values['shared_limit'] - 100))

                if values['color_limit_enable'] :
                    # BUG FIX: previously read values['shared_yellow_limit']
                    # and values['shared_red_limit'], keys that
                    # get_q_config_values never sets (it sets 'yellow_limit'
                    # and 'red_limit'), so this branch raised KeyError.
                    yellow_limit = values['yellow_limit']
                    red_limit    = values['red_limit']
                    group.add_parameter(Parameter('buf.%squeue%d.color_discard_enable%s' % (q_prefix, q_id, port_suffix), 1))
                    group.add_parameter(Parameter('buf.%squeue%d.pool_yellow_limit%s' % (q_prefix, q_id, port_suffix), yellow_limit))
                    group.add_parameter(Parameter('buf.%squeue%d.pool_yellow_resume%s' % (q_prefix, q_id, port_suffix), yellow_limit - 100))
                    group.add_parameter(Parameter('buf.%squeue%d.pool_red_limit%s' % (q_prefix, q_id, port_suffix), red_limit))
                    group.add_parameter(Parameter('buf.%squeue%d.pool_red_resume%s' % (q_prefix, q_id, port_suffix), red_limit - 100))
                else :
                    group.add_parameter(Parameter('buf.%squeue%d.color_discard_enable%s' % (q_prefix, q_id, port_suffix), 0))
                    group.add_parameter(Parameter('buf.%squeue%d.pool_yellow_limit%s' % (q_prefix, q_id, port_suffix), 0))
                    group.add_parameter(Parameter('buf.%squeue%d.pool_yellow_resume%s' % (q_prefix, q_id, port_suffix), 0))
                    group.add_parameter(Parameter('buf.%squeue%d.pool_red_limit%s' % (q_prefix, q_id, port_suffix), 0))
                    group.add_parameter(Parameter('buf.%squeue%d.pool_red_resume%s' % (q_prefix, q_id, port_suffix), 0))

                # per-port guaranteed minimum, weighted by port bandwidth
                if swp_flag == False :
                    sdk_port_label_list = ['cpu0']
                else :
                    sdk_port_label_list = self.config_manager.port_desc.sdk_port_label_list
                for port_label in sdk_port_label_list :
                    if swp_flag == True and port_label == 'cpu0' :
                        continue
                    bw = self.config_manager.port_desc.label_2_bw[port_label]
                    values = self.get_q_min_value(q_type, q_id, bw)
                    min_limit_port_suffix = '_%s' % port_label
                    group.add_parameter(Parameter('buf.%squeue%d.guarantee%s' % (q_prefix, q_id, min_limit_port_suffix), values['min_limit']))
            else :
                group.add_parameter(Parameter('buf.%squeue%d.discard_enable%s' % (q_prefix, q_id, port_suffix), 0))

            # queue.qgroup_guarantee_enable bool ??  trident only
            self.parameter_group_list.append(group)

    def generate_queue_regs(self) :
        """Generate queue registers for unicast, multicast and CPU queues."""
        self.generate_queue_type_regs('uc', True)
        self.generate_queue_type_regs('mc', True)
        self.generate_queue_type_regs('cpu', False)

    def generate_sp_shared_limits(self) :
        # egress service pool shared limits
        self.generate_sp_limits('egr', self.config_manager.egress_service_pool)

    def generate_output_objects (self) :
        # TOO has per port scheduling registers in a table on page 518
        self.generate_cos_map()
        self.generate_queue_regs()
        self.generate_sp_shared_limits()

# ==============================================================================
#
#                          C H I P __ M A N A G E R
#
# ==============================================================================
class ChipManager(object):
    """Base chip manager.

    Instantiates one feature manager per entry in creator_list (which a
    subclass sets before calling this constructor) and fans generation /
    printing out to them.
    """

    def __init__(self, chip, config_manager) :
        self.manager_list = []
        self.config_manager = config_manager
        # `is None` (not `== None`) per PEP 8; subclasses normally set
        # creator_list before invoking this constructor.
        if getattr(self, 'creator_list', None) is None :
            self.creator_list = []
        for creator in self.creator_list :
            self.manager_list.append(creator(chip, config_manager))

    def generate_output_objects(self) :
        """Have every feature manager build its output objects."""
        for manager in self.manager_list :
            manager.generate_output_objects()

    def print_output_objects(self, file_dict) :
        """Write every manager's output to the per-type output files."""
        # .items() iterates identically to .iteritems() here and keeps the
        # code Python 3 compatible; locals renamed so the builtins
        # `type`/`file` are not shadowed.
        for file_type, out_file in file_dict.items() :
            if self.config_manager.traffic.disable_custom_datapath_config == 1 :
                out_file.write('# custom datapath configuration is disabled from /etc/cumulus/datapath/traffic.conf\n')
        for manager in self.manager_list :
            manager.print_output_objects(file_dict)

# ==============================================================================
#
#              T R I D E N T __ C H I P __ M A N A G E R
#
# ==============================================================================
class TridentChipManager(ChipManager):
    """Chip manager for Trident chips."""

    def __init__(self, chip, config_manager) :
        # Trident uses the baseline ingress/forwarding managers plus its
        # own egress register manager.
        self.creator_list = [
            IngressRegisterManager,
            ForwardingRegisterManager,
            TridentEgressRegisterManager,
        ]
        super(TridentChipManager,self).__init__(chip, config_manager)

# ==============================================================================
#
#              T 2__ C H I P __ M A N A G E R
#
# ==============================================================================
class T2_ChipManager(ChipManager):
    """Chip manager for Trident2 / Trident2+ chips."""

    def __init__(self, chip, config_manager) :
        # T2 swaps in its own ingress and egress register managers.
        self.creator_list = [
            T2_IngressRegisterManager,
            ForwardingRegisterManager,
            T2_EgressRegisterManager,
        ]
        super(T2_ChipManager,self).__init__(chip, config_manager)

# ==============================================================================
#
#              T O M A H A W K __ C H I P __ M A N A G E R
#
# ==============================================================================
class TomahawkChipManager(ChipManager):
    """Chip manager for Tomahawk chips."""

    def __init__(self, chip, config_manager) :
        # (removed a redundant `self.creator_list = []` that was
        # immediately overwritten by the assignment below)
        self.creator_list = [ ESW_EgrBufferParameterMgr,
                              ESW_IngrBufferParameterMgr,
                              ESW_IngrPriMapParameterMgr,
                              ESW_PortsRegisterManager,
                              ForwardingRegisterManager ]
        super(TomahawkChipManager,self).__init__(chip, config_manager)

# ==============================================================================
#
#              H E L I X 4__ C H I P __ M A N A G E R
#
# ==============================================================================
class Helix4_ChipManager(ChipManager):
    """Chip manager for Helix4 chips."""

    def __init__(self, chip, config_manager) :
        # use a list (was a tuple) for consistency with the other chip
        # managers; ChipManager only iterates it, so behavior is unchanged
        self.creator_list = [ESW_EgrBufferParameterMgr,
                             ESW_IngrBufferParameterMgr,
                             ESW_IngrPriMapParameterMgr,
                             ESW_PortsRegisterManager,
                             ForwardingRegisterManager]
        super(Helix4_ChipManager,self).__init__(chip, config_manager)

# ==============================================================================
#
#                    C H I P __ M A N A G E R __ F A C T O R Y
#
# ==============================================================================
class ChipManagerFactory(object):
    """Maps a chip class name to the ChipManager subclass that drives it."""

    map = { 'TridentChip'           : TridentChipManager,
            'TridentTwo_56850_Chip' : T2_ChipManager,
            'TridentTwo_56854_Chip' : T2_ChipManager,
            'TridentTwoPlus_56860_Chip' : T2_ChipManager,
            'TridentTwoPlus_56864_Chip' : T2_ChipManager,
            'Helix4Chip'            : Helix4_ChipManager,
            'TomahawkChip'          : TomahawkChipManager }

    @classmethod
    def get_new_manager(cls, chip, config_manager) :
        """Return a new chip manager for *chip*, or None if the chip's
        class name is not supported."""
        # classmethod first argument conventionally named cls, not self;
        # also fixed the stray deep indentation on the early return
        chip_name = chip.__class__.__name__
        if chip_name not in cls.map :
            return None
        return cls.map[chip_name](chip, config_manager)

# ==============================================================================
#
#                              O U T P U T __ O B J E C T
#
# ==============================================================================
class OutputObject(object) :
    """Base class for items written to a datapath output file.

    Holds a name/value pair plus an optional comment; subclasses override
    write_to_file() with their own formatting.
    """

    def __init__ (self, name, value, comment) :
        self.name    = name
        self.value   = value
        self.comment = comment

    def write_to_file (self, file) :
        # The base class writes nothing; formatting is subclass-specific.
        pass

# ==============================================================================
#
#                              P A R A M E T E R
#
# ==============================================================================
class Parameter(OutputObject) :
    """A single 'name=value' output line, with optional suffix and comment."""

    def __init__ (self, name, value, suffix='', comment=None) :
        # suffix is appended verbatim after the value (e.g. a unit)
        self.suffix = suffix
        super(Parameter,self).__init__(name, value, comment)

    def write_to_file (self, file) :
        """Write 'name=value<suffix>' plus '  # comment' when one is set."""
        # `is not None` (not `!= None`) per PEP 8
        if self.comment is not None :
            file.write("%s=%s%s  # %s\n" % (self.name,
                                            str(self.value),
                                            self.suffix,
                                            self.comment))
        else :
            file.write("%s=%s%s\n" % (self.name, str(self.value), self.suffix))

# ==============================================================================
#
#                         P A R A M E T E R __ G R O U P
#
# ==============================================================================
class ParameterGroup(object) :
    """An ordered list of parameters written under one '# comment' header."""

    def __init__ (self, comment=None) :
        self.parameter_list = []
        self.comment        = comment

    def add_parameter (self, parameter) :
        # parameter: any object with a write_to_file(file) method
        self.parameter_list.append(parameter)

    def write_to_file (self, file) :
        """Write the group header (if any), then each parameter followed by
        a blank line."""
        # `is not None` (not `!= None`) per PEP 8
        if self.comment is not None :
            file.write('# %s\n' % self.comment)
        for parameter in self.parameter_list :
            parameter.write_to_file(file)
            file.write('\n')

# ==============================================================================
#
#                          R E G I S T E R __ M A N A G E R
#
# ==============================================================================
class RegisterManager(object):
    """Base class that collects registers and port commands, then prints
    them section by section to the register output file."""

    def __init__(self, chip, config_manager) :
        self.chip           = chip
        self.reg_dict       = {}
        self.reg_list       = []
        self.port_cmd_list  = []
        self.section_dict   = {}
        self.config_manager = config_manager
        self.port_desc      = config_manager.port_desc
        self.hardware       = config_manager.hardware
        self.header_str     = ""

    # ------------------------------------------------------------------
    #
    #                 a d d __ n e w __ r e g i s t e r
    #
    # ------------------------------------------------------------------
    def add_new_register(self, name, section, operation, value_list, value_pairs, comment=None) :
        """Create a Register and file it under *section*."""
        reg = Register(name, operation, value_list, value_pairs, comment)
        self.reg_dict[name] = reg
        self.reg_list.append(reg)
        self.section_dict[section].append(reg)

    # ------------------------------------------------------------------
    #
    #                 a d d __ n e w __ p o r t __ p a u s e __ c m d
    #
    # ------------------------------------------------------------------
    def add_port_pause_cmd(self, port_label, tx_pause, rx_pause) :
        """Queue a 'port' command enabling/disabling tx/rx link pause."""
        tx_label = 'on' if tx_pause else 'off'
        rx_label = 'on' if rx_pause else 'off'
        self.port_cmd_list.append(
            "port %s TPAU=%s RPAU=%s\n" % (port_label, tx_label, rx_label))

    # ------------------------------------------------------------------
    #
    #         a d d __ n e w __ r e g i s t e r __ t e m p l a t e
    #
    # ------------------------------------------------------------------
    def add_new_register_template(self, name, section, operation, loop_list, value_list, value_pairs) :
        """Create a RegisterTemplate and file it under *section*."""
        # NOTE(review): the *operation* argument is ignored -- the template
        # is always created with "write"; confirm whether that is intended.
        reg = RegisterTemplate(name, "write", loop_list, value_list, value_pairs)
        self.reg_dict[name] = reg
        self.reg_list.append(reg)
        self.section_dict[section].append(reg)

    def generate_output_objects (self) :
        # subclasses generate their own registers
        pass

    # ------------------------------------------------------------------
    #
    #              p r i n t __ o u t p u t __ o b j e c t s
    #
    # ------------------------------------------------------------------
    def print_output_objects (self, file_dict) :
        """Print the header, every non-empty section, then the queued port
        commands; a no-op when custom datapath config is disabled."""
        if self.config_manager.traffic.disable_custom_datapath_config == 1 :
            return
        out = file_dict['register']
        out.write("\n\n# --- %s ---\n" % self.header_str)
        for section, registers in self.section_dict.items() :
            if not registers :
                continue
            out.write("\n# ----- %s ------\n" % section)
            for reg in registers :
                reg.print_register(out)

        out.write('\n# ------ port commands ------- \n')
        for cmd in self.port_cmd_list :
            out.write(cmd)

# ==============================================================================
#
#     E S W __ P O R T S __ R E G I S T E R __ M A N A G E R
#
# ==============================================================================
class ESW_PortsRegisterManager(RegisterManager):
    """Emits the port-table register defaults and per-port pause commands."""

    def __init__(self, chip, config_manager) :
        super(ESW_PortsRegisterManager,self).__init__(chip, config_manager)
        self.section_dict = {"ports" : []}
        self.header_str = "Port Registers"

    # ------------------------------------------------------------------
    #
    #                  g e n e r a t e __ r e g i s t e r s
    #
    # ------------------------------------------------------------------
    def generate_output_objects (self) :
        """Build the 'port' modify register covering the whole port table,
        then queue one pause command per front-panel port."""
        section = 'ports'

        port_table_size = 64 # XXX fixme: change to self.chip.port_table_size
        value_pairs = [ValuePair("port_pri",           "0"),
                       ValuePair("pri_mapping",        "0"),
                       ValuePair("trust_incoming_vid", "0"),
                       ValuePair("vt_enable",          "0") ]
        self.add_new_register("port", section, "modify",
                              ['0', '%d' % port_table_size], value_pairs)

        # adjust for ports with pause enabled
        for sdk_port_label in self.config_manager.port_desc.sdk_port_label_list :
            if sdk_port_label == 'cpu0' :
                continue
            tx_enable = 0
            rx_enable = 0
            for pause_name, port_pause_config in self.config_manager.link_pause.set_dict.iteritems() :
                if sdk_port_label in port_pause_config.sdk_port_list :
                    # enable pause on the port
                    tx_enable = port_pause_config.tx_enable
                    rx_enable = port_pause_config.rx_enable
                    break
            self.add_port_pause_cmd(sdk_port_label, tx_enable, rx_enable)

# ==============================================================================
#
#                               C O N F I G
#
# ==============================================================================
class Config(object):
    """A leaf configuration object.

    config_dict maps each parameter name to its value type ('int', 'float',
    'bool', 'int list', 'string'); parsed values become attributes of the
    instance.
    """

    # ------------------------------------------------------------------
    #
    #                          __ i n i t __
    #
    # ------------------------------------------------------------------
    def __init__ (self, name, config_dict) :
        self.name = name
        self.config_dict = config_dict

    # ------------------------------------------------------------------
    #
    #                     i n i t __ c o n f i g
    #
    # ------------------------------------------------------------------
    def init_config (self, config_mgr) :
        # the config manager is only used here for error reporting
        self.config_mgr = config_mgr

    # ------------------------------------------------------------------
    #
    #                        s e t __ c o n f i g
    #
    # ------------------------------------------------------------------
    def set_config (self, name, value) :
        """Parse *value* according to the declared type of *name* and set
        the corresponding attribute; errors go to the config manager."""
        if isinstance(name, list) :
            if len(name) == 0:
                self.config_mgr.report_error( '%s set_config: no name' % self.name)
                return
            name = name[0]
        elif not isinstance(name, str) :
            self.config_mgr.report_error('%s set_config: name format %s not recognized' % (self.name, name))
        if name in self.config_dict :
            value_type = self.config_dict[name]
            if value_type == 'int' :
                typed_value = int(value)
            elif value_type == 'float' :
                typed_value = float(value)
            elif value_type == 'bool' :
                typed_value = value in ('true', 'True', 'TRUE')
            elif value_type == 'int list' :
                # e.g. '[1,2,3]'; a trailing comma is tolerated
                int_list_re = re.compile(r'\[(?P<list>((\d+),)*(\d+)?)\]')
                m = int_list_re.search(value)
                if m :
                    typed_value = [int(tok)
                                   for tok in m.group('list').split(',')
                                   if tok != '']
                else :
                    self.config_mgr.report_error('Config %s value %s not recognized as an integer list' % (name, value))
                    return
            elif value_type == 'string' :
                typed_value = value
            else :
                self.config_mgr.report_error('Config value type %s not supported' % value_type)
                return
            setattr(self, name, typed_value)
        else :
            self.config_mgr.report_error('%s not supported in %s' % (name, self.name))

    # ------------------------------------------------------------------
    #
    #                        c h e c k __ c o n f i g
    #
    # ------------------------------------------------------------------
    def check_config (self) :
        """Report an error for any declared parameter that was never set."""
        for name in self.config_dict :
            if not hasattr(self, name) :
                self.config_mgr.report_error('%s: attribute %s not configured' % (self.name, name))

    # ------------------------------------------------------------------
    #
    #                      p r o c e s s __ c o n f i g
    #
    # ------------------------------------------------------------------
    def process_config (self) :
        """Create type-appropriate defaults for any unset parameters.

        BUG FIX: the 'int' case used `if` followed by `if value_type ==
        'bool'` instead of an elif chain, so every 'int' parameter fell
        through to the else branch and spuriously reported "value type int
        not recognized".
        """
        for name in self.config_dict :
            if not hasattr(self, name) :
                value_type = self.config_dict[name]
                if value_type == 'int' :
                    typed_value = 0
                elif value_type == 'bool' :
                    typed_value = False
                elif value_type == 'float' :
                    typed_value = 0.0
                elif value_type == 'int list' :
                    typed_value = []
                elif value_type == 'string' :
                    typed_value = ''
                else :
                    self.config_mgr.report_error('%s: value type %s not recognized' % (self.name, value_type))
                    # skip the setattr: typed_value would be stale/undefined
                    continue
                setattr(self, name, typed_value)

# ==============================================================================
#
#                         C O N F I G __ S E T
#
# ==============================================================================
class ConfigSet(Config):
    """A Config that also contains named member Config objects (set_dict);
    hierarchical names (lists of components) are routed to the members."""

    def __init__ (self, name, config_dict, set_dict) :
        self.set_dict = set_dict
        super(ConfigSet,self).__init__(name, config_dict)

    def init_config (self, config_mgr) :
        super(ConfigSet,self).init_config(config_mgr)
        # .items() iterates identically to .iteritems() and keeps the code
        # Python 3 compatible
        for member_name, member in self.set_dict.items() :
            member.init_config(config_mgr)

    def set_config (self, name, value) :
        """Route a hierarchical name (a list of components) either to an own
        parameter or down to the named member config."""
        if not isinstance(name, list) :
            self.config_mgr.report_error('%s: name format %s not recognized' % (self.name, name))
            return
        if len(name) == 0 :
            self.config_mgr.report_error('%s set_config: no name' % self.name)
            return
        parameter_name = name[0]
        if parameter_name in self.config_dict :
            super(ConfigSet, self).set_config(parameter_name, value)
        elif parameter_name in self.set_dict :
            # strip the leading component and recurse into the member
            self.set_dict[parameter_name].set_config(name[1:], value)
        else :
            self.config_mgr.report_error('%s: parameter name %s in %s not recognized' % (self.name, parameter_name, name))

    def check_config (self) :
        super(ConfigSet,self).check_config()
        for member_name, member in self.set_dict.items() :
            member.check_config()

    def process_config (self) :
        super(ConfigSet,self).process_config()
        for member_name, member in self.set_dict.items() :
            member.process_config()


# ==============================================================================
#
#                       S C H E D U L I N G __ C O N F I G
#
# ==============================================================================
class SchedulingConfig(Config):
    """Scheduling configuration: a single 'algorithm' string parameter."""

    def __init__ (self) :
        super(SchedulingConfig,self).__init__(
            'Scheduling', { 'algorithm' : 'string' })

# ==============================================================================
#
#                       F W D __ T A B L E __ C O N F I G
#
# ==============================================================================
class FwdTableConfig(Config):
    """Forwarding-table configuration: a 'profile' string parameter."""

    def __init__ (self) :
        # pre-seed the default so check_config never flags 'profile'
        self.profile = 'default'
        super(FwdTableConfig,self).__init__('forwarding_table',
                                            { 'profile' : 'string' })

# ==============================================================================
#
#                             C O S __ Q U E U E __ C O N F I G
#
# ==============================================================================
class CosQueueConfig(Config):
    """Per-CoS queue configuration ('uc' and 'cpu' queue ids)."""

    def __init__ (self, name) :
        config_dict = { 'uc' : 'int',
                        'cpu': 'int' }
        super(CosQueueConfig,self).__init__('CoS Queue Config %s' % name,
                                            config_dict)

    # (a redundant set_config override that only delegated to super with
    # the same arguments was removed; the inherited Config.set_config
    # behaves identically)

    def check_config (self) :
        """Default unset queue ids to 0 before the base-class check."""
        for name in self.config_dict :
            if not hasattr(self, name) :
                setattr(self, name, 0)
        super(CosQueueConfig,self).check_config()

    def set_mc_queue (self, mc) :
        # explicit multicast queue assignment
        self.mc = mc

    def match_mc_queue (self) :
        # the multicast queue follows the unicast queue id
        self.mc = self.uc

# ==============================================================================
#
#                   C O S __ Q U E U E __ C O N F I G __ S E T
#
# ==============================================================================
class CosQueueConfigSet(ConfigSet):
    """The set of per-CoS queue configs, one per hardware priority."""

    def __init__ (self) :
        self.cos_id_dict = {}
        super(CosQueueConfigSet,self).__init__('Cos Set', {}, {})

    def init_config (self, config_mgr) :
        """Create one CosQueueConfig per hardware priority, then
        initialize every member."""
        for cos_id in range(config_mgr.hardware.num_priorities) :
            cfg = CosQueueConfig('cos_%d' % cos_id)
            self.cos_id_dict[cos_id] = cfg
            self.set_dict['cos_%d' % cos_id] = cfg
        super(CosQueueConfigSet,self).init_config(config_mgr)

    def match_mc_queues (self) :
        # every CoS uses its unicast queue id for multicast
        for cos_id, config in self.cos_id_dict.iteritems() :
            config.match_mc_queue()

    def set_mc_queues (self, cos_id, mc) :
        self.cos_id_dict[cos_id].set_mc_queue(mc)

# ==============================================================================
#
#                       S E R V I C E __ P O O L __ C O N F I G
#
# ==============================================================================
class ServicePoolConfig(Config):
    """One service pool's buffer share ('percent' of the total)."""

    def __init__ (self, name, direction) :
        config_dict = { 'percent'        : 'float' }
        # becomes True once a priority group actually references this pool
        self.configured = False
        super(ServicePoolConfig,self).__init__('%s Service Pool %s' % (direction, name), config_dict)

    def set_configured_flag (self) :
        self.configured = True

    def check_config (self) :
        """Default 'percent' to 0 before the base-class check."""
        for name in self.config_dict :
            if not hasattr(self, name) :
                setattr(self, name, 0)
        super(ServicePoolConfig,self).check_config()

    def process_config(self) :
        """Zero out the share of any pool no priority group uses."""
        # idiom: `not self.configured` instead of `== False`
        if not self.configured :
            if hasattr(self, 'percent') :
                self.percent = 0
        super(ServicePoolConfig,self).process_config()

# ==============================================================================
#
#                  S E R V I C E __ P O O L  __ C O N F I G __ S E T
#
# ==============================================================================
class ServicePoolConfigSet(ConfigSet):
    """The per-direction (ingress/egress) set of service pool configs."""

    def __init__ (self, prefix) :
        self.pool_dict = {}
        self.prefix = prefix
        super(ServicePoolConfigSet,self).__init__(prefix + ' Service Pool Set', {}, {})

    def init_config (self, config_mgr) :
        """Create one ServicePoolConfig per hardware service pool."""
        for sp_id in range(config_mgr.hardware.num_service_pools) :
            label = '%d' % sp_id
            pool = ServicePoolConfig(label, self.prefix)
            self.pool_dict[sp_id] = pool
            self.set_dict[label] = pool
        super(ServicePoolConfigSet,self).init_config(config_mgr)

    def process_config (self) :
        """Flag every pool referenced by a configured priority group, then
        run the base-class processing."""
        for group_type, priority_group in self.config_mgr.priority_group.set_dict.iteritems() :
            if not priority_group.configured :
                continue
            self.pool_dict[priority_group.service_pool].set_configured_flag()
        super(ServicePoolConfigSet,self).process_config()

# ==============================================================================
#
#                    I N G R E S S __ B U F F E R __ C O N F I G
#
# ==============================================================================
class IngressBufferConfig(Config):
    """Ingress buffer sizing: 'min_percent' and 'shared_percent'."""

    def __init__ (self, name) :
        super(IngressBufferConfig,self).__init__(
            '%s Ingress Buffer' % name,
            { 'min_percent'    : 'float',
              'shared_percent' : 'float' })

    def check_config (self) :
        """Default unset percentages to 0 before the base-class check."""
        for attr in self.config_dict :
            if not hasattr(self, attr) :
                setattr(self, attr, 0)
        super(IngressBufferConfig,self).check_config()

# ==============================================================================
#
#                E G R E S S __ Q U E U E __ B U F F E R __ C O N F I G
#
# ==============================================================================
class EgressQueueBufferConfig(Config):
    # Egress buffer limits for one queue type (uc/mc/cpu) of a priority
    # group: sp_percent is the service-pool share, min_percent the
    # guaranteed minimum.
    def __init__ (self, name) :
        config_dict = { 'sp_percent'        : 'float',
                        'min_percent'       : 'float' }
        super(EgressQueueBufferConfig,self).__init__('%s Egress Queue Buffer' % name, config_dict)

    def check_config (self) :
        # Derive the flags BEFORE defaulting the attributes below:
        # 'unlimited' must reflect whether the user explicitly configured
        # any limit, so the hasattr tests have to run first.
        self.color_aware_flag = False
        self.unlimited        = True
        if hasattr(self, 'min_percent') or hasattr(self, 'sp_percent') :
            self.unlimited = False

        # default unset parameters to None so the base-class check passes
        for name in self.config_dict :
            if not hasattr(self, name) :
                setattr(self, name, None)
        super(EgressQueueBufferConfig,self).check_config()

# ==============================================================================
#
#                     E G R E S S __ B U F F E R __ C O N F I G
#
# ==============================================================================
class EgressBufferConfig(ConfigSet):
    """Groups the UC, MC and CPU egress-queue buffer configurations for one
    priority group."""

    def __init__ (self, name) :
        queue_buffers = {}
        for key, prefix in (('uc', 'UC'), ('mc', 'MC'), ('cpu', 'CPU')) :
            queue_buffers[key] = EgressQueueBufferConfig(prefix + name)
        super(EgressBufferConfig,self).__init__('%s Egress Buffer' % name, {}, queue_buffers)

# ==============================================================================
#
#                  P R I O R I T Y __ G R O U P __ C O N F I G
#
# ==============================================================================

class PriorityGroupConfig(ConfigSet):
    """Configuration for one priority (traffic) group: which CoS values it
    carries, which service pool it maps to, its ingress/egress buffers, and
    any per-port-group sub-configurations created on demand from the file.
    """

    def __init__ (self, name) :
        config_dict    = { 'id'                      : 'int',
                           'service_pool'            : 'int',
                           'weight'                  : 'int',
                           'unlimited_egress_buffer' : 'bool',
                           'cos_list'                : 'int list',
                           'lossless_flag'           : 'bool' }
        # defaults used unless the config file overrides them
        self.configured    = False
        self.lossless_flag = False
        self.unlimited     = False
        self.cos_list      = []
        self.port_group_name_list = []
        self.ingress_buffer  = IngressBufferConfig(name)
        self.egress_buffer   = EgressBufferConfig(name)
        parameter_dict = { 'ingress_buffer' : self.ingress_buffer,
                           'egress_buffer'  : self.egress_buffer }

        super(PriorityGroupConfig,self).__init__('%s Priority Group' % name,
                                                config_dict,
                                                parameter_dict)

    def init_config (self, config_mgr) :
        super(PriorityGroupConfig,self).init_config(config_mgr)

    def set_config (self, name, value) :
        """Route one directive; unknown first-level names become new
        port-group sub-sections created on demand."""
        if name[0] == 'cos_list' :
            # a cos_list line is what marks this priority group as in use
            self.configured = True
        elif name[0] == 'port_group_list' :
            self.config_dict['port_group_list'] = 'string'
        elif name[0] not in self.config_dict :
            port_group_name = name[0]
            if port_group_name not in self.set_dict :
                self.port_group_name_list.append(port_group_name)
                self.set_dict[port_group_name] = PortGroupConfig(port_group_name)
                self.set_dict[port_group_name].init_config(self.config_mgr)
        super(PriorityGroupConfig,self).set_config(name, value)

    def check_config (self) :
        if not hasattr(self, 'unlimited_egress_buffer') :
            self.unlimited_egress_buffer = True
        if self.configured == True :
            if len(self.port_group_name_list) > 0 :
                for port_group_name in self.port_group_name_list :
                    self.set_dict[port_group_name].check_config()
            elif self.lossless_flag == True:
                # lossless traffic needs per-port headroom, so port groups
                # are mandatory for a lossless group
                self.config_mgr.report_error('%s: no port groups' % self.name)
                self.configured = False
                return
            super(PriorityGroupConfig,self).check_config()

    def process_config (self) :
        """Validate buffer flags, assign MC queues (Trident workaround) and
        record the egress queues used by this group's CoS values."""
        if self.configured == False:
            return
        super(PriorityGroupConfig,self).process_config()
        for port_group_name in self.port_group_name_list :
            self.set_dict[port_group_name].process_config()
        for q_type, buffer_config in self.egress_buffer.set_dict.iteritems() :
            if buffer_config.unlimited == True :
                # no configured buffer limits: check the flags
                # NOTE(review): erroring when no limits are configured AND the
                # unlimited flag is off looks inverted vs. the message text --
                # confirm the intended conflict condition
                if self.unlimited_egress_buffer == False :
                    self.config_mgr.report_error('%s: buffer limit conflicts with unlimited buffer flag' % self.name)
                    sys.exit(1)

        # XXX fixme!
        # awkward MC queue mapping management for Trident
        if self.config_mgr.hardware.num_mc_queues < 8 :
            if self.config_mgr.hardware.num_mc_queues < self.config_mgr.hardware.num_service_pools :
                mc_queue = 0
                # BUG FIX: the original format string had three placeholders
                # but only two arguments, raising TypeError on this path
                self.config_mgr.report_error("%s: assigning all packets to MC queue %d (only %d queues available)" %
                                             (self.name,
                                              mc_queue,
                                              self.config_mgr.hardware.num_mc_queues))
            else :
                mc_queue = self.service_pool
            for cos_id in self.cos_list :
                self.config_mgr.cos_queue.set_mc_queues(cos_id, mc_queue)

        self.queue = { 'uc'  : {},
                       'mc'  : {},
                       'cpu' : {} }
        for cos_id in self.cos_list :
            # cos to queue
            cos_queue_config = self.config_mgr.cos_queue.cos_id_dict[cos_id]

            # list each egress queue used by the traffic group
            self.queue['uc'][cos_queue_config.uc]   = 1
            self.queue['mc'][cos_queue_config.mc]   = 1
            self.queue['cpu'][cos_queue_config.cpu] = 1

    def map_queue_to_sp (self, q2sp) :
        """Map every egress queue used by this group's CoS values to the
        group's service pool, filling the caller-supplied q2sp dict."""
        for cos_id in self.cos_list :
            cos_queue_config = self.config_mgr.cos_queue.cos_id_dict[cos_id]
            q2sp['uc'][cos_queue_config.uc]   = self.service_pool
            q2sp['mc'][cos_queue_config.mc]   = self.service_pool
            q2sp['cpu'][cos_queue_config.cpu] = self.service_pool

# ==============================================================================
#
#                  P R I O R I T Y __ G R O U P __ C O N F I G __ S E T
#
# ==============================================================================
class PriorityGroupConfigSet(ConfigSet):
    """The set of priority groups.  Validates that every CoS value is claimed
    by exactly one configured group and builds the cos->group, group->pool
    and queue->pool maps."""

    def __init__ (self) :
        config_dict = {}
        self.bulk     = PriorityGroupConfig('Bulk')
        self.lossless = PriorityGroupConfig('Lossless')
        self.service  = PriorityGroupConfig('Service')
        self.control  = PriorityGroupConfig('Control')
        group_dict  = { 'bulk'     : self.bulk,
                        'lossless' : self.lossless,
                        'service'  : self.service,
                        'control'  : self.control }
        super(PriorityGroupConfigSet,self).__init__('Priority Group Set',
                                                    config_dict,
                                                    group_dict)

    def set_config (self, name, value) :
        # unknown group names create a new priority group on the fly
        priority_group_name = name[0]
        if priority_group_name not in self.set_dict :
            self.set_dict[priority_group_name] = PriorityGroupConfig(priority_group_name)
            self.set_dict[priority_group_name].init_config(self.config_mgr)
        super(PriorityGroupConfigSet,self).set_config(name, value)

    def check_config (self) :
        super(PriorityGroupConfigSet,self).check_config()
        # at least one group must have a cos_list configured
        configured_count = 0
        # (loop variable renamed: the original shadowed the builtin 'type')
        for priority_group in self.set_dict.itervalues() :
            if priority_group.configured == True:
                configured_count += 1
        if configured_count < 1 :
            self.config_mgr.report_error('%s: no configured priority groups' % self.name)

    def process_config (self) :
        """Build the CoS/priority-group/service-pool mappings and verify the
        CoS assignment is complete and non-overlapping."""
        super(PriorityGroupConfigSet,self).process_config()

        # 1 == CoS value not yet claimed by any group
        unmapped_cos    = { 0: 1,
                            1: 1,
                            2: 1,
                            3: 1,
                            4: 1,
                            5: 1,
                            6: 1,
                            7: 1 }
        self.cos2group = {}
        self.pg2sp     = {}
        self.q2sp      = { 'uc'   : {},
                           'mc'   : {},
                           'cpu'  : {} }

        for traffic_type in self.set_dict :
            priority_group = self.set_dict[traffic_type]
            if priority_group.configured == False :
                continue
            pg_id = priority_group.id
            sp_id = priority_group.service_pool

            # map each queue to a service pool
            priority_group.map_queue_to_sp(self.q2sp)

            #  map each CoS value to a priority group
            for cos_id in priority_group.cos_list :
                if unmapped_cos[cos_id] == 0 :
                    # BUG FIX: the original message left its %s/%d
                    # placeholders unfilled
                    self.config_mgr.report_error('Error: traffic group %s assigned Cos %d (already assigned)'
                                                 % (traffic_type, cos_id))
                unmapped_cos[cos_id] = 0
                self.cos2group[cos_id] = pg_id

            # map each priority group to a service pool
            self.pg2sp[pg_id] = sp_id

        # verify every CoS value has been assigned to a traffic group
        for cos_id in unmapped_cos:
            if unmapped_cos[cos_id] == 1 :
                self.config_mgr.report_error('CoS value %d is not mapped to a traffic group' % cos_id)

# ==============================================================================
#
#                             C O S __ P K T __ C O N F I G
#
# ==============================================================================
class CosPktConfig(Config):
    """Associates one internal CoS value with a list of packet priorities."""

    def __init__ (self, name) :
        self.packet_priorities = []
        super(CosPktConfig,self).__init__('CoS %s Packet Config' % name,
                                          { 'packet_priorities' : 'int list' })

    def set_config (self, name, value) :
        super(CosPktConfig,self).set_config(name, value)
        # the base class should have created the attribute; complain if not
        if hasattr(self, name[0]) :
            return
        self.config_mgr.report_error('%s missing %s priorities %s' % (self.name, name[0], value))

    def check_config (self) :
        super(CosPktConfig,self).check_config()

    def process_config (self) :
        super(CosPktConfig,self).process_config()

# ==============================================================================
#
#                  P O R T __ G R O U P __ C O N F I G __ S E T
#
# ==============================================================================
class PortGroupConfig(Config):
    """Per-port-group settings: the member ports plus pause/buffer cell
    limits.  check_config derives the per-port PG headroom in cells."""

    def __init__ (self, name) :
        config_dict = {'rx_enable'          : 'bool',
                       'tx_enable'          : 'bool',
                       'port_set'           : 'string',
                       'shared_cell_limit'  : 'int',
                       'minimum_cell_limit' : 'int',
                       'port_buffer_bytes'  : 'int' }
        # SDK port labels expanded from the 'port_set' directive
        self.sdk_port_list = []
        super(PortGroupConfig,self).__init__('Port Group ' + name,
                                             config_dict)
    def check_config (self) :
        """Apply defaults and compute/clamp self.pg_hdrm (cells per port)."""
        if not hasattr(self, 'shared_cell_limit') :
            self.shared_cell_limit = 4
        if not hasattr(self, 'shared_reset_floor') :
            self.shared_reset_floor = 0
        # clamp a negative reset floor to zero (the original only checked
        # this inside the default branch, where it could never trigger)
        if self.shared_reset_floor < 0 :
            self.shared_reset_floor = 0
        if not hasattr(self, 'minimum_cell_limit') :
            self.minimum_cell_limit = 45
        if not hasattr(self, 'port_buffer_bytes') :
            self.port_buffer_bytes = 2024
        self.pg_hdrm = int(self.port_buffer_bytes / self.config_mgr.hardware.cell_bytes)
        # keep pg headroom in bounds
        min_per_port_pg_hdrm_cells      =  4
        max_pg_hdrm_per_port_cell_limit = 20
        if self.pg_hdrm < min_per_port_pg_hdrm_cells :
            self.pg_hdrm = min_per_port_pg_hdrm_cells
        elif self.pg_hdrm > max_pg_hdrm_per_port_cell_limit :
            # BUG FIX: the original referenced the bare name 'pg_hdrm'
            # (NameError on this path) and called self.report_error instead
            # of the config-manager method used everywhere else in the class
            error_msg = '\nPG headroom allocation %d is greater than the maximum value:' % self.pg_hdrm
            error_msg += ' reverting to max value %d' % max_pg_hdrm_per_port_cell_limit
            self.config_mgr.report_error(error_msg)
            self.pg_hdrm = max_pg_hdrm_per_port_cell_limit
        super(PortGroupConfig,self).check_config()

    def set_config (self, name, value) :
        """Store the value; a 'port_set' value (e.g. 'swp1-swp4, swp10') is
        also expanded into self.sdk_port_list via the linux port map."""
        super(PortGroupConfig,self).set_config(name, value)
        if name[0] == 'port_set' :
            # parse the port set string
            port_group_list = value.split(',')
            for port_group in port_group_list:
                port_group = port_group.strip()
                range_list = port_group.split('-')
                if len(range_list) > 0:
                    start_linux_port = range_list[0]
                    if start_linux_port not in self.config_mgr.port_desc.linux_port_map :
                        self.config_mgr.report_error('linux port %s not found in linux port map' % start_linux_port)
                        return
                    if len(range_list) > 1 :
                        end_linux_port = range_list[1]
                        if end_linux_port not in self.config_mgr.port_desc.linux_port_map :
                            self.config_mgr.report_error('linux port %s not found in linux port map' % end_linux_port)
                            return
                    else :
                        # a single port is a range of one
                        end_linux_port = range_list[0]
                    # walk the ordered port list from start to end, collecting
                    # the SDK label of every port in the range
                    in_range_flag = False
                    for linux_port_label in self.config_mgr.port_desc.linux_port_list :
                        if linux_port_label == start_linux_port :
                            in_range_flag = True
                        if in_range_flag == True :
                            sdk_port_label = self.config_mgr.port_desc.linux_port_map[linux_port_label]
                            self.sdk_port_list.append(sdk_port_label)
                        if linux_port_label == end_linux_port :
                            break

# ==============================================================================
#
#                  P O R T __ G R O U P __  C O N F I G __ S E T
#
# ==============================================================================
class PortGroupConfigSet(ConfigSet):
    """Set of named port groups plus the top-level port_group_list string."""

    def __init__ (self) :
        super(PortGroupConfigSet,self).__init__('Port Group Set',
                                                {'port_group_list' : 'string' },
                                                {})

    def check_config (self) :
        # default to an empty list when nothing was configured
        if not hasattr(self, 'port_group_list') :
            self.port_group_list = '[]'
        super(PortGroupConfigSet,self).check_config()

    def set_config (self, name, value) :
        # any unknown first-level name becomes a new port group
        group_name = name[0]
        if group_name not in self.config_dict and group_name not in self.set_dict :
            group = PortGroupConfig(group_name)
            group.init_config(self.config_mgr)
            self.set_dict[group_name] = group
        super(PortGroupConfigSet,self).set_config(name, value)

# ==============================================================================
#
#                  T R A F F I C __ C O N F I G __ S E T
#
# ==============================================================================
class TrafficConfigSet(ConfigSet):
    """Packet-priority handling: maps packet priorities (802.1p or DSCP) to
    internal CoS values (pkt2cos) and to drop colors (pkt2color)."""

    def __init__ (self) :
        config_dict = {'packet_priority_source'   : 'string',
                       'remark_packet_priority'   : 'string',
                       'yellow_limit_percent'     : 'float',
                       'red_limit_percent'        : 'float',
                       'yellow_packet_priorities' : 'int list',
                       'red_packet_priorities'    : 'int list',
                       'priority_group_list'      : 'string',
                       'disable_custom_datapath_config' : 'bool'}
        cos_dict = {}
        # cos_id -> CosPktConfig, populated in init_config
        self.cos_id_dict = {}
        super(TrafficConfigSet,self).__init__('Traffic Set', config_dict, cos_dict)

    def init_config (self, config_mgr) :
        # one CosPktConfig per hardware CoS value, addressable as 'cos_<n>'
        for cos_id in range(config_mgr.hardware.num_priorities) :
            self.cos_id_dict[cos_id] = CosPktConfig('%s' % cos_id)
            self.set_dict['cos_%d' % cos_id] = self.cos_id_dict[cos_id]
        super(TrafficConfigSet,self).init_config(config_mgr)

    def check_config (self) :
        """Fill in defaults for all optional parameters."""
        if not hasattr(self, 'drop_behavior') :
            self.drop_behavior = 'color-blind'
        if not hasattr(self, 'yellow_packet_priorities') :
            self.yellow_packet_priorities = None
        if not hasattr(self, 'red_packet_priorities') :
            self.red_packet_priorities = None
        if not hasattr(self, 'yellow_limit_percent') :
            self.yellow_limit_percent = None
        if not hasattr(self, 'red_limit_percent') :
            self.red_limit_percent = None
        if not hasattr(self, 'disable_custom_datapath_config') :
            self.disable_custom_datapath_config = False
        super(TrafficConfigSet,self).check_config()

    def process_config (self) :
        """Build pkt2cos and pkt2color from the per-CoS configurations."""
        super(TrafficConfigSet,self).process_config()
        # color-aware dropping needs both the yellow and the red limit
        if self.red_limit_percent != None or self.yellow_limit_percent != None :
            self.color_aware = True
            if self.red_limit_percent == None or self.yellow_limit_percent == None :
                # BUG FIX: the original message left its %s placeholder unfilled
                self.config_mgr.report_error('%s: color-aware drop limits are incomplete: red and yellow must both be configured.' % self.name)
                sys.exit(1)
        else :
            self.color_aware = False

        self.pkt2cos = {}
        if self.packet_priority_source  == '802.1p' :
            num_priorities = 8
        elif self.packet_priority_source == 'dscp' :
            num_priorities = 64
        else :
            self.report_error('packet priority source %s not supported' % self.packet_priority_source)
            sys.exit(1)
        priority_assigned = [0] * num_priorities
        for cos_id, cos_config in self.cos_id_dict.iteritems() :
            cos_config.process_config()
            for packet_priority in cos_config.packet_priorities :
                # BUG FIX: was '>', which let packet_priority == num_priorities
                # through and indexed past the end of priority_assigned
                if packet_priority >= num_priorities :
                    self.report_error('packet priority %d greater than maximum packet %s priority value %d' % (packet_priority,
                                                                                                               self.packet_priority_source,
                                                                                                               num_priorities))
                    sys.exit(1)
                else :
                    self.pkt2cos[packet_priority] = cos_id
                    priority_assigned[packet_priority] = 1

        # any packet priorities which have not been assigned get mapped to the first bulk traffic cos ID
        if len(self.config_mgr.priority_group.bulk.cos_list) < 1 :
            default_cos_id = None
        else :
            default_cos_id = self.config_mgr.priority_group.bulk.cos_list[0]
        for packet_priority, flag in enumerate(priority_assigned) :
            if flag == 0:
                if default_cos_id == None :
                    # BUG FIX: the original message left its %d placeholder unfilled
                    self.report_error('packet priority %d not assigned and no cos values assigned to bulk traffic: exiting' % packet_priority)
                    sys.exit(1)
                else :
                    self.pkt2cos[packet_priority] = default_cos_id

        # drop-color map: explicitly listed priorities become yellow/red,
        # everything else is green
        self.pkt2color = {}
        drop_color_dict = { 'yellow' : self.yellow_packet_priorities,
                            'red'    : self.red_packet_priorities }
        # num_priorities was already derived and validated above; the original
        # re-computed it here with a broken error message (its '%' bound
        # tighter than '==', so report_error received a boolean)
        assigned_flag_list = [0] * num_priorities
        for color, packet_priority_list in drop_color_dict.iteritems() :
            if packet_priority_list == None :
                continue
            for packet_priority in packet_priority_list :
                assigned_flag_list[packet_priority] = 1
                self.pkt2color[packet_priority] = color

        for packet_priority, flag in enumerate(assigned_flag_list) :
            if flag == 0 :
                self.pkt2color[packet_priority] = 'green'

# ==============================================================================
#
#                       H A R D W A R E __ C O N F I G
#
# ==============================================================================
class HardwareConfig(Config):
    """Chip-level constants: buffer cell counts, cell size, priority/queue
    and service-pool counts.  Every parameter is an integer."""

    def __init__ (self) :
        parameter_names = [ 'total_buffer_cells',
                            'cell_bytes',
                            'max_frame_cells',
                            'num_priorities',
                            'num_priority_groups',
                            'num_service_pools',
                            'num_mc_queues' ]
        config_dict = dict((pname, 'int') for pname in parameter_names)
        super(HardwareConfig,self).__init__('Hardware', config_dict)

# ==============================================================================
#
#                         I G N O R E __ C O N F I G
#
# ==============================================================================
class IgnoreConfig(object):
    """Null-object config handler: accepts and discards every directive.

    Used for config-file sections that are recognized but intentionally
    not processed by this tool.
    """

    def init_config(self, config_mgr) :
        return None

    def set_config(self, name, value) :
        return None

    def check_config(self) :
        return None

    def process_config(self) :
        return None

# ==============================================================================
#
#                         C O N F I G __ M A N A G E R
#
# ==============================================================================
class ConfigManager(object):

    def __init__(self, chip) :
        """Create every config-section handler and register each one under
        the top-level name it owns in the datapath config file."""
        super(ConfigManager,self).__init__()

        self.chip            = chip
        self.hardware        = HardwareConfig()
        self.traffic         = TrafficConfigSet()
        self.priority_group  = PriorityGroupConfigSet()
        self.ingress_service_pool = ServicePoolConfigSet('Ingress')
        self.egress_service_pool  = ServicePoolConfigSet('Egress')
        self.cos_queue       = CosQueueConfigSet()
        self.scheduling      = SchedulingConfig()
        self.link_pause      = PortGroupConfigSet()
        self.port_desc       = PortDesc(self.chip)
        self.buffer_desc     = BufferDesc()
        self.ignore_config   = IgnoreConfig()
        self.forwarding_table = FwdTableConfig()
        self.forwarding_section = []
        self.error_comment   = ""
        self.num_cpu_queues  = 48
        self.exception_q_start  = 8
        self.exception_q_weight = 8

        # top-level config-file name -> handler object
        self.manager_dict = {}
        self.manager_dict['hardware']             = self.hardware
        self.manager_dict['traffic']              = self.traffic
        self.manager_dict['priority_group']       = self.priority_group
        self.manager_dict['ingress_service_pool'] = self.ingress_service_pool
        self.manager_dict['egress_service_pool']  = self.egress_service_pool
        self.manager_dict['cos_egr_queue']        = self.cos_queue
        self.manager_dict['scheduling']           = self.scheduling
        self.manager_dict['link_pause']           = self.link_pause
        self.manager_dict['forwarding_table']     = self.forwarding_table
        # sections that are recognized but deliberately ignored
        for ignored_name in ('dos', 'dos_enable', 'cut_through_enable',
                             'ecmp_max_paths', 'symmetric_hash_enable',
                             'resilient_hash_enable',
                             'resilient_hash_entries_ecmp') :
            self.manager_dict[ignored_name] = self.ignore_config

        # for when order matters
        self.manager_list = [ self.hardware,
                              self.traffic,
                              self.priority_group,
                              self.ingress_service_pool,
                              self.egress_service_pool,
                              self.cos_queue,
                              self.link_pause,
                              self.scheduling,
                              self.forwarding_table,
                              self.ignore_config]

    # ------------------------------------------------------------------
    #
    #                     i n i t __ c o n f i g
    #
    # ------------------------------------------------------------------
    def init_config (self) :
        """Hand every section handler a back-reference to this manager, then
        initialize the buffer-accounting descriptor."""
        for section in self.manager_list:
            section.init_config(self)
        self.buffer_desc.init_desc(self)

    # ------------------------------------------------------------------
    #
    #             r e a d __ c o n f i g __ f i l e
    #
    # ------------------------------------------------------------------
    def read_config_file (self, config_file) :
        line_buffer = ""
        f = open(config_file)
        for line in f:
            line = line.rstrip()
            line_buffer = line_buffer + line
            line_buffer = line_buffer.rstrip("\\")
            if not re.match(r"(.*)\\", line) :
                line = line_buffer
                line_buffer = ""
            else :
                continue
            config = line.split()
            if len(config) == 0 :
                continue
            if config[0].startswith("#") :
                continue
            if len(config) < 3 :
                self.report_error('configuration line not recognized: %s' % line)
                continue
            name  = config[0]
            value = ' '.join(config[2:])
            # trim any comment
            value = value.split('#')
            value = value[0]
            # remove leading and trailing spaces
            value = value.strip()

            name_level = name.split('.')
            if not name_level :
                self.report_error('configuration parameter %s not recognized' % name)
                continue
            manager_name = name_level[0]
            if manager_name not in self.manager_dict :
                self.report_error('configuration parameter level %s in %s not supported' % (manager_name, name))
                continue
            manager = self.manager_dict[manager_name]
            manager.set_config(name_level[1:], value)
        f.close()

    # ------------------------------------------------------------------
    #
    #        r e a d __ f o r w a r d i n g __ c o n f i g __ f i l e
    #
    # ------------------------------------------------------------------
    def read_forwarding_config_file (self, forwarding_file) :

        section_name = ""
        line_buffer = ""
        f = open(forwarding_file)
        for line in f:
            #print line
            config = line.split()
            if len(config) == 0 :
                continue
            if config[0] == "section:" :
                section_name = config[1]
                continue
            self.forwarding_section.append(line)

    # ------------------------------------------------------------------
    #
    #             r e a d __ p o r t __ c o n f i g
    #
    # ------------------------------------------------------------------
    def read_port_config (self,
                          ports_config,
                          bcm_config,
                          linux_port_map_config) :
        self.port_desc.read_port_config(ports_config,
                                        bcm_config,
                                        linux_port_map_config)

    def dump_port_map(self) :
        # Debugging aid: delegate to PortDesc to dump the linux<->SDK port map.
        self.port_desc.dump_port_map()

    # ------------------------------------------------------------------
    #
    #                  r e p o r t __ e r r o r
    #
    # ------------------------------------------------------------------
    def report_error(self, error_msg) :
        """Log *error_msg* via syslog and append it, '#'-prefixed, to the
        error-comment buffer that is emitted with the generated output."""
        logger.error(error_msg)
        self.error_comment = self.error_comment + ('# %s\n' % error_msg)

    # ------------------------------------------------------------------
    #
    #          c a l c u l a t e __ i n g r e s s __ b u f f e r s
    #
    # ------------------------------------------------------------------
    def __calculate_ingress_buffers (self) :

        self.buffer_comment += '# ------- ingress buffers ------- \n'

        pg_buffer_limit      = self.buffer_desc.pg_buffer_limit
        ing_sp_buffer_limit  = self.buffer_desc.ing_sp_buffer_limit
        ing_sp_buffer_offset = self.buffer_desc.ing_sp_buffer_offset
        color_aware_flag     = self.traffic.color_aware

        cpu_pg_min_cells    = 45
        wan_pg_min_cells    = 1

        total_mem_cells     = self.hardware.total_buffer_cells
        num_service_pools   = self.hardware.num_service_pools
        num_priority_groups = self.hardware.num_priority_groups

        self.buffer_desc.init_available_cell_count('ingress', total_mem_cells)
        self.buffer_comment += '# total mem cells: %d\n' % self.buffer_desc.get_remaining_cells('ingress')

        # calculate the service pool buffers
        for sp_id in range(num_service_pools) :
            buffer_limit = 0
            if sp_id in self.ingress_service_pool.pool_dict :
                service_pool_config = self.ingress_service_pool.pool_dict[sp_id]
                percent = service_pool_config.percent
                if color_aware_flag == True :
                    limit_name = 'green'
                else :
                    limit_name = ''
                buffer_limit = int((percent/100) * total_mem_cells)
                ing_sp_buffer_limit[sp_id]  = {}
                ing_sp_buffer_offset[sp_id] = {}
                error = self.buffer_desc.allocate_cells('ingress', 'service_pool', buffer_limit)
                if not error :
                    ing_sp_buffer_limit[sp_id]['green'] = buffer_limit
                else :
                    ing_sp_buffer_limit[sp_id]['green'] = 100
                self.buffer_comment += '# service pool %d %s limit %d\n' % (sp_id, limit_name, ing_sp_buffer_limit[sp_id]['green'])

                if color_aware_flag == False :
                    ing_sp_buffer_limit[sp_id]['yellow'] = None
                    ing_sp_buffer_limit[sp_id]['red'] = None
                else :
                    # set the color-aware limits
                    yellow_sp_cells = int(ing_sp_buffer_limit[sp_id]['green'] * float(self.traffic.yellow_limit_percent / 100))
                    ing_sp_buffer_limit[sp_id]['yellow'] = yellow_sp_cells
                    self.buffer_comment += '# service pool %d yellow limit %d\n' % (sp_id, ing_sp_buffer_limit[sp_id]['yellow'])
                    red_sp_cells = int(ing_sp_buffer_limit[sp_id]['green'] * float(self.traffic.red_limit_percent / 100))
                    ing_sp_buffer_limit[sp_id]['red'] = red_sp_cells
                    self.buffer_comment += '# service pool %d red limit %d\n' % (sp_id, ing_sp_buffer_limit[sp_id]['red'])

        # set the CPU port per-priority-group minimum buffer
        self.buffer_desc.cpu_pg_min_cells = cpu_pg_min_cells

        # calculate the per-priority group buffer allocations for the minimum and headroom buffers
        for traffic_type, priority_group in self.priority_group.set_dict.iteritems() :
            if priority_group.configured == False :
                continue
            pg_id = priority_group.id
            buffer_config = priority_group.set_dict['ingress_buffer']
            pg_min_percent  = buffer_config.min_percent
            if pg_id not in pg_buffer_limit :
                pg_buffer_limit[pg_id] = {}
            # minimum buffer calculation
            buffer_limit = int((pg_min_percent/100) * total_mem_cells)
            if buffer_limit == 0:
                # we always allocate at least one cell to avoid a blocked priority group (hardware issue)
                buffer_limit = wan_pg_min_cells * self.port_desc.get_weighted_port_count()
            error = self.buffer_desc.allocate_cells('ingress', 'pg_min', buffer_limit)
            if not error :
                pg_buffer_limit[pg_id]['pg_min'] = buffer_limit
            else :
                pg_buffer_limit[pg_id]['pg_min'] = wan_pg_min_cells
            self.buffer_comment +=  '# pg %d min limit %d\n' % (pg_id, pg_buffer_limit[pg_id]['pg_min'])

            # account for the CPU min cells
            error = self.buffer_desc.allocate_cells('ingress', 'pg_min', cpu_pg_min_cells)

            pg_buffer_limit[pg_id]['pg_hdrm'] = {}
            if priority_group.lossless_flag == True :
                # pg headroom buffer calculation
                for port_group_name in priority_group.port_group_name_list :
                    port_group = priority_group.set_dict[port_group_name]
                    for sdk_port_label in port_group.sdk_port_list :
                        error = self.buffer_desc.allocate_cells('ingress', 'pg_hdrm', port_group.pg_hdrm)
                        if not error :
                            pg_buffer_limit[pg_id]['pg_hdrm'][sdk_port_label] = port_group.pg_hdrm
                        else :
                            pg_buffer_limit[pg_id]['pg_hdrm'][sdk_port_label] = 0
                        self.buffer_comment += '# %s pg %d hdrm buffer %d\n' % (sdk_port_label,
                                                                                pg_id,
                                                                                pg_buffer_limit[pg_id]['pg_hdrm'][sdk_port_label])

        # global headroom allocation
        buffer_limit = (self.hardware.max_frame_cells
                        * self.port_desc.get_port_count())
        error = self.buffer_desc.allocate_cells('ingress', 'global_headroom', buffer_limit)
        if not error :
            self.buffer_desc.global_headroom = buffer_limit
        else :
            self.buffer_desc.global_headroom = 0
        self.buffer_comment += '# global headroom buffer: %d\n' % (self.buffer_desc.global_headroom)

        # calculate the total size of the shared buffer
        buffer_limit = self.buffer_desc.get_remaining_cells('ingress')
        error = self.buffer_desc.allocate_cells('ingress', 'shared', buffer_limit)
        if not error :
            self.buffer_desc.shared_buffer_limit = buffer_limit
        else :
            self.buffer_desc.shared_buffer_limit = 0
        self.buffer_comment += '# shared buffer limit: %d\n' % self.buffer_desc.shared_buffer_limit

        # calculate per-priority group limits on the shared buffer
        shared_buffer_limit = self.buffer_desc.shared_buffer_limit
        for traffic_type in self.priority_group.set_dict :
            priority_group     = self.priority_group.set_dict[traffic_type]
            if priority_group.configured == False :
                continue
            pg_id             = priority_group.id
            buffer_config     = priority_group.set_dict['ingress_buffer']
            pg_shared_percent = buffer_config.shared_percent
            buffer_limit      = int(shared_buffer_limit * (pg_shared_percent) / 100)
            if buffer_limit > shared_buffer_limit :
                buffer_limit = shared_buffer_limit
                error_msg = '%s traffic: ingress shared buffer limit' % traffic_type,
                error_msg += ' exceeds total shared buffer, reducing to %d\n' % shared_buffer_limit
                self.report_error(error_msg)
            if pg_id not in pg_buffer_limit :
                pg_buffer_limit[pg_id] = {}
            pg_buffer_limit[pg_id]['shared'] = buffer_limit
            self.buffer_comment += '# pg %d shared limit: %d\n' % (pg_id, pg_buffer_limit[pg_id]['shared'])
        self.buffer_comment += '\n'

    # ------------------------------------------------------------------
    #
    #          c a l c u l a t e __ e g r e s s __ b u f f e r s
    #
    # ------------------------------------------------------------------
    def __calculate_egress_buffers (self) :
        """Calculate the egress buffer partitioning.

        Three passes:
          1. per-queue minimum buffer cells, drawn from the total cell
             budget and divided evenly across the queues of a queue type
          2. per-service-pool limits (may be oversubscribed; not drawn
             from the remaining cell budget), with optional color-aware
             yellow/red limits derived from the green limit
          3. per-queue shared-pool limits, again with optional
             color-aware limits

        Results are stored in self.buffer_desc and a human-readable
        trace is appended to self.buffer_comment.
        """
        self.buffer_comment +=  '# egress: calculating buffer size\n'

        eg_sp_buffer_limit       = self.buffer_desc.eg_sp_buffer_limit
        queue_buffer_limit       = self.buffer_desc.queue_buffer_limit
        queue_buffer_unlimited   = self.buffer_desc.queue_buffer_unlimited
        queue_buffer_color_aware = self.buffer_desc.queue_buffer_color_aware
        color_aware_flag         = self.traffic.color_aware

        total_mem_cells   = self.hardware.total_buffer_cells
        num_service_pools = self.hardware.num_service_pools
        self.buffer_desc.init_available_cell_count('egress', total_mem_cells)
        self.buffer_comment += '# total mem cells: %d\n' % self.buffer_desc.get_remaining_cells('egress')

        # pass 1: per-queue minimum buffer allocations
        for traffic_type in self.priority_group.set_dict :
            priority_group = self.priority_group.set_dict[traffic_type]
            if priority_group.configured == False:
                continue
            for q_type in priority_group.queue :
                if q_type not in queue_buffer_limit :
                    queue_buffer_limit[q_type] = {}
                if q_type not in queue_buffer_unlimited :
                    queue_buffer_unlimited[q_type] = {}
                num_queues = len(priority_group.queue[q_type])
                unlimited  = priority_group.egress_buffer.set_dict[q_type].unlimited
                for q_id in priority_group.queue[q_type].keys() :
                    queue_buffer_limit[q_type][q_id] = {}
                    if unlimited == True :
                        queue_buffer_unlimited[q_type][q_id] = 1
                    else :
                        queue_buffer_unlimited[q_type][q_id] = 0

                if unlimited == True :
                    continue
                if num_queues == 0 :
                    # no queues of this type: nothing to divide the
                    # minimum buffer across (also avoids a ZeroDivisionError
                    # in the division below)
                    continue

                # allocate minimum buffer cells
                buffer_type = 'minimum'
                min_percent = priority_group.egress_buffer.set_dict[q_type].min_percent
                # divide by 100.0: with an integer percent the previous
                # "min_percent / 100" truncated to zero under Python 2
                # integer division
                min_buffer_limit = int((min_percent / 100.0) * total_mem_cells)
                error = self.buffer_desc.allocate_cells('egress', buffer_type, min_buffer_limit)
                if error :
                    min_buffer_limit = 0
                # divide up the minimum cells among the assigned egress queues
                min_buffer_limit = int(min_buffer_limit / num_queues)
                for q_id in priority_group.queue[q_type].keys() :
                    queue_buffer_limit[q_type][q_id] = {}
                    queue_buffer_limit[q_type][q_id][buffer_type] = min_buffer_limit
                    self.buffer_comment += '# %s queue %d %s limit: %d\n' % (q_type,
                                                                             q_id,
                                                                             buffer_type,
                                                                             queue_buffer_limit[q_type][q_id][buffer_type])
        remaining_buffer_cells = self.buffer_desc.get_remaining_cells('egress')
        self.buffer_comment += '# total minimum buffer cells: %d\n' % (total_mem_cells - remaining_buffer_cells)

        # pass 2: set service pool sizes: these can be oversubscribed and
        # are not allocated from the total cells
        min_sp_limit = 3 * self.hardware.max_frame_cells  # hardware requirement, per register spec
        for sp_id in range(num_service_pools) :
            eg_sp_buffer_limit[sp_id] = {}
            eg_sp_buffer_limit[sp_id]['green']  = min_sp_limit
            if color_aware_flag == True :
                eg_sp_buffer_limit[sp_id]['yellow'] = min_sp_limit
                eg_sp_buffer_limit[sp_id]['red']    = min_sp_limit
            else :
                eg_sp_buffer_limit[sp_id]['yellow'] = None
                eg_sp_buffer_limit[sp_id]['red']    = None
            green_label = ''
            if color_aware_flag == True :
                green_label = 'green'
            if sp_id in self.egress_service_pool.pool_dict and \
                   self.egress_service_pool.pool_dict[sp_id].configured  == True :
                sp_percent = self.egress_service_pool.pool_dict[sp_id].percent
                # 100.0: avoid integer-division truncation (see above)
                green_sp_limit = int(total_mem_cells * (sp_percent / 100.0))
                eg_sp_buffer_limit[sp_id]['green'] = green_sp_limit
                if color_aware_flag == True :
                    # set the color-aware limits; the previous
                    # float(x / 100) applied float() AFTER the division,
                    # so integer percents truncated to zero
                    yellow_sp_cells = int(eg_sp_buffer_limit[sp_id]['green'] * (self.traffic.yellow_limit_percent / 100.0))
                    eg_sp_buffer_limit[sp_id]['yellow'] = yellow_sp_cells
                    red_sp_cells = int(eg_sp_buffer_limit[sp_id]['green'] * (self.traffic.red_limit_percent / 100.0))
                    eg_sp_buffer_limit[sp_id]['red'] = red_sp_cells
            self.buffer_comment += '# service pool %d %s limit: %d\n' % (sp_id, green_label, eg_sp_buffer_limit[sp_id]['green'])
            if color_aware_flag == True :
                self.buffer_comment += '# service pool %d yellow limit: %d\n' % (sp_id, eg_sp_buffer_limit[sp_id]['yellow'])
                self.buffer_comment += '# service pool %d red limit: %d\n' % (sp_id, eg_sp_buffer_limit[sp_id]['red'])

        # pass 3: set per-queue service pool buffer limits
        for traffic_type, priority_group in self.priority_group.set_dict.iteritems() :
            if priority_group.configured == False :
                continue
            for q_type in priority_group.queue :
                sp_id           = priority_group.service_pool
                sp_mem_cells    = eg_sp_buffer_limit[sp_id]['green']
                q_buffer_config = priority_group.egress_buffer.set_dict[q_type]
                if q_type not in queue_buffer_unlimited :
                    queue_buffer_unlimited[q_type] = {}
                if q_type not in queue_buffer_color_aware :
                    queue_buffer_color_aware[q_type] = {}
                green_sp_cells  = None
                yellow_sp_cells = None
                red_sp_cells    = None
                buffer_dict = { 'shared' : green_sp_cells }
                if q_buffer_config.unlimited == False :
                    # 100.0: avoid integer-division truncation (see above)
                    green_sp_cells = int((q_buffer_config.sp_percent / 100.0) * sp_mem_cells)
                    if color_aware_flag == True :
                        yellow_sp_cells = int(green_sp_cells * (self.traffic.yellow_limit_percent / 100.0))
                        red_sp_cells    = int(green_sp_cells * (self.traffic.red_limit_percent / 100.0))
                        buffer_dict = { 'shared green'  : green_sp_cells,
                                        'shared yellow' : yellow_sp_cells,
                                        'shared red'    : red_sp_cells }
                    else :
                        buffer_dict = { 'shared' : green_sp_cells }

                for q_id in priority_group.queue[q_type].keys() :
                    for name in buffer_dict.keys() :
                        if q_buffer_config.unlimited == True :
                            self.buffer_comment += '# %s queue %d %s limit: unlimited\n' % (q_type, q_id, name)
                        else :
                            if q_id not in queue_buffer_limit[q_type] :
                                queue_buffer_limit[q_type][q_id] = {}
                            queue_buffer_limit[q_type][q_id][name] = buffer_dict[name]
                            self.buffer_comment += '# %s queue %d %s limit: %s\n' % (q_type, q_id, name,
                                                                                     str(buffer_dict[name]))
                        queue_buffer_unlimited[q_type][q_id]   = q_buffer_config.unlimited
                        queue_buffer_color_aware[q_type][q_id] = color_aware_flag

        self.buffer_comment += '\n'

    # ----------------------------------------------------------
    #
    #          c a l c u l a t e __ b u f f e r __ s i z e s
    #
    # ----------------------------------------------------------
    def __calculate_buffer_sizes (self) :
        """Recompute all buffer allocations.

        Clears the buffer comment trace, then runs the ingress and
        egress calculations, in that order.
        """
        self.buffer_comment = ""
        for calculate in (self.__calculate_ingress_buffers,
                          self.__calculate_egress_buffers) :
            calculate()

    # ----------------------------------------------------------
    #
    #            ____ s e t __ m c __ q u e u e s
    #
    # ----------------------------------------------------------
    def __set_mc_queues (self) :
        """Align the multicast queues with the unicast queue setup when
        the hardware provides a full set of 8 MC queues; otherwise do
        nothing.
        """
        if self.hardware.num_mc_queues != 8 :
            return
        self.cos_queue.match_mc_queues()

    # ----------------------------------------------------------
    #
    #     ____ c r e a t e __ q u e u e __ d i c t
    #
    # ----------------------------------------------------------
    def __create_queue_dict (self) :
        """Build self.queue_dict: queue type -> {queue id: scheduling weight}.

        Weights come from each configured priority group; unicast ('uc')
        weights are mirrored onto the CPU queue, and the CPU exception
        queues then get their own fixed weight.
        """
        self.queue_dict = {}
        cpu_queue = 'cpu'
        # Create the CPU entry unconditionally: the exception-queue loop
        # at the bottom writes into it regardless, which previously raised
        # KeyError when no priority group was configured (the key was only
        # created lazily inside the loop below).
        self.queue_dict[cpu_queue] = {}
        for traffic_type in self.priority_group.set_dict :
            priority_group = self.priority_group.set_dict[traffic_type]
            if priority_group.configured == False :
                continue
            for q_type in priority_group.queue :
                if q_type not in self.queue_dict :
                    self.queue_dict[q_type] = {}
                weight = priority_group.weight
                for q_id in priority_group.queue[q_type].keys() :
                    self.queue_dict[q_type][q_id] = weight
                    if q_type == 'uc' :
                        # use the same values for the CPU queue
                        self.queue_dict[cpu_queue][q_id] = weight
        # set the CPU exception queue weights
        for q_id in range(self.exception_q_start, self.num_cpu_queues) :
            self.queue_dict[cpu_queue][q_id] = self.exception_q_weight

    def __update_uft (self) :
        """Apply the configured forwarding-table profile by invoking the
        external uft-update helper.

        The argv is passed as a list so the profile name reaches the
        helper as a single argument even if it contains whitespace; the
        previous '%s'-format + str.split() approach would have word-split
        the profile value.
        """
        subprocess.call(['/usr/lib/cumulus/uft-update',
                         '-p', str(self.forwarding_table.profile)])

    # ----------------------------------------------------------
    #
    #                  c h e c k __ c o n f i g
    #
    # ----------------------------------------------------------
    def check_config (self) :
        """Ask every registered configuration manager to validate its
        portion of the configuration."""
        for cfg_manager in self.manager_list :
            cfg_manager.check_config()

    # ----------------------------------------------------------
    #
    #                  p r o c e s s __ c o n f i g
    #
    # ----------------------------------------------------------
    def process_config (self) :
        """Run the full configuration pass.

        Order matters: MC queue setup first, then every manager's own
        processing, then queue-dict construction, buffer sizing, and
        finally the forwarding-table (UFT) update.
        """
        self.__set_mc_queues()
        for mgr in self.manager_list :
            mgr.process_config()
        for finalize in (self.__create_queue_dict,
                         self.__calculate_buffer_sizes,
                         self.__update_uft) :
            finalize()

# ==============================================================================
#
#                   I N G R E S S __ R E G I S T E R __ M A N A G E R
#
# ==============================================================================
class IngressRegisterManager(RegisterManager):

    def __init__(self, chip, config_manager) :
        """Set up the ingress register manager: per-section register
        buckets, the report header, and the egr_pri_cng_map geometry
        constants."""
        super(IngressRegisterManager,self).__init__(chip, config_manager)

        self.header_str = "Ingress Registers"

        # one (initially empty) register list per output section
        self.section_dict = {}
        for section_name in ("priority mapping",
                             "scheduling",
                             "priority flow control",
                             "flow control",
                             "buffer management",
                             "ports") :
            self.section_dict[section_name] = []

        # geometry of the egr_pri_cng_map table
        self.egr_pri_cng_map_size   = 4224
        self.egr_pri_map_port_shift = 6

    # ------------------------------------------------------------------
    #
    #                  g e n e r a t e __ c n g __ m a p
    #
    # ------------------------------------------------------------------
    def generate_cng_map (self) :
        """Generate the priority / congestion (CNG) mapping registers.

        All registers go into the 'priority mapping' section:
          - reset the 802.1p remark flag, map untagged packets to
            internal priority 0
          - if the packet priority source is 802.1p: program
            ing_pri_cng_map from the pkt2cos / pkt2color tables
          - if the source is dscp: enable per-port DSCP trust and
            program dscp_table per (port, dscp)
          - if remarking on 802.1p: program egr_pri_cng_map and enable
            remark_outer_dot1p on egress
        """

        section = "priority mapping"

        # color name -> hardware CNG field encoding
        cng_dict = { 'green'  : 0,
                     'yellow' : 3,
                     'red'    : 1 }

        # XXX verify the conversion from ingress/internal priority to cos ID is correct

        # initialize the packet remark flag
        value_list  = []
        value_pairs = [ValuePair('remark_outer_dot1p', 0)]
        name = 'egr_vlan_control_1'
        self.add_new_register(name, section, 'modreg', value_list, value_pairs)

        # maps untagged packets to internal priority
        value_list = ["0", "64"]
        value_pairs = [ValuePair("pri", 0), ValuePair("cng",0)]
        name = "ing_untagged_phb"
        self.add_new_register(name, section, "write", value_list, value_pairs)

        if self.config_manager.traffic.packet_priority_source == '802.1p' :
            # initialize the mapping between the VLAN tag priority and CFI fields
            # to the internal priority
            pri_cng_idx = 0
            pri_cng_entries = 1024
            value_list = ["%d" % pri_cng_idx, "%d" % pri_cng_entries]
            value_pairs = [ValuePair("pri", 0), ValuePair("cng",0)]
            name = "ing_pri_cng_map"
            self.add_new_register(name, section, "write", value_list, value_pairs)

            # maps the VLAN tag priority and CFI fields to configured internal priorities
            # -- we are initializing only the first profile in the table: the TRUST_DOT1P_PTR field
            #    must be used to index into the table, and it must be set to zero
            pri_cng_entries = 2
            profile_idx = 0
            for packet_priority, cos_id in self.config_manager.traffic.pkt2cos.iteritems() :
                cng = cng_dict[self.config_manager.traffic.pkt2color[packet_priority]]
                # index: profile in bits 4+, packet priority in bits 3-1;
                # two entries written per priority (presumably to cover
                # both CFI values in bit 0 -- TODO confirm)
                pri_cng_idx = (profile_idx << 4 | packet_priority << 1)
                value_list = ["%d" % pri_cng_idx, "%d" % pri_cng_entries]
                value_pairs = [ValuePair("pri", cos_id), ValuePair("cng",cng)]
                self.add_new_register(name, section, "modify", value_list, value_pairs)

        elif self.config_manager.traffic.packet_priority_source == 'dscp' :
            # dscp table config
            logical_port_list = self.config_manager.port_desc.port_map['logical'].keys()
            logical_port_list.sort()
            for logical_port in logical_port_list :
                port = logical_port + 1 # converting from the xe<n> label
                # NOTE(review): value_list mixes a formatted string and a
                # bare int here, unlike the all-string style used above --
                # confirm downstream formatting accepts both
                value_list = ["%d" % port, 1]
                value_pairs = [ValuePair('trust_dscp_v4', 1),
                               ValuePair('trust_dscp_v6', 1)]
                name = 'port'
                self.add_new_register(name, section, "modify", value_list, value_pairs)

                # for each dscp value
                dscp_max = len(self.config_manager.traffic.pkt2cos)
                for dscp, cos_id in self.config_manager.traffic.pkt2cos.iteritems() :
                    # generate the table index, write out the ingress priority and cng
                    cng = cng_dict[self.config_manager.traffic.pkt2color[dscp]]
                    table_idx  = (port * dscp_max) + dscp
                    value_list = ['%d' % table_idx, 1]
                    value_pairs = [ValuePair('dscp', dscp),
                                   ValuePair('pri',  cos_id),
                                   ValuePair('cng',  cng)]
                    name = "dscp_table"
                    self.add_new_register(name, section, "modify", value_list, value_pairs)

        if self.config_manager.traffic.remark_packet_priority == '802.1p' :
            # initialize egr_pri_cng_map
            pri_cng_idx = 0
            pri_cng_entries = self.egr_pri_cng_map_size
            value_list = ["%d" % pri_cng_idx, "%d" % pri_cng_entries]
            value_pairs = [ValuePair("pri", 0), ValuePair("cfi",0)]
            name = "egr_pri_cng_map"
            self.add_new_register(name, section, "write", value_list, value_pairs)

            internal_priority_shift = 2
            port_shift = self.egr_pri_map_port_shift
            # NOTE(review): this branch reads self.port_desc while the
            # dscp branch above reads self.config_manager.port_desc --
            # confirm both resolve to the same port descriptor
            logical_port_list = self.port_desc.port_map['logical'].keys()
            logical_port_list.sort()
            for logical_port in logical_port_list :
                port = logical_port + 1 # convert from xe<n> index

                # populate egr_pri_cng_map entries:
                # outgoing 802.1p packet priority will be the CoS value
                for vlan_priority, cos_id in self.config_manager.traffic.pkt2cos.iteritems() :
                    table_idx   = (port << port_shift) | (cos_id << internal_priority_shift) # XXX check this: index with cos ID?
                    value_list  = ['%d' % table_idx, 4]
                    value_pairs = [ValuePair('pri', cos_id),
                                   ValuePair('cfi', 0)]
                    name = 'egr_pri_cng_map'
                    self.add_new_register(name, section, 'write', value_list, value_pairs)

            # enable the packet remark for all ports
            value_list  = []
            value_pairs = [ValuePair('remark_outer_dot1p', 1)]
            name = 'egr_vlan_control_1'
            self.add_new_register(name, section, 'modreg', value_list, value_pairs)

    # ------------------------------------------------------------------
    #
    #                  g e n e r a t e __ p o r t __ r e g
    #
    # ------------------------------------------------------------------
    def generate_port_reg (self) :
        """Emit the baseline per-port register settings: clear the port
        priority, priority mapping, incoming-VID trust and VLAN
        translation enable fields."""
        section = "ports"

        field_defaults = (("port_pri",           "0"),
                          ("pri_mapping",        "0"),
                          ("trust_incoming_vid", "0"),
                          ("vt_enable",          "0"))
        value_pairs = [ValuePair(field, default)
                       for field, default in field_defaults]
        self.add_new_register("port", section, "modify",
                              ["0", "67"], value_pairs)

    # ------------------------------------------------------------------
    #
    #               g e n e r a t e __ p r i o r i t y __ r e g
    #
    # ------------------------------------------------------------------
    def generate_priority_reg (self) :
        """Program the priority -> priority-group mapping registers.

        Builds the per-cos-id group fields (highest cos id first), splits
        them across port_pri_grp0/port_pri_grp1, emits per-port pause
        enable commands, and overrides the group mapping on
        pause-enabled ports (all priorities mapped to group 7).
        """
        section = "priority mapping"

        num_priorities = self.hardware.num_priorities

        # per-priority group mappings, highest cos id first
        values      = []
        pg_pairs    = []
        pause_pairs = []
        for cos_id in range(num_priorities - 1, -1, -1) :
            pg_id = self.config_manager.priority_group.cos2group.get(cos_id, 0)
            pg_pairs.append(ValuePair("pri%d_grp" % cos_id, pg_id))
            # pause-enabled ports map every priority to group 7
            pause_pairs.append(ValuePair("pri%d_grp" % cos_id, 7))

        self.add_new_register("port_pri_grp0", section, "setreg",
                              values, pg_pairs[8:])
        self.add_new_register("port_pri_grp1", section, "setreg",
                              values, pg_pairs[:8])

        # emit the per-port pause enable command (CPU port excluded)
        for sdk_port_label in self.config_manager.port_desc.sdk_port_label_list :
            if sdk_port_label == 'cpu0' :
                continue
            tx_enable = 0
            rx_enable = 0
            for pause_name, pause_cfg in self.config_manager.link_pause.set_dict.iteritems() :
                if sdk_port_label in pause_cfg.sdk_port_list :
                    # enable pause on the port
                    tx_enable = pause_cfg.tx_enable
                    rx_enable = pause_cfg.rx_enable
                    break
            self.add_port_pause_cmd(sdk_port_label, tx_enable, rx_enable)

        # per-port override registers for pause-enabled ports
        for pause_name, pause_cfg in self.config_manager.link_pause.set_dict.iteritems() :
            for port_label in pause_cfg.sdk_port_list :
                self.add_new_register("port_pri_grp0.%s" % port_label,
                                      section, "setreg",
                                      values, pause_pairs[8:])
                self.add_new_register("port_pri_grp1.%s" % port_label,
                                      section, "setreg",
                                      values, pause_pairs[:8])

    # ------------------------------------------------------------------
    #
    #           g e n e r a t e __ s p __ m a p __ r e g s
    #
    # ------------------------------------------------------------------
    def generate_sp_map_regs (self) :
        """Map each ingress priority group to its service pool via the
        port_pg_spid register; unmapped groups default to pool 0."""
        section = "buffer management"

        pg2sp = self.config_manager.priority_group.pg2sp
        num_priority_groups = self.config_manager.hardware.num_priority_groups
        value_pair = [ValuePair("pg%d_spid" % pg_id,
                                "%d" % pg2sp.get(pg_id, 0))
                      for pg_id in range(num_priority_groups)]

        self.add_new_register("port_pg_spid", section, "setreg",
                              [], value_pair)

    # ------------------------------------------------------------------
    #
    #           g e n e r a t e __ s p __ r e g s
    #
    # ------------------------------------------------------------------
    def generate_sp_regs (self) :
        """Generate the ingress service-pool limit registers.

        Per pool: the cell limit, optional color-aware yellow/red
        offsets (expressed as offsets below the green limit), and the
        reset-limit offset.  Finishes with the color_aware register, a
        per-pool enable bitmap.
        """

        section           = "buffer management"
        num_service_pools = self.config_manager.hardware.num_service_pools

        # generate service pool settings
        color_aware_flag = 0  # bitmap: bit <sp_id> set when that pool is color aware
        for sp_id in range(num_service_pools) :
            name = "buffer_cell_limit_sp[%s]" % sp_id
            sp_limit = 0
            if sp_id in self.config_manager.buffer_desc.ing_sp_buffer_limit :
                sp_limit = self.config_manager.buffer_desc.ing_sp_buffer_limit[sp_id]['green']
            value_list = [sp_limit]
            value_pair = []
            self.add_new_register(name, section, "setreg", value_list, value_pair)

            if self.config_manager.ingress_service_pool.pool_dict[sp_id].configured != True :
                min_limit       = 0
                sp_yellow_limit = 0
                sp_red_limit    = 0
            else :
                # min_limit tracks the smallest configured limit so the
                # reset offset below never exceeds any of them
                min_limit = sp_limit
                if self.config_manager.buffer_desc.ing_sp_buffer_limit[sp_id]['yellow'] != None:
                    color_aware_flag |= 1 << sp_id
                    name = 'cell_spap_yellow_offset_sp[%d]' % sp_id
                    sp_yellow_limit = self.config_manager.buffer_desc.ing_sp_buffer_limit[sp_id]['yellow']
                    # the register takes an offset below the green limit,
                    # not an absolute limit
                    sp_yellow_offset = sp_limit - sp_yellow_limit
                    value_list = []
                    value_pair = [ValuePair('offset', sp_yellow_offset)]
                    self.add_new_register(name, section, "setreg", value_list, value_pair)
                    if min_limit > sp_yellow_limit :
                        min_limit = sp_yellow_limit
                if self.config_manager.buffer_desc.ing_sp_buffer_limit[sp_id]['red'] != None:
                    color_aware_flag |= 1 << sp_id
                    sp_red_limit = self.config_manager.buffer_desc.ing_sp_buffer_limit[sp_id]['red']
                    sp_red_offset = sp_limit - sp_red_limit
                    name = 'cell_spap_red_offset_sp[%d]' % sp_id
                    value_list = []
                    value_pair = [ValuePair('offset', sp_red_offset)]
                    self.add_new_register(name, section, "setreg", value_list, value_pair)
                    if min_limit > sp_red_limit :
                        min_limit = sp_red_limit

            # reset offset: 100 cells, clamped to the smallest limit
            sp_reset_offset  = 100
            if sp_reset_offset >= min_limit:
                sp_reset_offset = min_limit
            name = 'cell_reset_limit_offset_sp[%d]' % sp_id
            value_list = [sp_reset_offset]
            value_pair = []
            self.add_new_register(name, section, "setreg", value_list, value_pair)

        # ingress service pool color-aware enable bit map
        name = 'color_aware'
        value_list = []
        value_pair = [ValuePair('enable', color_aware_flag, 'hex')]
        self.add_new_register(name, section, "setreg", value_list, value_pair)

    # ------------------------------------------------------------------
    #
    #           g e n e r a t e __ p g __ b u f __ r e g s
    #
    # ------------------------------------------------------------------
    def generate_pg_buf_regs (self) :
        """Generate the per-priority-group ingress buffer registers.

        After zero-initializing the min/headroom registers, emits for
        each priority group in buffer_desc.pg_buffer_limit: the CPU-port
        and per-port-range min-cell registers, per-port headroom
        registers, and shared-limit registers.  Pause-enabled ports get
        overrides on priority group 7, and the method ends with the
        use_sp_shared service-pool enable map.
        """

        section           = "buffer management"
        value_list      = ["0"]
        value_pair_list = []

        pg_buffer_limit = self.config_manager.buffer_desc.pg_buffer_limit

        # generate the per-port min pg enable flag
        name = "port_min_pg_enable.$allports"
        self.add_new_register(name, section, "setreg", value_list, value_pair_list)

        # initialize the port_min_cell setting
        name = "port_min_cell"
        self.add_new_register(name, section, "setreg", value_list, value_pair_list)

        # initialize the pg_min_cell setting
        name = "pg_min_cell"
        self.add_new_register(name, section, "setreg", value_list, value_pair_list)

        # initialize the pg headroom config
        name = "pg_hdrm_limit_cell"
        value_list = []
        value_pair_list = [ValuePair("pg_ge", 0), ValuePair("pg_hdrm_limit", 0)]
        self.add_new_register(name, section, "setreg", value_list, value_pair_list)

        # generate the per-port pg cell settings
        shared_buffer_enable_flag = 0  # bitmap of service pools whose shared buffer is used
        for pg_id in pg_buffer_limit :

            # set the CPU port min
            name = "pg_min_cell"
            value_list = ["%d" % self.config_manager.buffer_desc.cpu_pg_min_cells]
            value_pair_list = []
            self.add_new_register(name + '[%s].' % pg_id + 'cpu0',
                                  section,
                                  "setreg",
                                  value_list,
                                  value_pair_list)

            if pg_buffer_limit[pg_id]['pg_min'] > 0 :
                # walk the ports grouped by bandwidth class; per-port cells
                # come from get_weighted_per_port_cells for that class
                port_bw = self.port_desc.get_port_bw()
                for bw in port_bw :
                    port_range_list = self.port_desc.get_port_bw_range_list(bw)

                    for port_range in port_range_list :
                        start_port      = port_range[0]
                        end_port        = port_range[1]
                        if start_port == end_port :
                            port_label = 'xe%d' % start_port
                        else :
                            port_label = 'xe%d-xe%d' % (start_port, end_port)

                        # pg min limit
                        per_port_cells  = self.port_desc.get_weighted_per_port_cells(pg_buffer_limit[pg_id]['pg_min'], bw)
                        name            = "pg_min_cell"
                        value_list      = ["%d" % per_port_cells]
                        value_pair_list = []
                        self.add_new_register(name + "[%s]." % pg_id + port_label,
                                              section,
                                              "setreg",
                                              value_list,
                                              value_pair_list)

            # pg headroom limit
            for sdk_port_label, buffer_limit in pg_buffer_limit[pg_id]['pg_hdrm'].iteritems() :
                name = 'pg_hdrm_limit_cell'
                value_list      = []
                value_pair_list = [ValuePair("pg_hdrm_limit", buffer_limit)]
                value_pair_list.append(ValuePair("pg_ge", 1)) # XXX set this correctly
                # NOTE(review): register suffix uses '(%s).' here but
                # '[%s].' elsewhere in this method -- confirm the intended
                # index syntax
                self.add_new_register(name + "(%s)." % pg_id + sdk_port_label,
                                      section,
                                      "modreg",
                                      value_list,
                                      value_pair_list)

            # Shared buffer registers
            if pg_buffer_limit[pg_id]['shared'] > 0 :
                # enable access to the shared service pool buffer
                sp_id = self.config_manager.priority_group.pg2sp[pg_id]
                shared_buffer_enable_flag |= (0x1 << sp_id)
                value_list      = []
                value_pair_list = []
                value_pair_list.append(ValuePair("pg_shared_limit",   pg_buffer_limit[pg_id]['shared']))
                value_pair_list.append(ValuePair("pg_shared_dynamic",   0))
                self.add_new_register("pg_shared_limit_cell(%s)" % pg_id,
                                      section,
                                      "setreg",
                                      value_list,
                                      value_pair_list)

        # adjust for ports with pause enabled
        # (pause traffic is mapped to priority group 7 -- see
        # generate_priority_reg)
        pg_id           = 7
        name            = "pg_min_cell"
        value_pair_list = []
        for pause_name, port_pause_config in self.config_manager.link_pause.set_dict.iteritems() :
            for port_label in port_pause_config.sdk_port_list :
                value_list = ["%d" % port_pause_config.minimum_cell_limit]
                self.add_new_register(name + "[%s]." % pg_id + port_label,
                                      section,
                                      "setreg",
                                      value_list,
                                      value_pair_list)

        name = "pg_hdrm_limit_cell"
        value_list = []
        value_pair_list = []
        for pause_name, port_pause_config in self.config_manager.link_pause.set_dict.iteritems() :
            for port_label in port_pause_config.sdk_port_list :
                # fixed 160-cell headroom on pause-enabled ports --
                # presumably sized for worst-case in-flight data; TODO
                # confirm against the hardware spec
                value_pair_list = [ValuePair("pg_ge", 1), ValuePair("pg_hdrm_limit", 160)]
                self.add_new_register(name + "[%s]." % pg_id + port_label,
                                      section,
                                      "setreg",
                                      value_list,
                                      value_pair_list)

        # generate use_sp_shared flag
        name = "use_sp_shared"
        value_list = ["0x%x" % shared_buffer_enable_flag]
        value_pair_list = []
        self.add_new_register(name, section, "setreg", value_list, value_pair_list)

    # ------------------------------------------------------------------
    #
    #       g e n e r a t e __ g l o b a l  __ h d r m __ r e g s
    #
    # ------------------------------------------------------------------
    def generate_global_hdrm_regs (self) :
        """Emit the global headroom cell limit register.

        Writes the configured global headroom (in cells) as a single
        "setreg" in the buffer management section.
        """

        # Fix: value_pair_list was initialized to a dict ({}) here while
        # every other add_new_register() call site passes a list; use a
        # list for consistency.
        value_list      = []
        value_pair_list = []
        name            = "global_hdrm_limit"
        section         = "buffer management"
        value_list.append("%d" % self.config_manager.buffer_desc.global_headroom)
        self.add_new_register(name, section, "setreg", value_list, value_pair_list)

    # ------------------------------------------------------------------
    #
    #       g e n e r a t e __ s h a r e d __ s p __ r e g s
    #
    # ------------------------------------------------------------------
    def generate_shared_sp_regs (self) :
        """Emit the shared service-pool buffer registers.

        Writes the global shared-cell limit, zeroes the per-port shared
        defaults for all ports, then overrides the limit (and enables
        all priority groups) on every port with link pause configured.
        """

        section = "buffer management"

        # overall shared buffer cell limit
        limit = self.config_manager.buffer_desc.shared_buffer_limit
        self.add_new_register("buffer_cell_limit_sp_shared", section,
                              "setreg", ["%d" % limit], [])

        # default every port to no shared-cell usage
        self.add_new_register("port_shared_max_pg_enable.$allports", section,
                              "setreg", [0], [])
        self.add_new_register("port_max_shared_cell.$allports", section,
                              "setreg", [0], [])

        # ports with pause enabled get a real shared-cell limit and all
        # eight priority groups enabled
        no_pairs   = []
        pause_dict = self.config_manager.link_pause.set_dict
        for pause_name, pause_cfg in pause_dict.iteritems() :
            for port_label in pause_cfg.sdk_port_list :
                self.add_new_register('port_max_shared_cell.%s' % port_label,
                                      section, "setreg",
                                      [pause_cfg.shared_cell_limit], no_pairs)
                self.add_new_register('port_shared_max_pg_enable.%s' % port_label,
                                      section, "setreg", [0xff], no_pairs)

    # ------------------------------------------------------------------
    #
    #           g e n e r a t e __ b u f f e r __ r e g s
    #
    # ------------------------------------------------------------------
    def generate_buffer_regs (self) :
        """Run every buffer-management register generator, in order."""
        for generate in (self.generate_sp_map_regs,
                         self.generate_sp_regs,
                         self.generate_shared_sp_regs,
                         self.generate_pg_buf_regs,
                         self.generate_global_hdrm_regs) :
            generate()

    # --------------------------------------------------------------------
    #
    #          g e n e r a t e __ f l o w c o n t r o l __ r e g s
    #
    # --------------------------------------------------------------------
    def generate_flowcontrol_regs (self) :
        """Generate the "flow control" section registers.

        Accumulates the ingress priority bitmap and the multicast /
        unicast cos maps over every configured lossless priority group,
        then emits the xon-enable, prio2cos, per-port PFC control and
        xlport_config registers via add_new_register().
        """

        section = "flow control"
        name    = "port_pri_xon_enable.$allports"
        ingress_priority_bitmap = 0
        mc_cos_map = 0
        uc_cos_map = 0
        lossless_flag = False  # True once any configured lossless group is seen

        # fold every lossless group's cos list into the bitmaps
        for label, priority_group in self.config_manager.priority_group.set_dict.iteritems() :
            if priority_group.configured == False :
                continue
            if priority_group.lossless_flag == False :
                continue
            lossless_flag = True
            for cos_id in priority_group.cos_list :
                ingress_priority_bitmap |= 1 << cos_id
                cos_queue_config = self.config_manager.cos_queue.cos_id_dict[cos_id]
                mc_cos_map |= (1 << cos_queue_config.mc)
                uc_cos_map |= (1 << cos_queue_config.uc)
        value_list      = ["%d" % ingress_priority_bitmap]
        value_pair_list = []
        self.add_new_register(name, section, "setreg", value_list, value_pair_list)

        # default values for port_llfc_cfg register are OK
        name            = "prio2cos_llfc0"
        value_list      = []
        value_pair_list = [ValuePair("mc_cos0_5_bmp", mc_cos_map), ValuePair("uc_cos0_10_bmp", uc_cos_map)]
        self.add_new_register(name, section, "setreg", value_list, value_pair_list)

        # PFC stats are enabled whenever at least one lossless group exists
        if lossless_flag == True :
            pfc_enable = 1
        else :
            pfc_enable = 0
        # NOTE(review): 'priority_group' below is the leaked loop variable
        # from the iteration above -- this raises NameError if set_dict is
        # empty, and only the last-visited group's port groups are
        # programmed. Confirm this is intentional.
        name       = "xmac_pfc_ctrl.$allports"
        value_list = []
        for port_group_name in priority_group.port_group_name_list :
            port_group = priority_group.set_dict[port_group_name]
            for sdk_port_label in port_group.sdk_port_list :
                tx_enable = 0
                if port_group.tx_enable:
                    tx_enable = 1
                rx_enable = 0
                if port_group.rx_enable:
                    rx_enable = 1
                value_pair_list = [ValuePair("tx_pfc_en", tx_enable),
                                   ValuePair("rx_pfc_en", rx_enable),
                                   ValuePair("pfc_stats_en", pfc_enable)]
                self.add_new_register(name, section, "modreg", value_list, value_pair_list)

        # port defaults: only pause rx enabled; PFC tracks lossless config
        name = "xlport_config"
        value_list = []
        value_pair_list = [ValuePair("xpause_tx_en", 0),
                           ValuePair("xpause_rx_en", 1),
                           ValuePair("xpause_en", 0),
                           ValuePair("llfc_en", 0),
                           ValuePair("pfc_enable", pfc_enable)]
        self.add_new_register(name, section, "modreg", value_list, value_pair_list)

    # ------------------------------------------------------------------
    #
    #       g e n e r a t e __ m i s c __ r e g s
    #
    # ------------------------------------------------------------------
    def generate_misc_regs (self) :
        """Emit miscellaneous clean-up registers.

        Sets the per-port maximum packet size and disables color-aware
        buffer accounting.
        """

        # XXX check for cells vs bytes
        self.add_new_register("port_max_pkt_size", "ports", "setreg",
                              ["%d" % self.hardware.max_frame_cells], [])

        # color-aware accounting is disabled
        self.add_new_register("color_aware", "buffer management", "setreg",
                              ["0"], [])

    # ------------------------------------------------------------------
    #
    #           g e n e r a t e __ o u t p u t __ o b j e c t s
    #
    # ------------------------------------------------------------------
    def generate_output_objects(self) :
        """Generate every register group managed by this class."""
        for generate in (self.generate_cng_map,
                         self.generate_port_reg,
                         self.generate_misc_regs,
                         self.generate_priority_reg,
                         self.generate_buffer_regs,
                         self.generate_flowcontrol_regs) :
            generate()

# ==============================================================================
#
#                 T 2 __ I N G R E S S __ R E G I S T E R __ M A N A G E R
#
# ==============================================================================
class T2_IngressRegisterManager(IngressRegisterManager):

    def __init__(self, chip, config_manager) :
        # Trident2-specific constants layered on top of the generic
        # ingress register manager initialization.
        super(T2_IngressRegisterManager,self).__init__(chip, config_manager)

        self.mmu_ports_per_pipe     = 53    # MMU ports per pipeline
        self.cpu_mmu_port           = 52    # MMU port used by the CPU port
        self.egr_pri_cng_map_size   = 6784  # total egr_pri_cng_map entries
        self.egr_pri_map_port_shift = 6     # port field shift in egr_pri_cng_map index

    # ------------------------------------------------------------------
    #
    #           g e n e r a t e __ p o r t __ p g __ r e g s
    #
    # ------------------------------------------------------------------
    def generate_port_pg_regs (self) :
        """Populate the per-port, per-priority-group buffer tables.

        Writes default thdi_port_pg_config_x/y entries first, then one
        table entry per (port, priority group) carrying the weighted
        minimum, headroom and shared cell limits, and finally the CPU
        port entry for each priority group.
        """

        section             = "buffer management"
        num_priority_groups = self.hardware.num_priority_groups
        logical_port_list   = self.port_desc.port_map['logical'].keys()
        pg_buffer_limit     = self.config_manager.buffer_desc.pg_buffer_limit

        # sort the logical port list for per-port configurations
        logical_port_list.sort()

        # generate default port pg settings
        name = ['thdi_port_pg_config_x', 'thdi_port_pg_config_y']
        value_list = ['0', '424']
        value_pair_list = [ValuePair("pg_min_limit",       0),
                           ValuePair("pg_shared_limit",    0),
                           ValuePair("pg_shared_dynamic",  0),
                           ValuePair("pg_reset_offset",    0),
                           ValuePair("pg_reset_floor",     0),
                           ValuePair("pg_hdrm_limit",      0),
                           ValuePair("pg_gbl_hdrm_en",     0)]
        self.add_new_register(name[0], section, "write", value_list, value_pair_list)
        self.add_new_register(name[1], section, "write", value_list, value_pair_list)

        for pg_id in range(num_priority_groups):
            # generate the weighted per-port, per-pg buffer allocation
            # field values for the minimum and pg headroom buffers

            if pg_id not in pg_buffer_limit :
                continue

            for sdk_port_label in self.config_manager.port_desc.sdk_port_label_list :
                if sdk_port_label == 'cpu0' :
                    continue
                logical_port = self.config_manager.port_desc.label_2_logical[sdk_port_label]
                mmu_port = self.port_desc.port_map['logical'][logical_port]['mmu_port']
                # mmu ports below 64 go to the x table, the rest to y
                if mmu_port < 64 :
                    name_idx = 0
                else :
                    name_idx = 1
                bw = self.port_desc.port_map['logical'][logical_port]['bw']
                comment = 'xe%d pipe %s mmu_port %d pg %d' % (logical_port, name_idx, mmu_port & 0x3f, pg_id)
                value_pair_list = []

                # pg 7 is the pause priority group: check whether this
                # port has link pause configured
                pause_flag = False
                if pg_id == 7 :
                    for pause_name, port_pause_config in self.config_manager.link_pause.set_dict.iteritems() :
                        if sdk_port_label in port_pause_config.sdk_port_list :
                            pause_flag = True
                            break

                # set the pg min cell limit
                # NOTE(review): when pause_flag is True, 'port_pause_config'
                # is the leaked loop variable from the search just above;
                # it is always bound in that case (set before break).
                if pause_flag :
                    value_pair_list.append(ValuePair("pg_min_limit", port_pause_config.minimum_cell_limit))
                elif pg_buffer_limit[pg_id]['pg_min'] > 0 :
                    #print 'pg %d min buffer limit %s' % (pg_id, pg_buffer_limit[pg_id]['pg_min'])
                    per_port_min_cells = self.port_desc.get_weighted_per_port_cells(pg_buffer_limit[pg_id]['pg_min'], bw)
                    value_pair_list.append(ValuePair("pg_min_limit", int(per_port_min_cells)))

                # set the pg headroom cell limit
                if pause_flag :
                    value_pair_list.append(ValuePair("pg_gbl_hdrm_en", 1))
                    value_pair_list.append(ValuePair("pg_hdrm_limit", port_pause_config.pg_hdrm))
                    value_pair_list.append(ValuePair("pg_reset_offset", 0))
                elif sdk_port_label in pg_buffer_limit[pg_id]['pg_hdrm'] :
                    buffer_limit = pg_buffer_limit[pg_id]['pg_hdrm'][sdk_port_label]
                    value_pair_list.append(ValuePair("pg_hdrm_limit", buffer_limit))

                    reset_offset = 10 * bw  # 100 or 400 cells
                    value_pair_list.append(ValuePair("pg_reset_offset", reset_offset))

                # set the shared buffer limit
                if pause_flag :
                    value_pair_list.append(ValuePair("pg_shared_limit", port_pause_config.shared_cell_limit))
                    value_pair_list.append(ValuePair("pg_reset_floor", port_pause_config.shared_reset_floor))
                elif pg_buffer_limit[pg_id]['shared'] > 0 :
                    shared_limit = pg_buffer_limit[pg_id]['shared']
                    value_pair_list.append(ValuePair("pg_shared_limit", shared_limit))
                    reset_floor = shared_limit - 500
                    if reset_floor < 0 :
                        reset_floor = 0
                    value_pair_list.append(ValuePair("pg_reset_floor", reset_floor))

                # set the global headroom enable field
                # NOTE(review): when pause_flag is True this appends a second
                # pg_gbl_hdrm_en pair (also appended above) -- confirm the
                # register writer takes the last value for duplicate fields.
                value_pair_list.append(ValuePair("pg_gbl_hdrm_en", 1))

                if len(value_pair_list) > 0 :
                    idx = ((mmu_port & 0x3f) * num_priority_groups) + pg_id
                    value_list = ['%d' % idx, '1']
                    self.add_new_register(name[name_idx],
                                          section,
                                          "modify",
                                          value_list,
                                          value_pair_list,
                                          comment)

            # generate the CPU port configuration
            comment = 'cpu0 pipe 0 mmu_port %d pg %d' % (self.cpu_mmu_port, pg_id)
            idx = (self.cpu_mmu_port * num_priority_groups) + pg_id
            value_list = ['%d' % idx, '1']
            value_pair_list = [ValuePair("pg_min_limit", self.config_manager.buffer_desc.cpu_pg_min_cells)]
            self.add_new_register(name[0],
                                  section,
                                  "modify",
                                  value_list,
                                  value_pair_list,
                                  comment)

    # ------------------------------------------------------------------
    #
    #           g e n e r a t e __ s p __ m a p __ r e g s
    #
    # ------------------------------------------------------------------
    def generate_sp_map_regs (self) :
        """Map each priority group to its ingress service pool.

        Builds one pgN_spid field per priority group -- defaulting to
        pool 0 for unmapped groups -- and emits them all in a single
        thdi_port_pg_spid "setreg".
        """

        pg2sp = self.config_manager.priority_group.pg2sp
        pairs = []
        for pg_id in range(self.hardware.num_priority_groups) :
            sp_id = pg2sp[pg_id] if pg_id in pg2sp else 0
            pairs.append(ValuePair("pg%d_spid" % pg_id, "%d" % sp_id))

        self.add_new_register("thdi_port_pg_spid", "buffer management",
                              "setreg", [], pairs)

    # ------------------------------------------------------------------
    #
    #           g e n e r a t e __ s p __ r e g s
    #
    # ------------------------------------------------------------------
    def generate_sp_regs (self) :
        """Generate the ingress service-pool registers.

        For every configured pool: programs the per-port service-pool
        min/max/resume limits, the pool-wide cell limit, the yellow/red
        color offsets (tracking which pools are color aware), the reset
        limit offset, and finally the thdi_pool_config summary.
        """

        section           = "buffer management"
        num_service_pools = self.hardware.num_service_pools

        # initialize the port_sp_config tables
        name = ['thdi_port_sp_config_x', 'thdi_port_sp_config_y']
        logical_port_list = self.port_desc.port_map['logical'].keys()
        logical_port_list.sort()
        for sp_id in range(num_service_pools) :
            if self.config_manager.ingress_service_pool.pool_dict[sp_id].configured != True :
                continue
            limit_cell_count = self.config_manager.buffer_desc.ing_sp_buffer_limit[sp_id]['green']
            # resume 100 cells below the limit, floored at zero
            resume_cell_count = limit_cell_count - 100
            if resume_cell_count < 0 :
                resume_cell_count = 0
            for logical_port in logical_port_list :
                mmu_port = self.port_desc.port_map['logical'][logical_port]['mmu_port']
                # mmu ports below 64 go to the x table, the rest to y
                if mmu_port < 64 :
                    name_idx = 0
                else :
                    name_idx = 1
                table_idx = ((mmu_port & 0x3f) * num_service_pools) + sp_id
                value_list = [table_idx, 1]
                value_pair_list = [ValuePair('port_sp_min_limit', 0),
                                   ValuePair('port_sp_max_limit', limit_cell_count),
                                   ValuePair('port_sp_resume_limit', resume_cell_count)]
                comment = 'xe%d pipe %s mmu_port %d sp %d' % (logical_port, name_idx, mmu_port & 0x3f, sp_id)
                self.add_new_register(name[name_idx],
                                      section,
                                      'modify',
                                      value_list,
                                      value_pair_list,
                                      comment)

        # generate service pool settings
        color_aware_flag = 0
        for sp_id in range(num_service_pools) :
            if self.config_manager.ingress_service_pool.pool_dict[sp_id].configured != True :
                sp_limit = 0
            else :
                sp_limit = self.config_manager.buffer_desc.ing_sp_buffer_limit[sp_id]['green']
            name = 'thdi_buffer_cell_limit_sp'
            value_pair = []
            value_list = ["%d" % sp_limit]
            self.add_new_register(name + '[%d]' % sp_id, section, "setreg", value_list, value_pair)

            if self.config_manager.ingress_service_pool.pool_dict[sp_id].configured != True :
                min_limit       = 0
                sp_yellow_limit = 0
                sp_red_limit    = 0
            else :
                min_limit       = sp_limit
                sp_yellow_limit = sp_limit
                sp_red_limit    = sp_limit
                # yellow/red limits are written as offsets below the green
                # (pool) limit; seeing either marks the pool color aware
                if self.config_manager.buffer_desc.ing_sp_buffer_limit[sp_id]['yellow'] != None:
                    color_aware_flag |= 1 << sp_id
                    name = 'thdi_cell_spap_yellow_offset_sp'
                    sp_yellow_limit = self.config_manager.buffer_desc.ing_sp_buffer_limit[sp_id]['yellow']
                    sp_yellow_offset = min_limit - sp_yellow_limit
                    value_list = []
                    value_pair = [ValuePair('offset', sp_yellow_offset)]
                    self.add_new_register(name + '[%d]' % sp_id, section, "setreg", value_list, value_pair)
                if self.config_manager.buffer_desc.ing_sp_buffer_limit[sp_id]['red'] != None:
                    color_aware_flag |= 1 << sp_id
                    sp_red_limit = self.config_manager.buffer_desc.ing_sp_buffer_limit[sp_id]['red']
                    sp_red_offset = min_limit - sp_red_limit
                    name = 'thdi_cell_spap_red_offset_sp'
                    value_list = []
                    value_pair = [ValuePair('offset', sp_red_offset)]
                    self.add_new_register(name + '[%d]' % sp_id, section, "setreg", value_list, value_pair)
            # min_limit becomes the smallest of the green/yellow/red limits
            if sp_yellow_limit < min_limit :
                min_limit = sp_yellow_limit
            if sp_red_limit < min_limit :
                min_limit = sp_red_limit
            resume_cell_offset = (sp_limit - min_limit) + 100
            # NOTE(review): this zeroes the offset when min_limit is smaller
            # than the computed offset rather than clamping the offset --
            # confirm the comparison is not inverted.
            if min_limit < resume_cell_offset :
                resume_cell_offset = 0
            name = 'thdi_cell_reset_limit_offset_sp'
            value_pair = []
            value_list = ["%d" % resume_cell_offset]
            self.add_new_register(name + '[%d]' % sp_id, section, "setreg", value_list, value_pair)

        name = 'thdi_pool_config'
        value_list = []
        value_pair_list = [ValuePair('public_enable', 0xf), ValuePair('color_aware', color_aware_flag, 'hex')]
        self.add_new_register(name , section, "setreg", value_list, value_pair_list)


    # ------------------------------------------------------------------
    #
    #       g e n e r a t e __ s h a r e d __ s p __ r e g s
    #
    # ------------------------------------------------------------------
    def generate_shared_sp_regs (self) :
        """Emit the public (shared) pool cell limit register."""
        shared_limit = self.config_manager.buffer_desc.shared_buffer_limit
        self.add_new_register("thdi_buffer_cell_limit_public_pool",
                              "buffer management", "setreg",
                              [shared_limit], [])

    # ------------------------------------------------------------------
    #
    #       g e n e r a t e __ g l o b a l  __ h d r m __ r e g s
    #
    # ------------------------------------------------------------------
    def generate_global_hdrm_regs (self) :
        """Write the global headroom limit for both MMU pipes."""
        section = "buffer management"
        values  = ["%d" % self.config_manager.buffer_desc.global_headroom]
        for reg_name in ("thdi_global_hdrm_limit_pipex",
                         "thdi_global_hdrm_limit_pipey") :
            self.add_new_register(reg_name, section, "setreg", values, [])

    # ------------------------------------------------------------------
    #
    #           g e n e r a t e __ b u f f e r __ r e g s
    #
    # ------------------------------------------------------------------
    def generate_buffer_regs (self) :
        """Run the T2 buffer-management generators, in order."""
        for generate in (self.generate_global_hdrm_regs,
                         self.generate_sp_map_regs,
                         self.generate_sp_regs,
                         self.generate_port_pg_regs,
                         self.generate_shared_sp_regs) :
            generate()

    # --------------------------------------------------------------------
    #
    #          g e n e r a t e __ f l o w c o n t r o l __ r e g s
    #
    # --------------------------------------------------------------------
    def generate_flowcontrol_regs (self) :
        """Generate the T2 flow-control registers.

        Builds prio2cos profile entries for every lossless priority
        group, programs per-port xlmac PFC control, and finally issues
        pause commands for ports with link pause configured.
        """

        section = "flow control"

        # use profile index 0, so default values for port_llfc_cfg register are OK
        name       = 'prio2cos_profile'
        value_list = []
        pfc_enable = 0
        for label, priority_group in self.config_manager.priority_group.set_dict.iteritems() :
            if priority_group.configured == False :
                continue
            if priority_group.lossless_flag == False :
                continue
            ingress_priority_bitmap = 0
            mc_cos_map = 0
            uc_cos_map = 0
            pfc_enable = 1
            for cos_id in priority_group.cos_list :
                ingress_priority_bitmap |= 1 << cos_id
                cos_queue_config = self.config_manager.cos_queue.cos_id_dict[cos_id]
                mc_cos_map |= (1 << cos_queue_config.mc)
                uc_cos_map |= (1 << cos_queue_config.uc)
                profile_name = name + '[%d]' % cos_id
                value_pair_list = [ValuePair("cos_bmp", uc_cos_map)]
                self.add_new_register(profile_name, section, "setreg", value_list, value_pair_list)

        # NOTE(review): 'priority_group' is the leaked loop variable from the
        # iteration above -- this raises NameError when set_dict is empty and
        # only the last-visited group's port groups are programmed; confirm
        # this is intentional.
        name       = "xlmac_pfc_ctrl"
        value_list = []
        for port_group_name in priority_group.port_group_name_list :
            port_group = priority_group.set_dict[port_group_name]
            for sdk_port_label in port_group.sdk_port_list :
                tx_enable = 0
                if port_group.tx_enable:
                    tx_enable = 1
                rx_enable = 0
                if port_group.rx_enable:
                    rx_enable = 1
                value_pair_list = [ValuePair("tx_pfc_en", tx_enable),
                                   ValuePair("rx_pfc_en", rx_enable),
                                   ValuePair("pfc_stats_en", pfc_enable)]
                port_reg_name = name + '.' + sdk_port_label
                self.add_new_register(port_reg_name, section, "modreg", value_list, value_pair_list)

        # adjust for ports with pause enabled
        for sdk_port_label in self.config_manager.port_desc.sdk_port_label_list :
            if sdk_port_label == 'cpu0' :
                continue
            tx_enable = 0
            rx_enable = 0
            for pause_name, port_pause_config in self.config_manager.link_pause.set_dict.iteritems() :
                if sdk_port_label in port_pause_config.sdk_port_list :
                    # enable pause on the port
                    tx_enable = port_pause_config.tx_enable
                    rx_enable = port_pause_config.rx_enable
                    break
            self.add_port_pause_cmd(sdk_port_label, tx_enable, rx_enable)

    # ------------------------------------------------------------------
    #
    #       g e n e r a t e __ m i s c __ r e g s
    #
    # ------------------------------------------------------------------
    def generate_misc_regs (self) :
        """Emit miscellaneous clean-up registers.

        Sets the T2 per-port maximum packet size register in the
        "ports" section. (A dead assignment of section = "flow control"
        that was immediately overwritten has been removed.)
        """

        value_list = []
        value_list.append("%d" % 0x32) # XXX check this
        value_pair_list = []
        name = "thdi_port_max_pkt_size"
        section = "ports"
        self.add_new_register(name, section, "setreg", value_list, value_pair_list)

    # ------------------------------------------------------------------
    #
    #               g e n e r a t e __ p r i o r i t y __ r e g
    #
    # ------------------------------------------------------------------
    def generate_priority_reg (self) :
        """Map internal priorities to priority groups.

        Builds one priN_grp field pair per priority (highest priority
        first) and splits the list across the thdi_port_pri_grp0/1
        registers; ports with link pause enabled are then overridden so
        that every priority maps to priority group 7.
        """

        section = "priority mapping"

        max_priority_value = self.hardware.num_priorities

        # initialized the priority->priority group mappings
        values = []
        priority_value_pair = []
        pause_priority_value_pair = []
        # walk priorities highest-first, so after the [8:] / [:8] slices
        # below grp0 carries the low half and grp1 the high half
        # (assumes 16 priorities -- TODO confirm num_priorities)
        for cos_id in reversed(xrange(max_priority_value)) :
            if cos_id in self.config_manager.priority_group.cos2group :
                pg_id = self.config_manager.priority_group.cos2group[cos_id]
            else :
                pg_id = 0
            priority_value_pair.append(ValuePair("pri%d_grp" % cos_id, pg_id))
            pause_priority_value_pair.append(ValuePair("pri%d_grp" % cos_id, 7))

        name = "thdi_port_pri_grp0"
        self.add_new_register(name, section, "setreg", values, priority_value_pair[8:])

        name = "thdi_port_pri_grp1"
        self.add_new_register(name, section, "setreg", values, priority_value_pair[:8])

        # adjust for ports with pause enabled
        for pause_name, port_pause_config in self.config_manager.link_pause.set_dict.iteritems() :
            for port_label in port_pause_config.sdk_port_list :
                name = "thdi_port_pri_grp0.%s" % port_label
                self.add_new_register(name, section, "setreg", values, pause_priority_value_pair[8:])
                name = "thdi_port_pri_grp1.%s" % port_label
                self.add_new_register(name, section, "setreg", values, pause_priority_value_pair[:8])

    # ------------------------------------------------------------------
    #
    #                  g e n e r a t e __ c n g __ m a p
    #
    # ------------------------------------------------------------------
    def generate_cng_map (self) :
        """Generate the priority / congestion (color) mapping tables.

        Initializes the egress and ingress pri/cng maps, programs the
        mapping from the configured packet priority source (802.1p VLAN
        priority or DSCP) to internal priority and congestion color,
        and optionally the egress 802.1p remark tables.

        Fix: the TridentTwoPlusChip check referenced cumulus.platform,
        but this module imports cumulus.platforms (note the trailing
        's'); the isinstance() test would raise AttributeError.
        """

        section = "priority mapping"

        # color name -> hardware congestion (cng) encoding
        cng_dict = { 'green'  : 0,
                     'yellow' : 3,
                     'red'    : 1 }

        # XXX a quick hack: will go away when config moves to the SDK
        egr_vlan_mod = 'modreg'
        egr_value_list  = []
        if isinstance(self.chip, cumulus.platforms.TridentTwoPlusChip) :
            egr_vlan_mod = 'mod'
            egr_value_list = ['0', '617']

        # initialize the packet remark flag
        value_pairs = [ValuePair('remark_outer_dot1p', 0)]
        name = 'egr_vlan_control_1'
        self.add_new_register(name, section, '%s' % egr_vlan_mod, egr_value_list, value_pairs)

        # initialize egr_pri_cng_map
        pri_cng_idx = 0
        value_list = ["%d" % pri_cng_idx, "%d" % self.egr_pri_cng_map_size]
        value_pairs = [ValuePair("pri", 0), ValuePair("cfi",0)]
        name = "egr_pri_cng_map"
        self.add_new_register(name, section, 'mod', value_list, value_pairs)

        # maps untagged packets to internal priority
        value_list = ["0", "64"]
        value_pairs = [ValuePair("pri", 0), ValuePair("cng",0)]
        name = "ing_untagged_phb"
        self.add_new_register(name, section, "write", value_list, value_pairs)

        if self.config_manager.traffic.packet_priority_source == '802.1p' :
            # initialize the mapping between the VLAN tag priority and CFI fields
            # to the internal priority
            pri_cng_idx = 0
            pri_cng_entries = 1024
            value_list = ["%d" % pri_cng_idx, "%d" % pri_cng_entries]
            value_pairs = [ValuePair("pri", 0), ValuePair("cng",0)]
            name = "ing_pri_cng_map"
            self.add_new_register(name, section, "write", value_list, value_pairs)

            # maps the VLAN tag priority and CFI fields to configured internal priorities
            # -- we are initializing only the first profile in the table: the TRUST_DOT1P_PTR field
            #    must be used to index into the table, and it must be set to zero (XXX is it used?)
            # XXX change this to use the configured priority mapping from the datapath config file XXX
            pri_cng_entries = 2
            profile_idx = 0
            for packet_priority, cos_id in self.config_manager.traffic.pkt2cos.iteritems() :
                cng = cng_dict[self.config_manager.traffic.pkt2color[packet_priority]]
                pri_cng_idx = (profile_idx << 4 | packet_priority << 1)
                value_list = ["%d" % pri_cng_idx, "%d" % pri_cng_entries]
                value_pairs = [ValuePair("pri", cos_id), ValuePair("cng",cng)]
                self.add_new_register(name, section, "modify", value_list, value_pairs)

        elif self.config_manager.traffic.packet_priority_source == 'dscp' :
            # dscp table config
            dscp_set = 1
            dscp_max = len(self.config_manager.traffic.pkt2cos)
            for dscp, cos_id in self.config_manager.traffic.pkt2cos.iteritems() :
                # generate the table index, write out the ingress priority and cng
                cng = cng_dict[self.config_manager.traffic.pkt2color[dscp]]
                table_idx  = (dscp_set * dscp_max) + dscp
                value_list = ['%d' % table_idx, 1]
                value_pairs = [ValuePair('dscp', dscp),
                               ValuePair('pri',  cos_id),
                               ValuePair('cng',  cng)]
                name    = "dscp_table"
                self.add_new_register(name, section, "modify", value_list, value_pairs)

            # point every front-panel port at the dscp_table profile above
            logical_port_list = self.port_desc.port_map['logical'].keys()
            logical_port_list.sort()
            for logical_port in logical_port_list :
                port = logical_port + 1 # convert from xe<n> label
                value_list = ["%d" % port, 1]
                value_pairs = [ValuePair('trust_dscp_v4', 1),
                               ValuePair('trust_dscp_v6', 1),
                               ValuePair('trust_dscp_ptr', dscp_set)]
                name = 'port'
                self.add_new_register(name, section, "modify", value_list, value_pairs)

                # magic T2 stuff
                port_shift  = 3
                for cos_id in range(self.config_manager.hardware.num_priorities) :
                    table_idx   = (port << port_shift) | cos_id
                    value_list  = ['%d' % table_idx, 1]
                    value_pairs = [ValuePair('hg_tc', cos_id)]
                    name = 'egr_map_mh'
                    self.add_new_register(name, section, 'write', value_list, value_pairs)

        if self.config_manager.traffic.remark_packet_priority == '802.1p' :
            logical_port_list = self.port_desc.port_map['logical'].keys()
            logical_port_list.sort()
            for logical_port in logical_port_list :
                port = logical_port + 1 # convert from xe<n> index
                # populate egr_pri_cng_map entries: outgoing 802.1p
                #   packet priority will be taken from the internal
                #   priority value
                internal_priority_shift = 2
                port_shift = self.egr_pri_map_port_shift
                for vlan_priority, cos_id in self.config_manager.traffic.pkt2cos.iteritems() :
                    table_idx   = (port << port_shift) | (cos_id << internal_priority_shift)
                    value_list  = ['%d' % table_idx, 4]
                    value_pairs = [ValuePair('pri', cos_id),
                                   ValuePair('cfi', 0)]
                    name = 'egr_pri_cng_map'
                    self.add_new_register(name, section, 'mod', value_list, value_pairs)
                    #print 'egr map pri %d idx %d' % (internal_priority, table_idx)

            # enable the packet remark
            value_pairs = [ValuePair('remark_outer_dot1p', 1)]
            name = 'egr_vlan_control_1'
            self.add_new_register(name, section, egr_vlan_mod, egr_value_list, value_pairs)

# ==============================================================================
#
#              F O R W A R D I N G __ R E G I S T E R __ M A N A G E R
#
# ==============================================================================
class ForwardingRegisterManager(RegisterManager):
    """Emits the forwarding (hashing) register section.

    This manager does not compute any registers itself: it copies the
    pre-formatted forwarding section lines supplied by the config
    manager straight into the register output file.
    """

    def __init__(self, chip, config_manager) :
        super(ForwardingRegisterManager, self).__init__(chip, config_manager)
        self.section_dict = {"hashing" : []}

    def generate_output_objects(self) :
        # nothing to generate: output comes verbatim from the config manager
        pass

    def print_output_objects(self, file_dict) :
        """Write the forwarding section lines to the register file.

        file_dict -- dict of open output file objects, keyed by file type
        """
        if self.config_manager.traffic.disable_custom_datapath_config == 1 :
            return
        # 'file' is a builtin in Python 2: use a distinct local name
        register_file = file_dict['register']
        for line in self.config_manager.forwarding_section :
            register_file.write(line)

# ==============================================================================
#
#                    E G R E S S __ R E G I S T E R __ M A N A G E R
#
# ==============================================================================
class EgressRegisterManager(RegisterManager):
    """Base class for egress register generation.

    Holds the output section buckets and the CPU error queue
    descriptions shared by the egress managers; the generate_* hooks
    raised here are implemented by chip-specific subclasses and driven
    from generate_output_objects().
    """

    def __init__(self, chip, config_manager) :
        super(EgressRegisterManager, self).__init__(chip, config_manager)

        # one register bucket per egress output section
        self.section_dict = dict((section, []) for section in
                                 ("priority mapping",
                                  "scheduling",
                                  "priority flow control",
                                  "flow control",
                                  "buffer management",
                                  "overflow queue",
                                  "ports"))

        # the error queue numbers need to be consistent with the queue used in hal_bcm.c
        #
        # The cell count is used to set the q_min_cell field for the cpu exception queues: the
        # SDK API supports a 'guarantee' property which is used to initialize the same field, but
        # the same value is used for all queues.
        self.cpu_error_list = [
            {'name' : 'l3_mtu_check_fail', 'q_id' : 32, 'cell_count' : 100},
            {'name' : 'nhop',              'q_id' : 33, 'cell_count' : 1},
            {'name' : 'sflow',             'q_id' : 34, 'cell_count' : 500},
        ]

        # keep this fixed for now
        self.cos_mode = 0

        self.header_str = "Egress Registers"

    def generate_cos_map (self) :
        """Chip-specific hook: subclass must override."""
        raise NotImplementedError()

    def generate_cpu_cos_map (self) :
        """Chip-specific hook: subclass must override."""
        raise NotImplementedError()

    def generate_scheduling_regs (self) :
        """Chip-specific hook: subclass must override."""
        raise NotImplementedError()

    def generate_sp_shared_limit_regs (self) :
        """Chip-specific hook: subclass must override."""
        raise NotImplementedError()

    def generate_queue_regs (self) :
        """Chip-specific hook: subclass must override."""
        raise NotImplementedError()

    def generate_output_objects(self) :
        """Run every egress generation step in a fixed order.

        generate_flowcontrol_regs is not defined on this class; it is
        expected to be provided by a subclass or the base class.
        """
        self.generate_flowcontrol_regs()
        self.generate_cos_map()
        self.generate_cpu_cos_map()
        self.generate_queue_regs()
        self.generate_scheduling_regs()
        self.generate_sp_shared_limit_regs()

# ==============================================================================
#
#         T R I D E N T __ E G R E S S __ R E G I S T E R __ M A N A G E R
#
# ==============================================================================
class TridentEgressRegisterManager(EgressRegisterManager):

    # ------------------------------------------------------------------
    #
    #                          __ i n i t __
    #
    # ------------------------------------------------------------------
    def __init__(self, chip, config_manager) :
        """Initialize Trident egress constants (table ranges, queue count)."""
        super(TridentEgressRegisterManager, self).__init__(chip, config_manager)

        # each front-panel port has 10 unicast queues on this chip
        self.num_uc_queues_per_port = 10

        # write ranges for the thdo_* tables; used as [start, count, extra]
        # by generate_front_uc_queue_regs (values are chip-specific)
        self.thdo_range    = [0, 296, 279]
        self.thdo_sp_range = [0, 40, 39]

    # ------------------------------------------------------------------
    #
    #                  g e n e r a t e __ c o s __ m a p
    #
    # ------------------------------------------------------------------
    def generate_cos_map (self) :
        """Generate the ingress priority -> CoS queue mapping registers."""

        section = "priority mapping"

        # global queueing mode
        self.add_new_register("ing_cos_mode", section, "setreg", [],
                              [ValuePair("queue_mode", 0),
                               ValuePair("cos_mode", self.cos_mode)])

        # cos_mode_x and cos_mode_y selects a queue for MC packets
        #    based on cos_map.mc_cos1 or phb_cos_map.mc_cos2
        for mode_reg in ("cos_mode_x", "cos_mode_y") :
            self.add_new_register(mode_reg, section, "setreg", ["0"], [])

        # chooses one of the four quadrants of the cos map table
        self.add_new_register("cos_map_sel", section, "write",
                              ["0", "67", "0"], [])

        # initialize the cos map table: zero all 64 entries
        base_idx    = 0
        entry_count = 64
        self.add_new_register("cos_map", section, "write",
                              ["%d" % base_idx, "%d" % entry_count],
                              [ValuePair("uc_cos1", 0),
                               ValuePair("hg_cos", 0),
                               ValuePair("mc_cos1", 0)])

        # map ingress priorities to cos values, one entry per priority
        for packet_priority, cos_id in self.config_manager.traffic.pkt2cos.iteritems() :
            queue_cfg = self.config_manager.cos_queue.cos_id_dict[cos_id]
            self.add_new_register("cos_map", section, "write",
                                  ["%d" % cos_id, "1"],
                                  [ValuePair("uc_cos1", queue_cfg.uc),
                                   ValuePair("hg_cos",  queue_cfg.uc),
                                   ValuePair("mc_cos1", queue_cfg.mc)])

    # ------------------------------------------------------------------
    #
    #                  g e n e r a t e __ c p u __ c o s __ m a p
    #
    # ------------------------------------------------------------------
    def generate_cpu_cos_map (self) :
        """Populate the cpu_cos_map tcam that steers packets to CPU queues."""

        # tcam entries: map packets enqueued to the CPU to one of 48 queues
        #  -- int_pri_key: 4-bit internal priority
        #  -- int_pri_mask: mask for 4-bit internal priority
        #  -- cos: one of 48 CoS for the CMIC port
        #  -- valid: tcam entry is valid
        #
        # by default: priority 'n' -> cos queue 'n'

        section = "priority mapping"
        name = "cpu_cos_map"

        # initialize the table: invalidate all 128 entries
        self.add_new_register(name, section, "modify", ["0", "128"],
                              [ValuePair("int_pri_key", 0),
                               ValuePair("int_pri_mask", 0),
                               ValuePair("cos", 0),
                               ValuePair("valid", 0)])

        # apply our configuration, filling entries from index 127 downward
        entry_idx = 127
        for packet_priority, cos_id in self.config_manager.traffic.pkt2cos.iteritems() :
            cpu_queue = self.config_manager.cos_queue.cos_id_dict[cos_id].cpu
            self.add_new_register(name, section, "modify",
                                  ["%d" % entry_idx, "1"],
                                  [ValuePair("int_pri_key", cos_id),
                                   ValuePair("int_pri_mask", 15, 'hex'),
                                   ValuePair("cos", cpu_queue),
                                   ValuePair("valid", 1)])
            entry_idx -= 1


    # ------------------------------------------------------------------
    #
    #     C O N F I G __ S 2 __ I N P U T S __ A N D __ W E I G H T S
    #
    # ------------------------------------------------------------------
    def config_s2_inputs_and_weights (self, traffic_type, s2_node_index, start_field_index) :
        """Attach one traffic group's queues to an S2 scheduler node.

        Fills consecutive 's3_group_no_i<n>' fields of the node's
        s2_s3_routing register with the encoded S2 inputs for the group's
        unicast and multicast queues, and writes the group's weight into
        the matching s2_cosweights register for each input used.

        traffic_type      -- priority-group label (e.g. 'bulk', 'service')
        s2_node_index     -- index of the S2 node receiving this group
        start_field_index -- first free s3_group_no_i<n> field index

        Returns the next free field index, so several groups can be
        chained onto the same S2 node by successive calls.
        """

        # s2 node input and weight encoding: from TD register spec
        s2_map = { 'uc' : { 0 : 4,
                            1 : 5,
                            2 : 6,
                            3 : 7,
                            4 : 8,
                            5 : 9,
                            6 : 10,
                            7 : 11 },
                   'mc' : { 0 : 0,
                            1 : 1,
                            2 : 2,
                            3 : 3 } }

        uc_q_dict = self.config_manager.priority_group.set_dict[traffic_type].queue['uc']
        mc_q_dict = self.config_manager.priority_group.set_dict[traffic_type].queue['mc']

        # build the queue-id sub-lists with a parallel type list.
        # BUG FIX: q_type_list used to be the constant ['uc', 'mc'], so a
        # group with no uc queues had its mc queues encoded via
        # s2_map['uc'] -- the type label must track what is actually
        # appended to q_list.
        q_type_list = []
        q_list = []
        if len(uc_q_dict) > 0 :
            q_list.append(uc_q_dict.keys())
            q_type_list.append('uc')
        if len(mc_q_dict) > 0 :
            q_list.append(mc_q_dict.keys())
            q_type_list.append('mc')
        if len(q_list) == 0 :
            # no queues in this group: routing register untouched
            return start_field_index

        section = "scheduling"
        input_name = 's2_s3_routing(%d).$allports' % s2_node_index
        field_name_prefix = 's3_group_no_i'

        # the group weight is the same for every queue: hoist it
        q_weight = self.config_manager.priority_group.set_dict[traffic_type].weight

        value_list      = []
        input_value_pair_list = []

        # walk the routing fields (a node has fields i0..i8), consuming
        # queue ids from q_list in order: uc queues first, then mc
        q_idx      = 0
        q_list_idx = 0
        for field_idx in range(start_field_index,9) :
            field_name = field_name_prefix + '%d' % field_idx
            q_id   = q_list[q_list_idx][q_idx]
            q_type = q_type_list[q_list_idx]
            cosweight_idx = s2_map[q_type][q_id]
            input_value_pair_list.append(ValuePair(field_name, cosweight_idx))

            # generate the weight register
            weight_name = 's2_cosweights(%d).$allports' % cosweight_idx
            weight_value_pair_list = [ValuePair('cosweights', q_weight)]
            self.add_new_register(weight_name, section, "modreg", value_list, weight_value_pair_list)

            # move to the next queue / queue sub-list
            q_idx += 1
            if q_idx >= len(q_list[q_list_idx]) :
                q_list_idx += 1
                q_idx = 0
                if q_list_idx >= len(q_list) :
                    break

        # generate the input register
        # NOTE(review): if the group has more queues than remaining fields,
        # the loop exhausts the fields and the extra queues are silently
        # not routed -- confirm this cannot happen with valid configs
        self.add_new_register(input_name, section, "modreg", value_list, input_value_pair_list)

        return field_idx + 1

    # ------------------------------------------------------------------
    #
    #         g e n e r a t e __ s c h e d u l i n g __ r e g s
    #
    # ------------------------------------------------------------------
    def generate_scheduling_regs (self) :
        """Generate the egress scheduler (S3/S2/S1) configuration registers.

        S3 nodes take the mc queues as inputs; S2 nodes take uc queues and
        S3 outputs, grouped by traffic class (S2.0 bulk + other, S2.1
        service, S2.2 control); the top level (esconfig/cosweights)
        arbitrates between the S2 nodes using the per-group weights.
        """

        section   = "priority mapping"

        # scheduler queue -> priority mapping
        # NOTE(review): only prio_0..prio_6 are written (7 entries) --
        # confirm that prio_7 is intentionally left at its reset value
        name = "es_queue_to_prio"
        value_list = []
        value_pair_list = []
        for idx in reversed(xrange(7)) :
            value_pair_list.append(ValuePair("prio_%d" % idx, idx))
        self.add_new_register(name, section, "setreg", value_list, value_pair_list)

        section = "scheduling"
        algorithm = self.config_manager.scheduling.algorithm

        # encode the configured algorithm for the scheduling_select fields
        select_encoding = {"dwrr" : 3, "wrr" : 2, "rr" : 1, "sp" : 0}
        if algorithm not in select_encoding :
            self.config_manager.report_error("scheduling algorithm %s not supported" % algorithm)
            return
        scheduling_select = select_encoding[algorithm]

        # apply the scheduling algorithm to all nodes (2 bits per input)
        l3_sched_select = scheduling_select << 6 | scheduling_select << 4 | scheduling_select << 2 | scheduling_select
        l2_sched_select = scheduling_select << 4 | scheduling_select << 2 | scheduling_select
        l1_sched_select = scheduling_select

        # --- level 3 scheduling nodes: inputs are mc queues ----
        name = 's3_config.$allports'
        value_list = []
        value_pair_list = []
        value_pair_list.append(ValuePair("route_uc_to_s2", 1, 'hex'))
        value_pair_list.append(ValuePair("scheduling_select", l3_sched_select, 'hex'))
        self.add_new_register(name, section, "setreg", value_list, value_pair_list)

        # s3_minspconfig register: strict priority for minimum bandwidth (not used)
        minspconfig_name = "s3_minspconfig"
        value_list = ["0"]
        value_pair_list = []
        self.add_new_register(minspconfig_name, section, "setreg", value_list, value_pair_list)

        name = 's3_cosweights.$allports'
        value_list = []
        value_pair_list = [ValuePair('cosweights', 16)] # default weight: each MC queue gets an S3 node to itself
        self.add_new_register(name, section, "setreg", value_list, value_pair_list)

        name = 's3_config_mc.$allports'
        value_list = []
        value_pair_list = [ValuePair('use_mc_group', 0)]
        self.add_new_register(name, section, "setreg", value_list, value_pair_list)

        # ---- level 2 scheduling nodes: inputs are uc queues and level 3 nodes  -----
        name = 's2_config.$allports'
        value_list = []
        value_pair_list = []
        value_pair_list.append(ValuePair("scheduling_select", l2_sched_select, 'hex'))
        self.add_new_register(name, section, "setreg", value_list, value_pair_list)

        # initialize S2 cosweights
        name = 's2_cosweights.$allports'
        value_list = []
        value_pair_list = [ValuePair('cosweights', 0)]
        self.add_new_register(name, section, "setreg", value_list, value_pair_list)

        # (removed: a leftover loop over self.config_manager.queue_dict['uc']
        # whose body was empty and generated no registers)

        # s2_minspconfig register: strict priority for minimum bandwidth (not used)
        minspconfig_name = "s2_minspconfig"
        value_list = ["0"]
        value_pair_list = []
        self.add_new_register(minspconfig_name, section, "setreg", value_list, value_pair_list)

        # initialize the s3->s2 mapping: all inputs set to 0x1f
        name = 's2_s3_routing.$allports'
        value_list = []
        value_pair_list = []
        for field_idx in reversed(range(9)) :
            value_pair_list.append(ValuePair('s3_group_no_i%d' % field_idx, 0x1f, 'hex'))
        self.add_new_register(name, section, "setreg", value_list, value_pair_list)

        # set the s3->s2 mapping for s2.0: bulk traffic
        s2_node_index     = 0
        start_field_index = 0
        next_field_index = self.config_s2_inputs_and_weights('bulk',
                                                             s2_node_index,
                                                             start_field_index)

        # add other traffic groups except service and control to s2.0
        for label, priority_group in self.config_manager.priority_group.set_dict.iteritems() :
            if label == 'bulk' or label == 'service' or label == 'control' :
                continue
            if priority_group.configured == True :
                start_field_index = next_field_index
                next_field_index = self.config_s2_inputs_and_weights(label,
                                                                     s2_node_index,
                                                                     start_field_index)

        # set the s3->s2 mapping for s2.1: service traffic
        s2_node_index = 1
        start_field_index = 0
        self.config_s2_inputs_and_weights('service', s2_node_index, start_field_index)

        # set the s3->s2 mapping for s2.2: control traffic
        s2_node_index = 2
        start_field_index = 0
        self.config_s2_inputs_and_weights('control', s2_node_index, start_field_index)

        # ---- level 1 scheduling nodes: inputs are level 2 nodes  -----
        name = 'esconfig.$allports'
        value_list = []
        value_pair_list = []
        value_pair_list.append(ValuePair("scheduling_select", l1_sched_select, 'hex'))
        self.add_new_register(name, section, "setreg", value_list, value_pair_list)

        # default weight: low weight for unused inputs
        name = 'cosweights.$allports'
        weight = 2
        value_list = [weight]
        value_pair_list = []
        self.add_new_register(name, section, "setreg", value_list, value_pair_list)

        # per-group input weights: cosweights(n) controls S2.(n-1)
        # (bulk weight also covers the 'other' traffic sharing S2.0)
        for weight_reg_idx, label in ((1, 'bulk'), (2, 'service'), (3, 'control')) :
            name = 'cosweights(%d).$allports' % weight_reg_idx
            weight = self.config_manager.priority_group.set_dict[label].weight
            self.add_new_register(name, section, "setreg", [weight], [])

        # minspconfig register
        minspconfig_name = "minspconfig"
        value_list = ["0"]
        value_pair_list = []
        self.add_new_register(minspconfig_name, section, "setreg", value_list, value_pair_list)

        # cosmask register
        value_list = []
        value_pair_list = [ValuePair("cosmaskrxen", 1)]
        self.add_new_register("cosmask", section, "modreg", value_list, value_pair_list)

        name = "es_tdm_config"
        value_list = []
        value_pair_list = [ValuePair("en_cpu_slot_sharing", 0)]
        self.add_new_register(name, section, "modreg", value_list, value_pair_list)
        return

    # ---------------------------------------------------------------------
    #
    #       g e n e r a t e __ s p __ s h a r e d __ l i m i t __ r e g s
    #
    # ---------------------------------------------------------------------
    def generate_sp_shared_limit_regs (self) :
        """Generate the per-service-pool shared buffer limit registers.

        For every service pool, writes the green cell limit and a resume
        threshold (limit - 100 cells, clamped at 0); when yellow/red
        limits are configured, writes those too.  The yellow/red
        registers use an 8-cell granularity, so their values are written
        shifted right by 3.

        Fixes a latent KeyError: the green path guarded against a pool id
        missing from eg_sp_buffer_limit, but the yellow/red paths indexed
        the dict unconditionally.
        """
        section     = "buffer management"
        limit_name  = "op_buffer_shared_limit_cell"
        yellow_limit_name = 'op_buffer_limit_yellow_cell' # 8-cell granularity
        red_limit_name    = 'op_buffer_limit_red_cell'    # 8-cell granularity
        resume_name = "op_buffer_shared_limit_resume_cell"
        yellow_resume_name = 'op_buffer_limit_resume_yellow_cell' # 8-cell granularity
        red_resume_name = 'op_buffer_limit_resume_red_cell'       # 8-cell granularity

        num_service_pools = self.config_manager.hardware.num_service_pools
        value_pairs = []

        for sp_id in range(num_service_pools) :
            # pools without a configured limit get a zero green limit and
            # no yellow/red registers
            sp_desc = self.config_manager.buffer_desc.eg_sp_buffer_limit.get(sp_id)

            sp_limit = 0
            if sp_desc is not None :
                sp_limit = sp_desc['green']
            self.add_new_register(limit_name + "[%d]" % sp_id, section, "setreg", [sp_limit], value_pairs)
            # resume threshold: 100 cells below the limit, never negative
            resume_limit = max(sp_limit - 100, 0)
            self.add_new_register(resume_name + "[%d]" % sp_id, section, "setreg", [resume_limit], value_pairs)

            if sp_desc is None :
                continue

            # color-aware thresholds: (limit, resume) register pair per color
            for color, color_limit_name, color_resume_name in \
                    (('yellow', yellow_limit_name, yellow_resume_name),
                     ('red',    red_limit_name,    red_resume_name)) :
                if sp_desc[color] is None :
                    continue
                sp_limit = sp_desc[color]
                self.add_new_register(color_limit_name + "[%d]" % sp_id, section, "setreg", [sp_limit >> 3], value_pairs)
                resume_limit = max(sp_limit - 100, 0)
                self.add_new_register(color_resume_name + "[%d]" % sp_id, section, "setreg", [resume_limit >> 3], value_pairs)

    # ------------------------------------------------------------------
    #
    #   g e n e r a t e __ i n i t __ m c __ q u e u e __ c o n f i g
    #
    # ------------------------------------------------------------------
    def generate_init_mc_queue_config (self) :
        """Write the baseline (all-zero) MC queue and port buffer config."""

        section = "buffer management"

        self.add_new_register("op_queue_config_cell", section, "setreg", [],
                              [ValuePair("q_min_cell", 0),
                               ValuePair("q_shared_limit_cell", 0)])

        self.add_new_register("op_queue_config1_cell", section, "setreg", [],
                              [ValuePair("q_spid", 0),
                               ValuePair("q_limit_enable_cell", 0),
                               ValuePair("q_limit_dynamic_cell", 0),
                               ValuePair("q_color_enable_cell", 0)])

        # XXX this should not be hard coded?
        self.add_new_register("op_queue_reset_offset_cell", section, "setreg",
                              ["3"], [])

        self.add_new_register("op_port_config_cell", section, "setreg", [],
                              [ValuePair("op_shared_limit_cell", 0),
                               ValuePair("op_shared_reset_value_cell", 0)])

        self.add_new_register("op_port_config1_cell", section, "setreg", [],
                              [ValuePair("port_limit_enable_cell", 0)])

    # ------------------------------------------------------------------
    #
    #   g e n e r a t e __ m c __ a n d __ c p u __ q u e u e __ r e g s
    #
    # ------------------------------------------------------------------
    def generate_mc_and_cpu_queue_regs (self) :
        """Configure buffer limits for front panel MC queues and CPU queues.

        The op_queue_config registers configure front panel port multicast
        queues and the cpu port cpu queues.  CPU queue numbers reserved
        for the error queues (self.cpu_error_list) must not appear in the
        user configuration; the error queues themselves get a default
        q_min_cell allocation at the end.
        """

        config1_name = "op_queue_config1_cell"
        config_name  = "op_queue_config_cell"
        port_suffix  = {"front panel" : "$allports", "cpu": "cpu0"}
        field_name = {"minimum": "q_min_cell", "shared" : "q_shared_limit_cell", "total" : "q_total_limit_cell"}
        section      = "buffer management"
        queue_buffer_limit       = self.config_manager.buffer_desc.queue_buffer_limit
        queue_buffer_unlimited   = self.config_manager.buffer_desc.queue_buffer_unlimited
        queue_buffer_color_aware = self.config_manager.buffer_desc.queue_buffer_color_aware
        value_list = []

        # all queue ids reserved for the CPU error queues
        reserved_cpu_q_ids = set([err['q_id'] for err in self.cpu_error_list])

        for port_type in port_suffix :
            suffix = port_suffix[port_type]
            q_type = 'mc'
            if port_type == 'cpu' :
                q_type = 'cpu'
            for q_id in queue_buffer_limit[q_type] :

                # check for a CPU error queue
                # BUG FIX: previously only cpu_error_list[0]['q_id'] was
                # checked, so the other reserved queues slipped through
                if q_type == 'cpu' and q_id in reserved_cpu_q_ids :
                    self.config_manager.report_error("Error: bad CPU queue %d: reserved for an error queue" % q_id)
                    return

                # get the service pool ID
                sp_id = self.config_manager.priority_group.q2sp[q_type][q_id]
                value_pair_list = [ ValuePair("q_spid", sp_id)]

                if queue_buffer_unlimited[q_type][q_id] == False :
                    value_pair_list.append(ValuePair("q_limit_enable_cell", 1))
                    if queue_buffer_color_aware[q_type][q_id] == True :
                        msg = 'Color-aware limits are not supported on egress '
                        msg += 'queues on this platform: setting color-blind '
                        msg += 'limits on %s queue %d' % (q_type, q_id)
                        self.config_manager.report_error(msg)
                        # use the green buffer limit as the shared buffer limit
                        queue_buffer_limit[q_type][q_id]['shared'] = queue_buffer_limit[q_type][q_id]['shared green']

                # generate the config1 register
                self.add_new_register(config1_name + ("[%d]." % q_id) + suffix,
                                      section,
                                      "modreg",
                                      value_list,
                                      value_pair_list)

                # generate the limit register for each configured field
                if queue_buffer_unlimited[q_type][q_id] == False :
                    if q_type in queue_buffer_limit and \
                           q_id in queue_buffer_limit[q_type] :
                        value_pair_list = []
                        for field_type in field_name:
                            if field_type in queue_buffer_limit[q_type][q_id] :
                                field = field_name[field_type]
                                value_pair_list.append(ValuePair(field, queue_buffer_limit[q_type][q_id][field_type]))
                        if len(value_pair_list) > 0 :
                            self.add_new_register(config_name + "[%d]." % q_id + suffix,
                                                  section,
                                                  "modreg",
                                                  value_list,
                                                  value_pair_list)

        # assign some default buffer space to our CPU error queue
        for cpu_error in self.cpu_error_list :
            suffix = port_suffix['cpu']
            value_pair_list = [(ValuePair('q_min_cell', cpu_error['cell_count']))]
            q_id = cpu_error['q_id']
            self.add_new_register(config_name + ("[%d]." % q_id) + suffix,
                                  section,
                                  "modreg",
                                  value_list,
                                  value_pair_list)
            value_pair_list = [(ValuePair("q_limit_enable_cell", 1))]
            self.add_new_register(config1_name + ("[%d]." % q_id) + suffix,
                                  section,
                                  "modreg",
                                  value_list,
                                  value_pair_list)

    # --------------------------------------------------------------------
    #
    #   g e n e r a t e __ f r o n t __ u c __ q u e u e __ r e g s
    #
    # --------------------------------------------------------------------
    def generate_front_uc_queue_regs (self) :

        section       = "buffer management"
        thdo_range    = self.thdo_range
        thdo_sp_range = self.thdo_sp_range
        field_name = {"minimum": "q_min_cell", "shared" : "q_shared_limit_cell"}
        queue_buffer_limit       = self.config_manager.buffer_desc.queue_buffer_limit
        queue_buffer_unlimited   = self.config_manager.buffer_desc.queue_buffer_unlimited
        queue_buffer_color_aware = self.config_manager.buffer_desc.queue_buffer_color_aware
        q_type = 'uc'

        thdo_reg_desc = [{"name": "thdo_config_",    "index": [0,1], "range": thdo_range},
                         {"name": "thdo_config_sp_", "index": [0,1], "range": thdo_sp_range}]

        for desc in thdo_reg_desc :
            for idx in desc["index"] :
                name = desc["name"] + "%d" % idx
                value_list = ["%d" % desc["range"][0], "%d" % desc["range"][1]]
                value_pair_list = [ ValuePair("q_min_cell",0),
                                    ValuePair("q_shared_limit_cell",0),
                                    ValuePair("q_limit_enable_cell",0),
                                    ValuePair("q_color_enable_cell",0),
                                    ValuePair("q_limit_dynamic_cell",0)]
                self.add_new_register(name, section, "write", value_list, value_pair_list)

            thdo_qdrprst_reg_desc = [{"name": "thdo_qdrprst_", "index": [0,1], "range": thdo_range},
                                     {"name": "thdo_qdrprst_sp_", "index": [0,1], "range": thdo_sp_range}]

        for desc in thdo_qdrprst_reg_desc :
            for idx in desc["index"] :
                name = desc["name"] + "%d" % idx
                value_list = ["%d" % desc["range"][0], "%d" % desc["range"][1], 0]
                value_pair_list = []
                self.add_new_register(name, section, "write", value_list, value_pair_list)

        name = "op_uc_port_config_cell"
        value_list = []
        value_pair_list = [ValuePair("op_shared_limit_cell",0),
                           ValuePair("op_shared_reset_value_cell",0)]
        self.add_new_register(name, section, "setreg", value_list, value_pair_list)

        # initialize the queue to service pool mapping
        name = "op_uc_port_config1_cell"
        value_list = []
        value_pair_list = [ValuePair("port_limit_enable_cell",0),
                           ValuePair("q_e2e_ds_en",0)]
        for q_id in range(10) :
            value_pair_list.append(ValuePair("cos%d_spid" % q_id,0))
        self.add_new_register(name, section, "setreg", value_list, value_pair_list)

        # write the configured service pool mapping
        name = "op_uc_port_config1_cell"
        value_list = []
        value_pair_list = [ValuePair("port_limit_enable_cell",0),
                           ValuePair("q_e2e_ds_en",0)]
        for q_id, sp_id in self.config_manager.priority_group.q2sp[q_type].iteritems() :
            value_pair_list.append(ValuePair("cos%d_spid" % q_id,sp_id))
        self.add_new_register(name, section, "setreg", value_list, value_pair_list)

        # generate the registers for queues with buffer limits
        for q_id  in queue_buffer_limit[q_type] :
            if queue_buffer_unlimited[q_type][q_id] == False :
                if queue_buffer_color_aware[q_type][q_id] == True :
                    msg = 'Color-aware limits are not supported on egress '
                    msg += 'queues on this platform: setting color-blind '
                    msg += 'limits on %s queue %d' % (q_type, q_id)
                    self.config_manager.report_error(msg)
                    # use the green buffer limit as the shared buffer limit
                    queue_buffer_limit[q_type][q_id]['shared'] = queue_buffer_limit[q_type][q_id]['shared green']

                # generate the registers for queues with minumum or shared buffer limits
                port_type = "front panel"
                total_cell_count = 0
                value_list = ["1"]
                value_pair_list = [ValuePair("q_limit_enable_cell",1)]
                for buffer_type in ['minimum', 'shared'] :
                    if buffer_type in queue_buffer_limit[q_type][q_id] :
                        field = field_name[buffer_type]
                        cell_count = queue_buffer_limit[q_type][q_id][buffer_type]
                        total_cell_count += cell_count
                        value_pair_list.append(ValuePair(field, cell_count))

                for desc in thdo_reg_desc :
                    for idx in desc["index"] :
                        name = desc["name"] + "%d" % idx
                        loop_list = [q_id, desc["range"][2], 10]
                        self.add_new_register_template(name,
                                                       section,
                                                       "modify",
                                                       loop_list,
                                                       value_list,
                                                       value_pair_list)

                qdrp_reset_max = 8191
                qdrp_reset = total_cell_count - self.hardware.max_frame_cells
                if qdrp_reset > qdrp_reset_max:
                    qdrp_reset = qdrp_reset_max
                if qdrp_reset < 0 :
                    qdrp_reset = 0
                value_pair_list = [ValuePair("qdrp_reset", qdrp_reset)]
                for desc in thdo_qdrprst_reg_desc :
                    for idx in desc["index"] :
                        name = desc["name"] + "%d" % idx
                        loop_list = [q_id, desc["range"][2], 10]
                        self.add_new_register_template(name,
                                                       section,
                                                       "modify",
                                                       loop_list,
                                                       value_list,
                                                       value_pair_list)

    # --------------------------------------------------------------------
    #
    #       g e n e r a t e __ o v e r f l o w __ q u e u e __ r e g s
    #
    # --------------------------------------------------------------------
    def generate_overflow_queue_regs (self) :
        """Generate the overflow-queue registers.

        Clears every q_sel_* field of op_voq_port_config and disables
        overflow-queue flow control.
        """

        section = "overflow queue"

        # clear all the VOQ port queue selections in a single setreg
        q_sel_fields = ["q_sel_p37", "q_sel_p36", "q_sel_p35", "q_sel_p34",
                        "q_sel_p4",  "q_sel_p3",  "q_sel_p2",  "q_sel_p1"]
        value_pair_list = [ValuePair(field, 0) for field in q_sel_fields]
        self.add_new_register("op_voq_port_config", section, "setreg",
                              [], value_pair_list)

        # disable overflow-queue flow control
        self.add_new_register("ovq_flowcontrol_threshold", section, "modreg",
                              [], [ValuePair("ovq_fc_enable", 0)])

    # --------------------------------------------------------------------
    #
    #       g e n e r a t e __ f l o w c o n t r o l __ r e g s
    #
    # --------------------------------------------------------------------
    def generate_flowcontrol_regs (self) :
        """No-op: this register manager emits no flow-control registers."""
        pass

    # --------------------------------------------------------------------
    #
    #   g e n e r a t e __ q u e u e __ r e g s
    #
    # --------------------------------------------------------------------
    def generate_queue_regs (self) :
        """Generate all queue registers, in dependency order."""
        generators = (self.generate_init_mc_queue_config,
                      self.generate_mc_and_cpu_queue_regs,
                      self.generate_front_uc_queue_regs,
                      self.generate_overflow_queue_regs)
        for generate in generators :
            generate()

# ==============================================================================
#
#                 T 2 __ E G R E S S __ R E G I S T E R __ M A N A G E R
#
# ==============================================================================
class T2_EgressRegisterManager(EgressRegisterManager):

    def __init__(self, chip, config_manager) :
        """Egress register manager for the Trident2 (T2) chip.

        Records the T2 queue/port geometry and seeds the shared color
        resume-profile tables (8 slots each; -1 marks a free slot).
        """
        super(T2_EgressRegisterManager, self).__init__(chip, config_manager)

        # per-pipe queue geometry
        self.num_queues_per_pipe    = 2048
        self.num_uc_queues_per_pipe = 1480
        self.num_mc_queues_per_pipe = 520
        self.num_uc_queues_per_port = 10
        self.cpu_mmu_port           = 52

        # flag bits accepted by the get_base_*_queue() helpers
        self.local_to_port_q_flag = 0x1
        self.global_q_flag        = 0x2

        # one index slot per pipe -- presumably L1/L0 scheduler node
        # allocation counters; TODO confirm against callers
        self.l1_idx = [0] * 2
        self.l0_idx = [0] * 2
        # XXX egr_pri_cng_map ?

        # color resume-offset profile tables; -1 == unallocated slot
        self.yellow_profile_values = [-1] * 8
        self.red_profile_values    = [-1] * 8

    # ------------------------------------------------------------------
    #
    #                  g e t __ b a s e __ c p u __ q u e u e
    #
    # ------------------------------------------------------------------
    def get_base_cpu_queue (self, flags=0) :
        """Return the first CPU queue number.

        With global_q_flag set, returns the chip-global base (2000);
        otherwise the per-pipe base (0).
        """
        if flags & self.global_q_flag :
            return 2000
        return 0

    # ------------------------------------------------------------------
    #
    #                  g e t __ b a s e __ u c __ q u e u e
    #
    # ------------------------------------------------------------------
    def get_base_uc_queue (self, logical_port, flags=0) :
        """Return the base unicast queue number for logical_port.

        Returns -1 when the port map holds no unicast queue for the
        port.  Unless global_q_flag is set, pipe-1 global queue numbers
        (>= 2048) are folded back into the per-pipe range.
        """
        queue = self.port_desc.port_map['logical'][logical_port]['uc_queue']
        if queue < 0 :
            return -1

        caller_wants_global = flags & self.global_q_flag
        if queue >= 2048 and not caller_wants_global :
            # caller is looking for the per-pipe queue number
            queue -= 2048
        return queue

    # ------------------------------------------------------------------
    #
    #                  g e t __ b a s e __ m c __ q u e u e
    #
    # ------------------------------------------------------------------
    def get_base_mc_queue (self, mmu_port, flags=0) :
        """Return the base multicast queue number for mmu_port.

        Returns -1 for a negative (invalid) mmu_port.  Each port owns 10
        MC queues.  By default the number is offset past the per-pipe
        unicast queues (+1480); with local_to_port_q_flag set, the
        port-local (0-based) number is returned instead.  With
        global_q_flag set, pipe-1 ports (mmu_port >= 64) additionally
        get the chip-global offset (+2048).
        """
        if mmu_port < 0 :
            return -1
        port = mmu_port & 0x3f
        base_queue = port * 10   # 10 MC queues per port

        # Bug fix: this used to test "flags ^ self.local_to_port_q_flag",
        # which is truthy for any flags value other than exactly 0x1 --
        # so combined flags (e.g. local|global) wrongly added the MC
        # offset even though the local flag was set.  Test the bit.
        if not (flags & self.local_to_port_q_flag) :
            base_queue += self.num_uc_queues_per_pipe   # skip the 1480 UC queues
        if mmu_port >= 64 and flags & self.global_q_flag :
            base_queue += self.num_queues_per_pipe      # pipe-1 global offset
        return base_queue

    # ---------------------------------------------------------------------
    #
    #       g e n e r a t e __ s p __ s h a r e d __ l i m i t __ r e g s
    #
    # ---------------------------------------------------------------------
    def generate_sp_shared_limit_regs (self) :
        """Generate the egress service-pool shared and resume limit registers.

        Pass 1 sizes each MC service pool for both MC resources -- 'db'
        (packet data) and 'mcqe' (queue entries) -- programs the matching
        resume limits, and adds the optional color-aware yellow/red
        limits.  Pass 2 gives every front-panel port access to 90% of
        each pool for unicast traffic; pass 3 does the same for
        multicast.  'mcqe' limits are kept in 4-cell units
        (limit_granularity_shift = 2); resume limits are in 8-cell units
        (resume_granularity_shift = 3).
        """

        section = "buffer management"
        num_service_pools = self.hardware.num_service_pools
        logical_port_list = self.port_desc.port_map['logical'].keys()
        logical_port_list.sort()

        # pass 1: size each egress service pool buffer and its resume limit
        mc_type_list = ['db', 'mcqe']
        for mc_type in mc_type_list :
            limit_granularity_shift  = 0
            resume_granularity_shift = 3
            if mc_type == 'mcqe' :
                limit_granularity_shift = 2
            limit_prefix  = { 'green'  : 'mmu_thdm_%s_pool_shared_limit' % mc_type,
                              'yellow' : 'mmu_thdm_%s_pool_yellow_shared_limit' % mc_type,
                              'red'    : 'mmu_thdm_%s_pool_red_shared_limit' % mc_type }
            resume_prefix = { 'green'  : 'mmu_thdm_%s_pool_resume_limit' % mc_type,
                              'yellow' : 'mmu_thdm_%s_pool_yellow_resume_limit' % mc_type,
                              'red'    : 'mmu_thdm_%s_pool_red_resume_limit' % mc_type }
            for sp_id in xrange(num_service_pools) :
                # green (color-blind) shared limit
                limit_name = limit_prefix['green']+'[%d]' % sp_id
                shared_limit = int(self.config_manager.buffer_desc.eg_sp_buffer_limit[sp_id]['green'] >> limit_granularity_shift)
                value_list = []
                value_pair_list = [ValuePair('shared_limit', shared_limit)]
                self.add_new_register(limit_name, section, "setreg", value_list, value_pair_list)

                # resume 100 cells below the limit, clamped at zero
                resume_name = resume_prefix['green'] +'[%d]' % sp_id
                resume_limit = (shared_limit - 100) >> resume_granularity_shift
                if resume_limit < 0 :
                    resume_limit = 0
                value_list = []
                value_pair_list = [ValuePair('resume_limit', resume_limit)]
                self.add_new_register(resume_name, section, "setreg", value_list, value_pair_list)

                color_aware = False
                if self.config_manager.buffer_desc.eg_sp_buffer_limit[sp_id]['yellow'] != None :
                    color_aware = True
                    limit_name = limit_prefix['yellow']+'[%d]' % sp_id
                    shared_limit = int(self.config_manager.buffer_desc.eg_sp_buffer_limit[sp_id]['yellow'])
                    value_list = []
                    value_pair_list = [ValuePair('yellow_shared_limit', shared_limit >> 3)]  # 8 cell granularity
                    self.add_new_register(limit_name, section, "setreg", value_list, value_pair_list)

                    resume_name = resume_prefix['yellow'] +'[%d]' % sp_id
                    resume_limit = (shared_limit - 100) >> resume_granularity_shift
                    if resume_limit < 0 :
                        resume_limit = 0
                    # Bug fix: this register write used to be nested under
                    # the clamp above, so the yellow resume limit was only
                    # programmed when it clamped to zero.  Write it
                    # unconditionally, matching the red handling below.
                    value_list = []
                    value_pair_list = [ValuePair('yellow_resume_limit', resume_limit)]
                    self.add_new_register(resume_name, section, "setreg", value_list, value_pair_list)

                if self.config_manager.buffer_desc.eg_sp_buffer_limit[sp_id]['red'] != None :
                    color_aware = True
                    limit_name = limit_prefix['red']+'[%d]' % sp_id
                    shared_limit = int(self.config_manager.buffer_desc.eg_sp_buffer_limit[sp_id]['red'])
                    value_list = []
                    value_pair_list = [ValuePair('red_shared_limit', shared_limit >> 3)]  # 8 cell granularity
                    self.add_new_register(limit_name, section, "setreg", value_list, value_pair_list)

                    resume_name = resume_prefix['red'] +'[%d]' % sp_id
                    resume_limit = (shared_limit - 100) >> resume_granularity_shift
                    if resume_limit < 0 :
                        resume_limit = 0
                    value_list = []
                    value_pair_list = [ValuePair('red_resume_limit', resume_limit)]
                    self.add_new_register(resume_name, section, "setreg", value_list, value_pair_list)

                if color_aware:
                    # enable color-aware accounting for this pool
                    name = 'mmu_thdm_%s_device_thr_config' % mc_type
                    value_list = []
                    value_pair_list = [ValuePair('pool_color_limit_enable_%d' % sp_id, 1)]
                    self.add_new_register(name, section, "modreg", value_list, value_pair_list)

        # pass 2: per-port unicast access to each pool -- 90% of the pool
        resume_granularity_shift = 3
        uc_limit_name  = ['mmu_thdu_xpipe_config_port', 'mmu_thdu_ypipe_config_port']
        uc_resume_name = ['mmu_thdu_xpipe_resume_port', 'mmu_thdu_ypipe_resume_port']
        for sp_id in range(num_service_pools) :
            limit_cell_count = int(self.config_manager.buffer_desc.eg_sp_buffer_limit[sp_id]['green'] * 0.90)
            resume_cell_count = (limit_cell_count - 100) >> resume_granularity_shift  # granularity: cell count / 8
            if resume_cell_count < 0 :
                resume_cell_count = 0

            for logical_port in logical_port_list :
                mmu_port = self.port_desc.port_map['logical'][logical_port]['mmu_port']
                # pipe 0 ('x') ports have mmu_port < 64, pipe 1 ('y') >= 64
                if mmu_port < 64 :
                    name_idx = 0
                else :
                    name_idx = 1
                table_idx = ((mmu_port & 0x3f) * num_service_pools) + sp_id
                value_list = [table_idx, 1]
                comment    = 'xe%d pipe %s mmu_port %d sp %d' % (logical_port, name_idx, mmu_port & 0x3f, sp_id)
                value_pair = [ValuePair('shared_limit', limit_cell_count)]
                self.add_new_register(uc_limit_name[name_idx], section, "write", value_list, value_pair, comment)

                value_pair = [ValuePair('shared_resume', resume_cell_count)]
                self.add_new_register(uc_resume_name[name_idx], section, "write", value_list, value_pair, comment)

        # pass 3: per-port multicast access to each pool -- 90% of the pool
        limit_granularity_shift  = 0
        resume_granularity_shift = 3
        for mc_type in mc_type_list :
            if mc_type == 'mcqe' :
                limit_granularity_shift  = 2
            for sp_id in range(num_service_pools) :
                limit_cell_count = int(self.config_manager.buffer_desc.eg_sp_buffer_limit[sp_id]['green'] * 0.90) >> limit_granularity_shift
                resume_cell_count = (limit_cell_count - 100) >> resume_granularity_shift
                if resume_cell_count < 0 :
                    resume_cell_count = 0

                for logical_port in logical_port_list :
                    mmu_port = self.port_desc.port_map['logical'][logical_port]['mmu_port']
                    if mmu_port < 64 :
                        name_idx = 0
                    else :
                        name_idx = 1
                    mc_name    = 'mmu_thdm_%s_portsp_config_%d' % (mc_type, name_idx)
                    comment    = 'xe%d pipe %s mmu_port %d sp %d' % (logical_port, name_idx, mmu_port & 0x3f, sp_id)
                    table_idx  = ((mmu_port & 0x3f) * num_service_pools) + sp_id
                    value_list = [table_idx, 1]
                    value_pair = [ValuePair('shared_limit', limit_cell_count),
                                  ValuePair('shared_resume_limit', resume_cell_count),
                                  ValuePair('shared_limit_enable', 1)]
                    self.add_new_register(mc_name, section, "write", value_list, value_pair, comment)


    # ------------------------------------------------------------------
    #
    #            g e n e r a t e __ c p u __ c o s __ m a p
    #
    # ------------------------------------------------------------------
    def generate_cpu_cos_map (self) :
        """Program the cpu_cos_map table.

        Clears all 128 entries, then installs one match entry per
        configured packet-priority -> COS mapping, allocating entries
        from the top of the table (index 127) downward.
        """

        section = "priority mapping"
        name    = 'cpu_cos_map'

        # wipe the whole table (entries 0..127)
        clear_pairs = [ValuePair("int_pri_key", 0),
                       ValuePair("int_pri_mask", 0),
                       ValuePair("cos", 0),
                       ValuePair("valid", 0)]
        self.add_new_register(name, section, "modify", ["0", "128"], clear_pairs)

        # configure our entries, one per packet priority
        cpu_cos_idx = 127
        for packet_priority, cos_id in self.config_manager.traffic.pkt2cos.iteritems() :
            cos_queue_config = self.config_manager.cos_queue.cos_id_dict[cos_id]
            entry_pairs = [ValuePair("int_pri_key", cos_id),
                           ValuePair("int_pri_mask", 15, 'hex'),
                           ValuePair("cos", cos_queue_config.cpu),
                           ValuePair("valid", 1)]
            self.add_new_register(name, section, "modify",
                                  ["%d" % cpu_cos_idx, "1"], entry_pairs)
            cpu_cos_idx -= 1

        return

    # ------------------------------------------------------------------
    #
    #             g e t __ q __ c o n f i g __ v a l u e s
    #
    # ------------------------------------------------------------------
    def get_q_config_values (self, q_type, q_id, bw) :
        """Collect the threshold-register field values for one queue.

        Returns a dict with keys: spid, limit_enable, min_limit,
        shared_limit, color_limit_enable, yellow_limit, red_limit,
        dynamic_limit.  Cell counts come from the buffer descriptor for
        (q_type, q_id); the minimum limit is weighted by the port
        bandwidth 'bw'.
        """

        buffer_desc = self.config_manager.buffer_desc
        limits      = buffer_desc.queue_buffer_limit[q_type][q_id]
        unlimited   = buffer_desc.queue_buffer_unlimited[q_type][q_id]
        color_aware = buffer_desc.queue_buffer_color_aware[q_type][q_id]

        values = {'spid': self.config_manager.priority_group.q2sp[q_type][q_id]}

        if unlimited == True :
            values['limit_enable'] = 0
            values['min_limit']    = 0
            values['shared_limit'] = 0
        else :
            values['limit_enable'] = 1
            min_cells = limits['minimum'] if 'minimum' in limits else 0
            values['min_limit'] = self.port_desc.get_weighted_per_port_cells(min_cells, bw)

            # allow access to the service pool buffer; color-aware
            # queues take their pool access from the green limit
            shared_key = 'shared green' if color_aware else 'shared'
            values['shared_limit'] = limits[shared_key] if shared_key in limits else 0

        # color limits default to off
        values['color_limit_enable'] = 0
        values['yellow_limit']       = 0
        values['red_limit']          = 0
        if unlimited == False and color_aware :
            # set the red and yellow limits
            values['color_limit_enable'] = 1
            values['yellow_limit'] = limits['shared yellow'] if 'shared yellow' in limits else 0
            values['red_limit']    = limits['shared red'] if 'shared red' in limits else 0

        values['dynamic_limit'] = 0

        return values

    # ------------------------------------------------------------------
    #
    #        g e n e r a t e __ c p u __ q u e u e __ r e g s
    #
    # ------------------------------------------------------------------
    def generate_cpu_queue_regs (self,
                                 section) :
        """Generate the MMU DB queue-config registers for the CPU queues.

        Zeroes the CPU queue range of both pipes' config tables, writes
        the configured limits for each CPU traffic queue (allocating
        color resume profiles as needed), and reserves default buffer
        space for the CPU error queues.
        """

        q_type = 'cpu'
        name = ['mmu_thdm_db_queue_config_0', 'mmu_thdm_db_queue_config_1']

        # register fields, paired with the get_q_config_values() key
        # each one is loaded from
        field_to_key = [("q_spid",                'spid'),
                        ("q_min_limit",           'min_limit'),
                        ("q_shared_limit",        'shared_limit'),
                        ("yellow_shared_limit",   'yellow_limit'),
                        ("red_shared_limit",      'red_limit'),
                        ("q_limit_dynamic",       'dynamic_limit'),
                        ("q_color_limit_dynamic", 'dynamic_limit'),
                        ("q_limit_enable",        'limit_enable'),
                        ("q_color_limit_enable",  'color_limit_enable')]

        # clear the CPU queue range in both pipes
        value_list  = [self.num_mc_queues_per_pipe, self.config_manager.num_cpu_queues]
        value_pairs = [ValuePair(field, 0) for field, _ in field_to_key]
        self.add_new_register(name[0], section, "write", value_list, value_pairs)
        self.add_new_register(name[1], section, "write", value_list, value_pairs)

        # set configurations; the CPU port sits in pipe 0
        cpu_q_base = self.get_base_cpu_queue() + self.num_mc_queues_per_pipe
        mmu_port = self.cpu_mmu_port
        name_idx = 0
        for q_id in self.config_manager.priority_group.q2sp['cpu'] :  # just a handy list of the CPU traffic queues
            value_list = [cpu_q_base + q_id, 1]
            values = self.get_q_config_values(q_type, q_id, 0)
            value_pairs = [ValuePair(field, values[key])
                           for field, key in field_to_key]
            comment = 'cpu0 pipe 0 mmu_port %d queue %d' % (mmu_port & 0x3f, q_id)
            self.add_new_register(name[name_idx], section, "write", value_list, value_pairs, comment)

            # allocate/select the color resume-offset profiles
            self.set_color_resume_profile(section, self.yellow_profile_values,
                                          'db', value_list, 'yellow',
                                          values['yellow_limit'], '')
            self.set_color_resume_profile(section, self.red_profile_values,
                                          'db', value_list, 'red',
                                          values['red_limit'], '')

        # assign some default buffer space to our CPU error queues
        for cpu_error in self.cpu_error_list :
            value_list = [cpu_q_base + cpu_error['q_id'], 1]
            value_pairs = [ValuePair("q_min_limit", cpu_error['cell_count']),
                           ValuePair("q_limit_enable", 1)]
            self.add_new_register(name[name_idx], section, "write", value_list, value_pairs)

    # ------------------------------------------------------------------
    #
    #         s e t __ c o l o r __ r e s u m e __ p r o f i l e
    #
    # ------------------------------------------------------------------
    def set_color_resume_profile (self, section, profile_values, mc_type, value_list, color, limit, comment) :
        """Point a queue at a shared resume-offset profile for `color`.

        `profile_values` is an 8-slot table (seeded with -1 in __init__,
        -1 meaning "free").  This finds a slot holding the offset derived
        from `limit`; on first use it claims a free slot and programs the
        corresponding profile register, then selects that profile index
        for the queue (addressed by `value_list`) in both pipes' offset
        tables for `mc_type` ('db' or 'mcqe').  A zero `limit` means no
        color limit is configured, so nothing is written.  Exits the
        program when all 8 slots hold other offsets.

        NOTE(review): the `comment` parameter is currently unused.
        """

        name = { 'db'   : ['mmu_thdm_db_queue_offset_0', 'mmu_thdm_db_queue_offset_1'],
                 'mcqe' : ['mmu_thdm_mcqe_queue_offset_0', 'mmu_thdm_mcqe_queue_offset_1']}

        if limit == 0 :
            return

        # resume offset is 100 cells (zero when the limit is below 100),
        # expressed in 8-cell units for the register
        offset_value = 100
        if limit - offset_value < 0 :
            offset_value = 0
        offset_value  = offset_value / 8

        match_idx = -1
        for profile_idx, profile_value in enumerate(profile_values) :
            if profile_value == offset_value :
                # an existing profile already holds this offset
                match_idx = profile_idx
                break
            if profile_value == -1 :
                # first free slot: claim it and program the profile register
                match_idx = profile_idx
                profile_values[match_idx] = offset_value
                profile_value_list  = []
                value_pairs = [ValuePair('%s_resume_offset' % color, offset_value)]
                reg_name = 'mmu_thdm_%s_queue_resume_offset_profile_%s[%d]' % (mc_type, color, match_idx)
                self.add_new_register(reg_name, section, "modreg", profile_value_list, value_pairs, '')
                break
        if match_idx < 0 :
            # all 8 profiles hold other offsets -- fatal configuration error
            self.report_error('too many %s profile values' % color)
            sys.exit(1)

        # select the matched profile for this queue in both pipes
        value_pairs = [ValuePair('%s_resume_offset_profile_sel' % color,  match_idx)]
        self.add_new_register(name[mc_type][0], section, "modify", value_list, value_pairs)
        self.add_new_register(name[mc_type][1], section, "modify", value_list, value_pairs)

    # ------------------------------------------------------------------
    #
    #        g e n e r a t e __ m c __ q u e u e __ r e g s
    #
    # ------------------------------------------------------------------
    def generate_mc_queue_regs (self,
                                logical_port_list,
                                section,
                                qe_flag) :
        """Generate the MMU multicast queue threshold registers.

        Covers either the packet-data ('db') resource or, when qe_flag
        is True, the MC queue-entry ('mcqe') resource, whose limits are
        in 4-cell units.  Clears the whole MC + CPU queue range in both
        pipes, then programs the configured limits and color resume
        profiles for every MC queue of every port in logical_port_list.
        """

        name = {'db'   : ['mmu_thdm_db_queue_config_0',   'mmu_thdm_db_queue_config_1'],
                'mcqe' : ['mmu_thdm_mcqe_queue_config_0', 'mmu_thdm_mcqe_queue_config_1'] }

        q_type = 'mc'
        mc_type = 'db'
        granularity = 1
        if qe_flag == True :
            mc_type = 'mcqe'
            granularity = 4   # mcqe limits are in units of 4

        # clear the limit configurations for the whole MC + CPU queue range
        value_list = [0, self.num_mc_queues_per_pipe + self.config_manager.num_cpu_queues]
        value_pairs = [ValuePair("q_spid",                0),
                       ValuePair("q_min_limit",           0),
                       ValuePair("q_shared_limit",        0),
                       ValuePair("yellow_shared_limit",   0),
                       ValuePair("red_shared_limit",      0),
                       ValuePair("q_limit_dynamic",       0),
                       ValuePair("q_color_limit_dynamic", 0),
                       ValuePair("q_limit_enable",        0),
                       ValuePair("q_color_limit_enable",  0)]
        self.add_new_register(name[mc_type][0], section, "write", value_list, value_pairs)
        self.add_new_register(name[mc_type][1], section, "write", value_list, value_pairs)

        # XXX the resume-config and resume-offset-profile table clears
        # still need their register names; those tables are currently
        # left at reset values.

        # configure the queues
        for logical_port in logical_port_list :
            mmu_port = self.port_desc.port_map['logical'][logical_port]['mmu_port']
            # pipe 0 ports have mmu_port < 64, pipe 1 ports >= 64
            if mmu_port < 64 :
                name_idx = 0
            else :
                name_idx = 1
            bw = self.port_desc.port_map['logical'][logical_port]['bw']
            mc_q_base = self.get_base_mc_queue(mmu_port, self.local_to_port_q_flag)
            for q_id in self.config_manager.priority_group.q2sp['mc'] :  # just a handy list of the MC traffic queues
                value_list = [mc_q_base + q_id, 1]
                values = self.get_q_config_values(q_type, q_id, bw)
                # hoisted above the register write so it is always defined
                comment = 'xe%d pipe %s mmu_port %d queue %d' % (logical_port, name_idx, mmu_port & 0x3f, q_id)

                # build the limit configs, only emitting non-default fields;
                # the service pool ID is always written
                value_pairs = [ValuePair("q_spid", values['spid'])]

                if values['limit_enable'] == 1 :
                    value_pairs.append(ValuePair("q_limit_enable", values['limit_enable']))

                if values['color_limit_enable'] == 1 :
                    value_pairs.append(ValuePair("q_color_limit_enable", values['color_limit_enable']))

                if values['min_limit'] > 0 :
                    value_pairs.append(ValuePair("q_min_limit", int(values['min_limit']/granularity)))
                if values['shared_limit'] > 0 :
                    value_pairs.append(ValuePair("q_shared_limit", int(values['shared_limit']/granularity)))

                # color limits are in 8-cell units
                if values['yellow_limit'] > 0 :
                    value_pairs.append(ValuePair("yellow_shared_limit", int(values['yellow_limit'] >> 3)))

                if values['red_limit'] > 0 :
                    value_pairs.append(ValuePair("red_shared_limit", int(values['red_limit'] >> 3)))

                if values['dynamic_limit'] > 0 :
                    value_pairs.append(ValuePair("q_limit_dynamic",       values['dynamic_limit']))
                    value_pairs.append(ValuePair("q_color_limit_dynamic", values['dynamic_limit']))

                # value_pairs is never empty (q_spid is always present),
                # so the old len() guard was dropped
                self.add_new_register(name[mc_type][name_idx], section, "modify", value_list, value_pairs, comment)

                # select/allocate the color resume-offset profiles; these
                # tables are shared across the chip, so use the instance
                # tables (the old local copies here were dead code)
                self.set_color_resume_profile(section, self.yellow_profile_values, mc_type, value_list, 'yellow', values['yellow_limit'], comment)
                self.set_color_resume_profile(section, self.red_profile_values, mc_type, value_list, 'red', values['red_limit'], comment)

    # --------------------------------------------------------------------
    #
    #         g e n e r a t e __ u c __ q u e u e __ r e g s
    #
    # --------------------------------------------------------------------
    def generate_uc_queue_regs (self,
                                logical_port_list,
                                section) :
        """Generate the per-pipe unicast queue buffer-limit registers.

        First clears every UC queue config/resume/qgroup table entry in
        both pipes, then programs the limits for each configured queue
        of each logical port in logical_port_list.
        """

        # index 0 = X pipe (mmu port < 64), index 1 = Y pipe
        name = {'q_config' : ['mmu_thdu_xpipe_config_queue', 'mmu_thdu_ypipe_config_queue'],
                'q_to_grp' : ['mmu_thdu_xpipe_q_to_qgrp_map', 'mmu_thdu_ypipe_q_to_qgrp_map'],
                'q_resume' : ['mmu_thdu_xpipe_resume_queue', 'mmu_thdu_ypipe_resume_queue']}

        q_type = 'uc'

        # clear the queue configurations
        value_list = [0, self.num_uc_queues_per_pipe]
        value_pair_list = [ValuePair("q_min_limit_cell",     0),
                           ValuePair("q_shared_limit_cell",  0),
                           ValuePair("limit_red_cell",       0),
                           ValuePair("limit_yellow_cell",    0),
                           ValuePair("q_limit_dynamic_cell", 0)]
        self.add_new_register(name['q_config'][0], section, "write", value_list, value_pair_list)
        self.add_new_register(name['q_config'][1], section, "write", value_list, value_pair_list)

        value_pair_list = [ValuePair("qyel_reset", 0),
                           ValuePair("qred_reset", 0),
                           ValuePair("qdrp_reset", 0)]
        self.add_new_register(name['q_resume'][0], section, "write", value_list, value_pair_list)
        self.add_new_register(name['q_resume'][1], section, "write", value_list, value_pair_list)

        value_pair_list = [ValuePair("q_spid", 0),
                           ValuePair("q_limit_enable", 0),
                           ValuePair("qgroup_valid", 0),
                           ValuePair("q_color_enable_cell", 0)]
        self.add_new_register(name['q_to_grp'][0], section, "write", value_list, value_pair_list)
        self.add_new_register(name['q_to_grp'][1], section, "write", value_list, value_pair_list)

        # resume (reset) fields are in units of 8 cells, clamped to the
        # 14-bit hardware field range [0, 16383]
        q_reset_max = 16383
        def _reset_units (limit) :
            # back off 100 cells from the limit, convert to 8-cell units
            return max(0, min(q_reset_max, (limit - 100) >> 3))

        # configure the queues
        for logical_port in logical_port_list :
            mmu_port = self.port_desc.port_map['logical'][logical_port]['mmu_port']
            if mmu_port < 64 :
                name_idx = 0
            else :
                name_idx = 1
            bw = self.port_desc.port_map['logical'][logical_port]['bw']
            uc_q_base = self.get_base_uc_queue(logical_port, self.local_to_port_q_flag)
            for q_id in self.config_manager.priority_group.q2sp[q_type] :  # just a handy list of the UC traffic queues
                value_list = [uc_q_base + q_id, 1]
                values = self.get_q_config_values(q_type, q_id, bw)
                comment = 'xe%d pipe %s mmu_port %d queue %d' % (logical_port, name_idx, mmu_port & 0x3f, q_id)

                # qgroup / service-pool mapping: only emit the fields
                # that differ from the cleared defaults
                value_pair_list = []
                if values['spid'] != 0 :
                    value_pair_list.append(ValuePair("q_spid", values['spid']))
                if values['limit_enable'] == 1 :
                    value_pair_list.append(ValuePair("q_limit_enable", values['limit_enable']))
                if values['color_limit_enable'] == 1 :
                    value_pair_list.append(ValuePair("q_color_enable_cell", values['color_limit_enable']))
                if len(value_pair_list) > 0 :
                    self.add_new_register(name['q_to_grp'][name_idx], section, "mod", value_list, value_pair_list, comment)
                if values['limit_enable'] == 0 :
                    continue

                # enable and set the buffer limits
                value_pair_list = [ ValuePair("q_min_limit_cell",     values['min_limit']),
                                    ValuePair("limit_red_cell",       values['red_limit'] >> 3),  # field is in units of 8 cells
                                    ValuePair("limit_yellow_cell",    values['yellow_limit'] >> 3),     # field is in units of 8 cells
                                    ValuePair("q_shared_limit_cell",  values['shared_limit']),
                                    ValuePair("q_limit_dynamic_cell", values['dynamic_limit'])]
                self.add_new_register(name['q_config'][name_idx], section, "mod", value_list, value_pair_list, comment)

                # resume thresholds, slightly below each limit
                value_pair_list = [ValuePair("qdrp_reset", _reset_units(values['shared_limit'])),
                                   ValuePair("qyel_reset", _reset_units(values['yellow_limit'])),
                                   ValuePair("qred_reset", _reset_units(values['red_limit']))]
                comment = 'xe%d pipe %s mmu_port %d queue %d: value = cells/8' % (logical_port, name_idx, mmu_port & 0x3f, q_id)
                self.add_new_register(name['q_resume'][name_idx], section, "mod", value_list, value_pair_list, comment)

    # --------------------------------------------------------------------
    #
    #   g e n e r a t e __ q u e u e __ r e g s
    #
    # --------------------------------------------------------------------
    def generate_queue_regs (self) :
        """Generate all per-queue buffer-management registers: unicast,
        multicast (both passes), and CPU queues."""

        section = "buffer management"

        # emit per-port registers in ascending logical-port order
        ports = sorted(self.port_desc.port_map['logical'].keys())

        self.generate_uc_queue_regs(ports, section)
        self.generate_mc_queue_regs(ports, section, False)
        self.generate_mc_queue_regs(ports, section, True)
        self.generate_cpu_queue_regs(section)

    # --------------------------------------------------------------------
    #
    #       g e n e r a t e __ o v e r f l o w __ q u e u e __ r e g s
    #
    # --------------------------------------------------------------------
    def generate_overflow_queue_regs (self) :
        """Placeholder: no overflow-queue registers are generated here."""
        pass

    # --------------------------------------------------------------------
    #
    #       g e n e r a t e __ f l o w c o n t r o l __ r e g s
    #
    # --------------------------------------------------------------------
    def generate_flowcontrol_regs (self) :
        """Generate the PFC flow-control mapping registers.

        For every configured lossless priority group, map each of its
        CoS values into the per-pipe INTFI fc_map_tbl2 table (both the
        UC and the MC queue of that CoS) so the queues respond to the
        port's PFC state.
        """

        section = 'flow control'

        for label, priority_group in self.config_manager.priority_group.set_dict.iteritems() :
            if priority_group.configured == False :
                continue
            if priority_group.lossless_flag == False :
                continue

            for cos_id in priority_group.cos_list :
                cos_queue_config = self.config_manager.cos_queue.cos_id_dict[cos_id]
                cosq = cos_queue_config.uc
                for port_group_name in priority_group.port_group_name_list :
                    port_group = priority_group.set_dict[port_group_name]
                    for sdk_port_label in port_group.sdk_port_list :
                        logical_port = self.port_desc.label_2_logical[sdk_port_label]
                        mmu_port = self.port_desc.port_map['logical'][logical_port]['mmu_port']
                        # each mmu port owns 4 consecutive PFC state entries
                        spad_offset = mmu_port * 4
                        if mmu_port < 64 :
                            name = 'mmu_intfi_xpipe_fc_map_tbl2'
                        else :
                            name = 'mmu_intfi_ypipe_fc_map_tbl2'

                        # UC queue: locate the map-table entry and the
                        # index field (eindex) within that entry
                        uc_hw_index = self.get_base_uc_queue(logical_port) + cos_queue_config.uc
                        if self.config_manager.port_desc.is_hsp_port(logical_port) :
                            if ((uc_hw_index % 10) > 7) :
                                # queues 8 and 9 of each HSP port are reserved
                                print('bad uc hw index value %d' % uc_hw_index)
                                continue
                            port_num = uc_hw_index / 10
                            increment = 0
                            if uc_hw_index % 10 >= 4 :
                                increment = 1
                            map_entry_index_num = port_num * 2 + increment
                            map_entry_index = map_entry_index_num / 4
                            eindex = map_entry_index_num & 0x3
                        else :
                            map_entry_index = uc_hw_index/16
                            eindex = (uc_hw_index % 16) / 4

                        value_list = ['%d 1' % map_entry_index]
                        value_pair_list = [ValuePair('index%d' % eindex, spad_offset + cosq / 4),
                                           ValuePair('sel%d' % eindex, 1)] # PFC flag
                        self.add_new_register(name, section, "mod", value_list, value_pair_list)

                        # MC queue
                        # NOTE(review): the MC entry also uses the UC cos
                        # value (cosq) for the PFC state index -- confirm
                        # this is intended rather than cos_queue_config.mc
                        mc_hw_index = self.get_base_mc_queue(mmu_port) + cos_queue_config.mc
                        if ((mc_hw_index % 10) > 7) :
                            print('bad mc hw index value %d' % mc_hw_index)
                            continue
                        mc_q_offset = mc_hw_index % 1480
                        map_entry_index = mc_hw_index - (mc_q_offset * 2 / 10)
                        map_entry_index /= 16
                        eindex = (mc_q_offset % 10)/4
                        if (mc_q_offset /10) % 2 == 0 :
                            eindex += 2
                        value_list = ['%d 1' % map_entry_index]
                        value_pair_list = [ValuePair('index%d' % eindex, spad_offset + cosq / 4),
                                           ValuePair('sel%d' % eindex, 1)] # PFC flag
                        self.add_new_register(name, section, "mod", value_list, value_pair_list)

    # ------------------------------------------------------------------
    #
    #                  g e n e r a t e __ c o s __ m a p
    #
    # ------------------------------------------------------------------
    def generate_cos_map (self) :
        """Generate the packet-priority to CoS mapping registers.

        Sets each port's ingress CoS mode and base queue numbers,
        selects quadrant 0 of the cos map table, clears the table, then
        writes one cos_map entry per configured packet priority.
        """

        section  = "priority mapping"

        value_list = []
        logical_port_list = self.port_desc.port_map['logical'].keys()
        logical_port_list.sort()
        for logical_port in logical_port_list :
            port_name = '.xe%d' % logical_port
            # NOTE(review): mmu_port is looked up but unused in this loop
            mmu_port = self.port_desc.port_map['logical'][logical_port]['mmu_port']
            uc_base_q = self.get_base_uc_queue(logical_port, self.global_q_flag)
            value_pairs = [ValuePair("queue_mode", 0), ValuePair("cos_mode",self.cos_mode)]
            value_pairs.append(ValuePair('base_queue_num_0', uc_base_q))
            value_pairs.append(ValuePair('base_queue_num_1', uc_base_q))
            name = 'ing_cos_mode_64' + port_name
            self.add_new_register(name, section, "modreg", value_list, value_pairs)

        # cos_mode_x and cos_mode_y selects a queue for MC packets
        #    based on cos_map.mc_cos1 or phb_cos_map.mc_cos2
        value_list = ["0"]
        value_pairs = []
        name = "cos_mode_x_64"
        self.add_new_register(name, section, "setreg", value_list, value_pairs)

        name = "cos_mode_y_64"
        self.add_new_register(name, section, "setreg", value_list, value_pairs)

        # chooses one of the four quadrants of the cos map table
        value_list = ["0", "67", "0"]
        value_pairs = []
        name = "cos_map_sel"
        self.add_new_register(name, section, "write", value_list, value_pairs)

        # initialize the cos map table (zero all 64 entries)
        cos_map_idx = 0
        cos_map_entries = 64
        value_list = ["%d" % cos_map_idx, "%d" % cos_map_entries]
        value_pairs = [ValuePair("uc_cos1", 0), ValuePair("hg_cos",0), ValuePair("mc_cos1",0)]
        name = "cos_map"
        self.add_new_register(name, section, "write", value_list, value_pairs)

        # map ingress priorities to cos values
        cos_map_entries = 1
        for packet_priority, cos_id in self.config_manager.traffic.pkt2cos.iteritems() :
            cos_queue_config = self.config_manager.cos_queue.cos_id_dict[cos_id]
            uc_cos = cos_queue_config.uc # XXX check this
            mc_cos = cos_queue_config.mc # XXX check this
            # NOTE(review): the table entry is indexed by cos_id rather
            # than by packet_priority -- confirm this is intended
            value_list = ["%d" % cos_id, "%d" % cos_map_entries]
            value_pairs = [ValuePair("uc_cos1", uc_cos),
                           ValuePair("hg_cos",  uc_cos),
                           ValuePair("mc_cos1", mc_cos)]
            name = "cos_map"
            self.add_new_register(name, section, "write", value_list, value_pairs)

    # -----------------------------------------------------------------
    #
    #           g e n e r a t e __ l l s __ l 0 __ n o d e
    #
    # -----------------------------------------------------------------
    def generate_lls_l0_l1_node (self,
                                 pipe_idx,
                                 node_label,
                                 node_idx,
                                 node_weight,
                                 parent_idx,
                                 num_spri_queues,
                                 low_spri_queue,
                                 comment) :
        """Emit the LLS register writes for one L0 or L1 scheduler node:
        its weight, its parent link, and its strict-priority mem config."""

        section = "scheduling"
        prefix = 'es_pipe%d_lls_%s' % (pipe_idx, node_label)
        value_list = [node_idx, 1]

        # weight of this node relative to its siblings
        self.add_new_register(prefix + '_child_weight_cfg', section, 'write',
                              value_list, [ValuePair('c_weight', node_weight)], comment)

        # link to the parent scheduler node
        self.add_new_register(prefix + '_parent', section, 'write',
                              value_list, [ValuePair('c_parent', parent_idx)], comment)

        # strict-priority configuration; the start index only applies
        # to L0 nodes (num_spri_queues > 0)
        spri_pairs = [ValuePair('p_num_spri', num_spri_queues)]
        if num_spri_queues > 0 :
            spri_pairs.append(ValuePair('p_start_spri', low_spri_queue))
        for mem_suffix in ('_mema_config', '_memb_config') :
            self.add_new_register(prefix + mem_suffix, section, 'write',
                                  value_list, spri_pairs, comment)

    # -----------------------------------------------------------------
    #
    #           g e n e r a t e __ l l s __ l 2 __ n o d e
    #
    # -----------------------------------------------------------------
    def generate_lls_l2_node (self, pipe_idx, node_idx, node_weight, parent_idx, comment) :
        """Emit the LLS register updates for one L2 (queue) scheduler node:
        its weight and its link to the parent L1 node."""

        section = "scheduling"
        prefix = 'es_pipe%d_lls_l2' % pipe_idx
        value_list = [node_idx, 1]

        # weight relative to sibling queues under the same L1 node
        self.add_new_register(prefix + '_child_weight_cfg', section, 'modify',
                              value_list, [ValuePair("c_weight", node_weight)], comment)

        # attach the queue to its L1 parent
        self.add_new_register(prefix + '_parent', section, 'modify',
                              value_list, [ValuePair('c_parent', parent_idx)], comment)

    # ------------------------------------------------------------------
    #
    #      g e n e r a t e __ l l s __ s c h e d u l i n g __ r e g s
    #
    # ------------------------------------------------------------------
    def generate_lls_scheduling_regs (self, logical_port, cpu_flag) :
        """Build the linked-list scheduler (LLS) hierarchy for one port.

        L2 (queue) nodes feed per-CoS L1 nodes, which feed one L0 node,
        which feeds the port node.  When cpu_flag is True the CPU port
        is configured and logical_port is ignored.  Advances the shared
        self.l0_idx / self.l1_idx counters for the pipe as nodes are
        allocated.
        """

        # linked list scheduler for ports under 40G, and for ports 40G and over on some chips

        num_queues = self.num_uc_queues_per_port
        if cpu_flag == True :
            num_queues  = self.config_manager.num_cpu_queues
            mmu_port    = self.cpu_mmu_port
            pipe_idx    = 0
            port_label  = 'cpu'
            queue_label = 'cpu'
        else :
            mmu_port   = self.port_desc.port_map['logical'][logical_port]['mmu_port']
            if mmu_port < 64 :
                pipe_idx = 0
            else :
                pipe_idx = 1
            port_label = 'xe%d' % logical_port
            queue_label = 'uc'

        #
        # this code assumes a 1:1:1 mapping between uc, mc, and CPU queues
        #

        # strict priority scheduling
        low_spri_queue  = 0
        num_spri_queues = 0

        # the port scheduling node index
        port_idx = mmu_port & 0x3f

        # weight UC with respect to MC
        # XXX make uc/mc weights configurable
        uc_weight = 16
        mc_weight = 16

        # one L0 node per port: it schedules between CoS values (UC and MC merged by L1 node)
        # the L0 weight doesn't affect scheduling behavior
        l0_idx = self.l0_idx[pipe_idx]
        l0_weight = 16

        queue_dict = self.config_manager.queue_dict
        for q_id in range(num_queues) :
            if q_id not in queue_dict[queue_label] :
                continue
            queue_weight = queue_dict[queue_label][q_id]

            # one L1 node per CoS value: it schedules between UC and MC queues
            l1_idx = self.l1_idx[pipe_idx]

            # weight 0 means strict priority: track the run of spri L1
            # nodes so the L0 node can be told where they start
            if queue_weight == 0 :
                if num_spri_queues == 0 :
                    low_spri_queue = l1_idx
                num_spri_queues += 1

            if cpu_flag == False :

                # ---- UC L2 node configuration ----
                base_uc_idx    = self.get_base_uc_queue(logical_port)
                l2_idx      = base_uc_idx + q_id
                l1_idx      = self.l1_idx[pipe_idx]
                comment     = '%s pipe %s UC queue %d' % (port_label, pipe_idx, q_id)
                self.generate_lls_l2_node(pipe_idx, l2_idx, uc_weight, l1_idx, comment)

                # ---- MC L2 and L1 node configuration ----
                base_mc_idx = self.get_base_mc_queue(mmu_port)
                l2_idx      = base_mc_idx + q_id
                comment     = '%s pipe %s MC queue %d' % (port_label, pipe_idx, q_id)
                self.generate_lls_l2_node(pipe_idx, l2_idx, mc_weight, l1_idx, comment)

            else :
                # ---- CPU L2 and L1 node configuration ----
                # cpu queue weights
                base_cpu_idx   = (self.get_base_cpu_queue(self.global_q_flag))
                l2_idx      = base_cpu_idx + q_id
                l1_idx      = self.l1_idx[pipe_idx]
                comment = '%s pipe %s CPU queue %d' % (port_label, 0, q_id)
                self.generate_lls_l2_node(0, l2_idx, uc_weight, l1_idx, comment)

            # ---- L1 scheduler node configuration ----
            # L1 nodes have no strict-priority children, hence the 0, 0
            comment = '%s pipe %s queue %d' % (port_label, pipe_idx, q_id)
            self.generate_lls_l0_l1_node(pipe_idx, 'l1', l1_idx, queue_weight, l0_idx, 0, 0, comment)
            self.l1_idx[pipe_idx] += 1

        # ---- L0 scheduler node configuration ----
        comment = '%s pipe %s' % (port_label, pipe_idx)
        self.generate_lls_l0_l1_node(pipe_idx, 'l0', l0_idx, l0_weight, port_idx, num_spri_queues, low_spri_queue, comment)
        self.l0_idx[pipe_idx] += 1

        # --- port scheduler node configuration ---
        section        = "scheduling"
        port_mema_name = 'es_pipe%d_lls_port_mema_config' % pipe_idx
        port_memb_name = 'es_pipe%d_lls_port_memb_config' % pipe_idx
        comment        = '%s pipe %s mmu_port %d' % (port_label, pipe_idx, mmu_port)
        value_list     = [port_idx, 1]
        value_pairs    = [ValuePair('p_num_spri', 0)]
        self.add_new_register(port_mema_name, section, 'write', value_list, value_pairs, comment)
        self.add_new_register(port_memb_name, section, 'write', value_list, value_pairs, comment)

    #------------------------------------------------------------------------
    # HSP Level 2 scheduler nodes: the queues 
    # All the sched params are hardcoded
    #------------------------------------------------------------------------
    def generate_hsp_l2_node(self, logical_port):
        """Configure the HSP level-2 scheduler nodes (the queues) for a
        port.  All scheduling parameters at this level are hardcoded.
        """
        section = "scheduling"
        port_label = 'xe%d' % logical_port

        # 10 UC and 10 MC queues per port; 2 UC and 2 MC are reserved
        # for QM/SC traffic, leaving 8 pairs to use as CoS queues
        l2_cnt = 8

        # enable_sp_in_min(10bits) - if bit is set strict pri q under min bw
        # mask(10bits) -  if set suspend scheduling at the parent/L1 level
        no_values = []
        config_pairs = [ValuePair("enable_sp_in_min", 0), ValuePair("mask",0)]
        for reg_base in ('hsp_sched_l2_uc_queue_config.',
                         'hsp_sched_l2_mc_queue_config.'):
            self.add_new_register(reg_base + port_label, section, "setreg",
                                  no_values, config_pairs)

        # Weight - Value is in unit of packets (when WRR is used).
        l2_weight = 16
        for q in range(l2_cnt):
            self.add_new_register('hsp_sched_l2_uc_queue_weight[%d].%s' % (q, port_label),
                                  section, "setreg", no_values,
                                  [ValuePair("weight", l2_weight)])
            self.add_new_register('hsp_sched_l2_mc_queue_weight[%d].%s' % (q, port_label),
                                  section, "setreg", no_values,
                                  [ValuePair("weight", l2_weight)])

    #------------------------------------------------------------------------
    # HSP Level 1 scheduler nodes: one node per cos
    # weight per-node is user configurable via traffic.conf
    #------------------------------------------------------------------------
    def generate_hsp_l1_node(self, logical_port):
        """Configure the HSP level-1 scheduler nodes (one per CoS) for a
        port.  Per-node weight is user configurable via traffic.conf.
        """
        section = "scheduling"
        port_label = 'xe%d' % logical_port

        # 10 L1 nodes exist; L1.8 and L1.9 are reserved for QM and SC
        l1_cnt = 8

        # enable_wrr(10bits) - if bit is set WRR else WERR
        # enable_sp_in_min(10bits) - if bit is set strict-pri scheduling
        #                            under min bw
        # mask(10bits) - suspend scheduling at the parent/L0 level
        #
        # XXX - Currently we hardcode the sched to WRR (only possible
        # modes are WRR and WERR). SP can be set via weight 0.
        wrr_bit_mask = 0xff  # we only control sched of the 8 cos nodes
        empty_values = []
        self.add_new_register('hsp_sched_l1_node_config.' + port_label,
                              section, "setreg", empty_values,
                              [ValuePair("mask", 0),
                               ValuePair("enable_wrr", wrr_bit_mask),
                               ValuePair("enable_sp_in_min", 0)])

        # weight is configurable per-priority
        uc_weights = self.config_manager.queue_dict['uc']
        for q_id in range(l1_cnt):
            if q_id in uc_weights:
                self.add_new_register('hsp_sched_l1_node_weight[%d].%s' % (q_id, port_label),
                                      section, "setreg", empty_values,
                                      [ValuePair("weight", uc_weights[q_id])])

    #------------------------------------------------------------------------
    # HSP Level 0 scheduler nodes: one node
    # All sched params are hardcoded at this level
    #------------------------------------------------------------------------
    def generate_hsp_l0_node(self, logical_port):
        """Configure the single active HSP level-0 scheduler node for a
        port.  All scheduling parameters are hardcoded at this level.
        """
        section   = "scheduling"
        port_label = 'xe%d' % logical_port

        # There are 5 L0 nodes. 
        # L0.0 is reserved for MC. 
        # L0.4 is reserved for SC/QM
        # L0.1 is the only node we use (L0.2, L0.3 are available but unused)
        l0_idx = 1

        # enable_wrr(5bits) - if bit is set WRR else WERR (AK - need to
        #                     investigate WERR usage)
        # enable_sp_in_min(5bits) - if bit is set strict-pri scheduling
        #                           under min bw
        # mask(5bits) - suspend scheduling at the parent/L0 level
        wrr_bit_mask = 1 << l0_idx
        name = 'hsp_sched_l0_node_config.' + port_label
        value_list = []
        value_pairs = [ValuePair("mask", 0),
                       ValuePair("enable_wrr", wrr_bit_mask),
                       ValuePair("enable_sp_in_min", 0)]
        self.add_new_register(name, section, "setreg", value_list, 
                              value_pairs)

        # bit map of L1 nodes associated with L0.1 (L1.0 - L1.7)
        name = 'hsp_sched_l0_node_connection_config[%d].%s' % (l0_idx, port_label)
        value_pairs = [ValuePair("children_connection_map", 0xff)]
        self.add_new_register(name, section, "setreg", value_list, 
                              value_pairs)

        # L0 node weight (only one L0 node is active, so the value is moot)
        name = "hsp_sched_l0_node_weight"
        lo_weight = 16
        value_pairs = [ValuePair("weight", lo_weight)]
        self.add_new_register(name + '[%d].%s' % (l0_idx, port_label),
                              section, "setreg", value_list, value_pairs)

    #------------------------------------------------------------------------
    # HSP port scheduler node 
    # All sched params are hardcoded at this level
    #------------------------------------------------------------------------
    def generate_hsp_port_node(self, logical_port):
        """Configure the HSP port-level scheduler node.  All scheduling
        parameters are hardcoded at this level.
        """
        # enable_wrr - sched mode. we currently just hardcode it. afterall
        #              there is only one active L0 node.
        # mc_group_mode - setting this to 1 would result in all the L2/MC
        #                 queues being grouped into one L0 node L0.0
        pairs = [ValuePair("enable_wrr", 1),
                 ValuePair("mc_group_mode", 0)]
        self.add_new_register('hsp_sched_port_config.xe%d' % logical_port,
                              "scheduling", "setreg", [], pairs)

    # ------------------------------------------------------------------
    #
    #      g e n e r a t e __ h s p __ s c h e d u l i n g __ r e g s
    #
    # ------------------------------------------------------------------
    def generate_hsp_scheduling_regs (self, logical_port) :
        """Configure the full HSP scheduler hierarchy for one port:
        L2 (queues) => L1 (one per CoS) => L0 => port."""

        # build each level bottom-up, in the same order as before
        for build_level in (self.generate_hsp_l2_node,
                            self.generate_hsp_l1_node,
                            self.generate_hsp_l0_node,
                            self.generate_hsp_port_node) :
            build_level(logical_port)

    # ------------------------------------------------------------------
    #
    #         g e n e r a t e __ s c h e d u l i n g __ r e g s
    #
    # ------------------------------------------------------------------
    def generate_scheduling_regs (self) :
        """Generate scheduler registers for every front-panel port and
        for the CPU port.

        HSP-capable ports get the hierarchical scheduler; all others get
        the linked-list scheduler.
        """

        for port in self.port_desc.port_map['logical'].keys() :
            if self.port_desc.is_hsp_port(port):
                self.generate_hsp_scheduling_regs(port)
            else:
                self.generate_lls_scheduling_regs(port, False)

        # configure CPU port scheduling (the logical port argument is
        # ignored when the cpu flag is set)
        self.generate_lls_scheduling_regs(0, True)

# ==============================================================================
#
#                      D A T A P A T H __ C R E A T O R
#
# ==============================================================================
class DatapathCreator:
    """Drives datapath generation: reads the configuration files via the
    config manager, has the chip manager generate its output objects,
    and writes the SDK parameter and BCM register files.
    """

    # ----------------------------------------------------------
    #
    #                    __ i n i t __
    #
    # ----------------------------------------------------------
    def __init__(self, chip, chip_manager, config_manager):
        self.chip             = chip
        self.config_manager   = config_manager
        self.chip_manager     = chip_manager

    # ----------------------------------------------------------
    #
    #                w r i t e __ d a t a p a t h
    #
    # ----------------------------------------------------------
    def write_datapath(self):
        """Write the SDK parameter file and the BCM register file.

        Each file gets an auto-generated preamble; the content comes
        from the chip manager's output objects.
        """

        # use context managers so both files are closed even if a write
        # or print_output_objects raises
        with open(self.sdk_parameter_file, 'w') as parameter_file, \
             open(self.bcm_register_file, 'w') as register_file:

            # parameter file preamble
            parameter_file.write("\n# ---------------------------------------------------------------------\n")
            parameter_file.write("#\n")
            parameter_file.write("# Automatically generated by /usr/lib/cumulus/datapath-update: \n")
            parameter_file.write("# BCM SDK datapath parameters for %s\n" % self.chip.__class__.__name__)
            parameter_file.write("#\n")
            parameter_file.write("# ---------------------------------------------------------------------\n")
            parameter_file.write("\n")

            if self.config_manager.error_comment != '' :
                parameter_file.write('# -------  Error Messages --------\n')
                parameter_file.write(self.config_manager.error_comment)
                parameter_file.write('\n')

            # register file preamble
            register_file.write("# ------------------------------------------------------------\n")
            register_file.write("#\n")
            register_file.write("# Automatically generated by /usr/lib/cumulus/datapath-update: \n")
            register_file.write("# datapath register configuration for %s\n" % self.chip.__class__.__name__)
            register_file.write("#\n")
            register_file.write("# ------------------------------------------------------------\n")
            register_file.write("\n")
            register_file.write(self.config_manager.buffer_comment)

            self.chip_manager.print_output_objects({'parameter' : parameter_file, 'register' : register_file })

    # ----------------------------------------------------------
    #
    #          w r i t e __ d a t a p a t h __ c o n f i g
    #
    # ----------------------------------------------------------
    def write_datapath_config (self) :
        """Write the (currently stubbed) datapath config output file."""
        with open(self.output_file, 'w') as f :
            f.write("stubbed output")

    # ----------------------------------------------------------
    #
    #                c r e a t e __ d a t a p a t h
    #
    # ----------------------------------------------------------
    def create_datapath(self,
                        hw_desc,
                        ports_config,
                        bcm_config,
                        linux_port_map_config,
                        traffic_config,
                        datapath_config,
                        forwarding_config,
                        bcm_register_file,
                        sdk_parameter_file) :
        """Read all configuration inputs, process them, and write the
        resulting register and parameter files.

        bcm_register_file / sdk_parameter_file are the output paths;
        everything else is an input configuration file path.
        """

        self.config_manager.read_config_file(hw_desc)
        self.config_manager.init_config()  # order matters!  must be called just after hw desc read
        self.config_manager.read_port_config(ports_config,
                                             bcm_config,
                                             linux_port_map_config)
        self.config_manager.read_config_file(traffic_config)
        self.config_manager.read_config_file(datapath_config)
        self.config_manager.read_forwarding_config_file(forwarding_config)

        self.config_manager.check_config()
        if self.config_manager.traffic.disable_custom_datapath_config == True :
            logger.debug('Custom datapath configuration is disabled')

        self.config_manager.process_config()

        self.sdk_parameter_file = sdk_parameter_file
        self.bcm_register_file  = bcm_register_file

        self.chip_manager.generate_output_objects()
        self.write_datapath()
        #self.config_manager.dump_port_map()


# ----------------------------------------------------------
#
#                         m a i n
#
# ----------------------------------------------------------
def main(argv) :
    """Parse command-line options, probe the switch platform, and generate
    the datapath register/parameter output files via DatapathCreator.

    argv -- command-line arguments excluding the program name
            (i.e. sys.argv[1:]).
    """

    use_msg  = "datapath-update"
    use_msg += " -c <hardware description file>"
    use_msg += " -p <port config file>"
    # fixed: -m is --bcmconfig and -l (--linuxportmap) was missing entirely
    use_msg += " -m <bcm config file>"
    use_msg += " -l <linux port map config file>"
    use_msg += " -t <traffic config file>"
    use_msg += " -d <datapath config file>"
    use_msg += " -f <forwarding config file>"
    use_msg += " -r <output register file>"
    use_msg += " -g <output parameter file>"

    # Pre-initialize every required option so a missing one is reported as
    # a clean usage error below instead of a NameError at first reference.
    hw_desc               = None
    ports_config          = None
    bcm_config            = None
    linux_port_map_config = None
    traffic_config        = None
    datapath_config       = None
    forwarding_config     = None
    bcm_register_file     = None
    sdk_parameter_file    = None

    try:
        opts, args = getopt.getopt(argv,
                                   "hc:p:m:l:t:d:f:r:g:",
                                   ["hw=","ports=","bcmconfig=","linuxportmap=","traffic=","datapath=","forwarding=","register=","parameter="])
    except getopt.GetoptError:
        logger.error('input error: s.b. %s' % use_msg)
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print('%s' % use_msg)
            sys.exit()
        elif opt in ("-t", "--traffic"):
            traffic_config = arg
        elif opt in ("-p", "--ports"):
            ports_config = arg
        elif opt in ("-m", "--bcmconfig"):
            bcm_config = arg
        elif opt in ("-l", "--linuxportmap"):
            linux_port_map_config = arg
        elif opt in ("-c", "--hw"):
            hw_desc = arg
        elif opt in ("-d", "--datapath"):
            datapath_config = arg
        elif opt in ("-f", "--forwarding"):
            forwarding_config = arg
        elif opt in ("-r", "--register"):
            bcm_register_file = arg
        elif opt in ("-g", "--parameter"):
            sdk_parameter_file = arg

    if None in (hw_desc, ports_config, bcm_config, linux_port_map_config,
                traffic_config, datapath_config, forwarding_config,
                bcm_register_file, sdk_parameter_file) :
        logger.error('missing required option: s.b. %s' % use_msg)
        sys.exit(2)

    logger.debug('Generating a register settings file: ')
    logger.debug('ports config is %s' % ports_config)
    logger.debug('hw description is %s' % hw_desc)
    logger.debug('traffic config is %s' % traffic_config)
    logger.debug('datapath config is %s' % datapath_config)
    logger.debug('forwarding config is %s' % forwarding_config)
    logger.debug('BCM register file is %s' % bcm_register_file)
    logger.debug('SDK parameter file is %s' % sdk_parameter_file)
    logger.debug('Generating %s and appending to %s from %s' % (bcm_register_file, sdk_parameter_file, datapath_config))

    # fetch the chip object for the platform we are running on
    # NOTE(review): the isinstance checks below reference cumulus.platform
    # while the top of the file imports cumulus.platforms -- confirm
    # cumulus.platform is made available as a side effect of that import,
    # otherwise these attribute lookups raise AttributeError.
    platform_object = cumulus.platforms.probe()
    chip = platform_object.switch.chip
    if not (isinstance(chip, cumulus.platform.TridentChip) or \
            isinstance(chip, cumulus.platform.TridentTwo_56850_Chip) or \
            isinstance(chip, cumulus.platform.TridentTwo_56854_Chip) or \
            isinstance(chip, cumulus.platform.TridentTwoPlus_56860_Chip) or \
            isinstance(chip, cumulus.platform.TridentTwoPlus_56864_Chip) or \
            isinstance(chip, cumulus.platform.Helix4Chip) or \
            isinstance(chip, cumulus.platform.TomahawkChip)) :
        logger.error('Chip %s not supported' % chip.__class__.__name__)
        return

    config_manager = ConfigManager(chip)
    chip_manager   = ChipManagerFactory.get_new_manager(chip, config_manager)
    if chip_manager is None :
        # %s (not %d): __class__.__name__ is a string.  Bail out here --
        # DatapathCreator cannot work with a missing chip manager.
        logger.error('Chip %s not supported' % chip.__class__.__name__)
        return
    creator = DatapathCreator(chip, chip_manager, config_manager)
    creator.create_datapath(hw_desc,
                            ports_config,
                            bcm_config,
                            linux_port_map_config,
                            traffic_config,
                            datapath_config,
                            forwarding_config,
                            bcm_register_file,
                            sdk_parameter_file)

# ----------------------------------------------------------
#
#                        e n t r y
#
# ----------------------------------------------------------

if __name__ == "__main__":
    # Strip argv[0] (the program name) before handing options to main().
    main(sys.argv[1:])

